text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
// TestModels_Unmarshal_Time verifies custom JSON deserialization of Time fields,
// covering the workarounds for issue #17 (timestamp without timezone suffix
// falls back to the zero time) and issue #59 (fractional seconds without a
// timezone are still parsed).
func TestModels_Unmarshal_Time(t *testing.T) {
text := []byte("{\"id\":\"d221ad31-3a7b-52c0-b71d-b255b1ff63ba\",\"time1\":\"0001-01-01T00:00:00\",\"time2\":\"2019-09-01T00:07:26Z\",\"time3\":\"2020-05-16T20:55:32.0116793\",\"int\":10,\"string\":\"test string\"}")
testModel := TestModel{}
// Pre-populate Time1 with a non-zero value so the test can detect that
// unmarshalling resets it to the default (zero) time.
testModel.Time1 = &Time{}
testModel.Time1.Time = time.Now() // this ensures we test the value is set back to default when issue #17 is hit.
err := json.Unmarshal(text, &testModel)
if err != nil {
t.Errorf("Error occurred during deserialization: %v", err)
}
// time1 in the payload has no timezone, so it should parse to the zero time.
if (testModel.Time1.Time != time.Time{}) {
t.Errorf("Expecting deserialized time to equal default time. Actual time: %v", testModel.Time1)
}
parsedTime, err := time.Parse(time.RFC3339, "2019-09-01T00:07:26Z")
if err != nil {
t.Errorf(err.Error())
}
if testModel.Time2.Time != parsedTime {
t.Errorf("Expected time: %v Actual time: %v", parsedTime, testModel.Time2.Time)
}
// Test workaround for issue #59 https://github.com/microsoft/azure-devops-go-api/issues/59
// NOTE(review): the err returned by time.Parse below is never checked; a parse
// failure would silently compare against the zero time.
parsedTime59, err := time.Parse("2006-01-02T15:04:05.999999999", "2020-05-16T20:55:32.0116793")
if testModel.Time3.Time != parsedTime59 {
t.Errorf("Expected time: %v Actual time: %v", parsedTime59, testModel.Time3.Time)
}
} | explode_data.jsonl/38127 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 517
} | [
2830,
3393,
16969,
40687,
27121,
39080,
1155,
353,
8840,
836,
8,
341,
15425,
1669,
3056,
3782,
99141,
307,
23488,
67,
17,
17,
16,
329,
18,
16,
12,
18,
64,
22,
65,
12,
20,
17,
66,
15,
1455,
22,
16,
67,
1455,
17,
20,
20,
65,
16,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestRollingUpdateComponent_Render is a table-driven test for
// rollingUpdateComponent.Render: it checks the rendered text output and the
// reported line count for deployments and failure-event messages, including
// long-message wrapping and reverse-chronological ordering of failures.
func TestRollingUpdateComponent_Render(t *testing.T) {
testCases := map[string]struct {
inDeployments []stream.ECSDeployment
inFailureMsgs []string
wantedNumLines int
wantedOut string
}{
"should render only deployments if there are no failure messages": {
inDeployments: []stream.ECSDeployment{
{
Status: "PRIMARY",
TaskDefRevision: "2",
DesiredCount: 10,
RunningCount: 10,
RolloutState: "COMPLETED",
},
},
wantedNumLines: 3,
wantedOut: `Deployments
Revision Rollout Desired Running Failed Pending
PRIMARY 2 [completed] 10 10 0 0
`,
},
"should render a single failure event": {
inFailureMsgs: []string{"(service my-svc) (task 1234) failed container health checks."},
wantedNumLines: 2,
wantedOut: `Latest failure event
- (service my-svc) (task 1234) failed container health checks.
`,
},
// Long messages are expected to be wrapped across multiple lines.
"should split really long failure event messages": {
inFailureMsgs: []string{
"(service webapp-test-frontend-Service-ss036XlczgjO) (port 80) is unhealthy in (target-group arn:aws:elasticloadbalancing:us-west-2:1111: targetgroup/aaaaaaaaaaaa) due to (reason some-error).",
},
wantedNumLines: 4,
wantedOut: `Latest failure event
- (service webapp-test-frontend-Service-ss036XlczgjO) (port 80) is unhea
lthy in (target-group arn:aws:elasticloadbalancing:us-west-2:1111: tar
getgroup/aaaaaaaaaaaa) due to (reason some-error).
`,
},
// Most recent failure (last in the slice) should be rendered first.
"should render multiple failure messages in reverse order": {
inFailureMsgs: []string{
"(service my-svc) (task 1234) failed container health checks.",
"(service my-svc) (task 5678) failed container health checks.",
},
wantedNumLines: 3,
wantedOut: `Latest 2 failure events
- (service my-svc) (task 5678) failed container health checks.
- (service my-svc) (task 1234) failed container health checks.
`,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// GIVEN
buf := new(strings.Builder)
c := &rollingUpdateComponent{
deployments: tc.inDeployments,
failureMsgs: tc.inFailureMsgs,
}
// WHEN
nl, err := c.Render(buf)
// THEN
require.NoError(t, err)
require.Equal(t, tc.wantedNumLines, nl, "number of lines expected did not match")
require.Equal(t, tc.wantedOut, buf.String(), "the content written did not match")
})
}
} | explode_data.jsonl/17896 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 965
} | [
2830,
3393,
32355,
287,
4289,
2189,
42102,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
2415,
14032,
60,
1235,
341,
197,
17430,
69464,
1368,
3056,
4027,
5142,
34,
5491,
747,
39130,
198,
197,
17430,
17507,
6611,
82,
3056,
917,
271,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestVoteSignBytesTestVectors checks Vote.SignBytes against hard-coded
// byte-level test vectors covering: an empty vote, precommit/prevote types,
// a vote with no type, and a non-empty chain ID. The expected bytes encode
// protobuf-style fields (type, fixed-size height/round, timestamp, chainID).
func TestVoteSignBytesTestVectors(t *testing.T) {
tests := []struct {
chainID string
vote *Vote
want []byte
}{
0: {
"", &Vote{},
// NOTE: Height and Round are skipped here. This case needs to be considered while parsing.
[]byte{0xd, 0x2a, 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
},
// with proper (fixed size) height and round (PreCommit):
1: {
"", &Vote{Height: 1, Round: 1, Type: PrecommitType},
[]byte{
0x21, // length
0x8, // (field_number << 3) | wire_type
0x2, // PrecommitType
0x11, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
0x19, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
0x2a, // (field_number << 3) | wire_type
// remaining fields (timestamp):
0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
},
// with proper (fixed size) height and round (PreVote):
2: {
"", &Vote{Height: 1, Round: 1, Type: PrevoteType},
[]byte{
0x21, // length
0x8, // (field_number << 3) | wire_type
0x1, // PrevoteType
0x11, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
0x19, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
0x2a, // (field_number << 3) | wire_type
// remaining fields (timestamp):
0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
},
// no type set: the type field is omitted entirely from the encoding.
3: {
"", &Vote{Height: 1, Round: 1},
[]byte{
0x1f, // length
0x11, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
0x19, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
// remaining fields (timestamp):
0x2a,
0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
},
// containing non-empty chain_id:
4: {
"test_chain_id", &Vote{Height: 1, Round: 1},
[]byte{
0x2e, // length
0x11, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
0x19, // (field_number << 3) | wire_type
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
// remaining fields:
0x2a, // (field_number << 3) | wire_type
0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, // timestamp
0x32, // (field_number << 3) | wire_type
0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID
},
}
for i, tc := range tests {
got := tc.vote.SignBytes(tc.chainID)
require.Equal(t, tc.want, got, "test case #%v: got unexpected sign bytes for Vote.", i)
}
} | explode_data.jsonl/54532 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2109
} | [
2830,
3393,
41412,
7264,
7078,
2271,
84744,
1155,
353,
8840,
836,
8,
1476,
78216,
1669,
3056,
1235,
341,
197,
197,
8819,
915,
914,
198,
197,
5195,
1272,
262,
353,
41412,
198,
197,
50780,
262,
3056,
3782,
198,
197,
59403,
197,
197,
15,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestSingleNodeCommit verifies that a single-node raft network commits
// entries immediately: after campaigning (MsgHup) and proposing two entries,
// the committed index should be 3 (the election no-op entry plus the two
// proposals).
func TestSingleNodeCommit(t *testing.T) {
tt := newNetwork(nil)
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
sm := tt.peers[1].(*raft)
if sm.raftLog.committed != 3 {
t.Errorf("committed = %d, want %d", sm.raftLog.committed, 3)
}
} | explode_data.jsonl/67336 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 199
} | [
2830,
3393,
72352,
33441,
1155,
353,
8840,
836,
8,
341,
3244,
83,
1669,
501,
12320,
27907,
340,
3244,
83,
5219,
76878,
8472,
90,
3830,
25,
220,
16,
11,
2014,
25,
220,
16,
11,
3990,
25,
17310,
30365,
39,
454,
3518,
3244,
83,
5219,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestHmhdBox_AvgPDUSize checks that the AvgPDUSize accessor returns the
// value stored in the avgPDUSize field.
func TestHmhdBox_AvgPDUSize(t *testing.T) {
hb := HmhdBox{
avgPDUSize: 11,
}
if hb.AvgPDUSize() != 11 {
t.Fatalf("AvgPDUSize() not correct.")
}
} | explode_data.jsonl/11010 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 78
} | [
2830,
3393,
39,
76,
15990,
1611,
1566,
7239,
47,
21547,
1695,
1155,
353,
8840,
836,
8,
341,
9598,
65,
1669,
472,
76,
15990,
1611,
515,
197,
197,
13961,
47,
21547,
1695,
25,
220,
16,
16,
345,
197,
630,
743,
45135,
875,
7239,
47,
21... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestMasterGenerate exercises the Master asset's Generate flow: given a
// populated install config and a generated root CA as parent assets, it
// expects exactly one ignition config file named "master.ign" to be produced.
func TestMasterGenerate(t *testing.T) {
installConfig := &installconfig.InstallConfig{
Config: &types.InstallConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
},
BaseDomain: "test-domain",
Networking: &types.Networking{
ServiceCIDR: ipnet.MustParseCIDR("10.0.1.0/24"),
},
Platform: types.Platform{
AWS: &aws.Platform{
Region: "us-east",
},
},
ControlPlane: &types.MachinePool{
Name: "master",
Replicas: pointer.Int64Ptr(3),
},
},
}
// The root CA is a required parent of the Master asset.
rootCA := &tls.RootCA{}
err := rootCA.Generate(nil)
assert.NoError(t, err, "unexpected error generating root CA")
parents := asset.Parents{}
parents.Add(installConfig, rootCA)
master := &Master{}
err = master.Generate(parents)
assert.NoError(t, err, "unexpected error generating master asset")
expectedIgnitionConfigNames := []string{
"master.ign",
}
actualFiles := master.Files()
actualIgnitionConfigNames := make([]string, len(actualFiles))
for i, f := range actualFiles {
actualIgnitionConfigNames[i] = f.Filename
}
assert.Equal(t, expectedIgnitionConfigNames, actualIgnitionConfigNames, "unexpected names for master ignition configs")
} | explode_data.jsonl/5937 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 445
} | [
2830,
3393,
18041,
31115,
1155,
353,
8840,
836,
8,
341,
197,
12248,
2648,
1669,
609,
12248,
1676,
71207,
541,
2648,
515,
197,
66156,
25,
609,
9242,
71207,
541,
2648,
515,
298,
23816,
12175,
25,
77520,
16,
80222,
515,
571,
21297,
25,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestResourceQuotaQuery is an integration test: it creates a ResourceQuota
// via the Kubernetes client, waits until it is visible, then queries it
// through the GraphQL API and checks the response. It also runs the shared
// authorization-directive suite against the list query.
func TestResourceQuotaQuery(t *testing.T) {
c, err := graphql.New()
require.NoError(t, err)
k8sClient, _, err := client.NewClientWithConfig()
require.NoError(t, err)
_, err = k8sClient.ResourceQuotas(testNamespace).Create(fixResourceQuota())
require.NoError(t, err)
// Poll until the created quota is readable (eventual consistency), up to
// one minute.
err = waiter.WaitAtMost(func() (bool, error) {
_, err := k8sClient.ResourceQuotas(testNamespace).Get(resourceQuotaName, metav1.GetOptions{})
if err == nil {
return true, nil
}
return false, err
}, time.Minute)
require.NoError(t, err)
var listResult resourceQuotas
err = c.Do(fixListResourceQuotasQuery(), &listResult)
require.NoError(t, err)
assert.Contains(t, listResult.ResourceQuotas, fixListResourceQuotasResponse())
t.Log("Checking authorization directives...")
ops := &auth.OperationsInput{
auth.List: {fixListResourceQuotasQuery()},
}
AuthSuite.Run(t, ops)
} | explode_data.jsonl/72064 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 325
} | [
2830,
3393,
4783,
2183,
6089,
2859,
1155,
353,
8840,
836,
8,
341,
1444,
11,
1848,
1669,
48865,
7121,
741,
17957,
35699,
1155,
11,
1848,
692,
16463,
23,
82,
2959,
11,
8358,
1848,
1669,
2943,
7121,
2959,
2354,
2648,
741,
17957,
35699,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestResizeableSlice round-trips a struct plus a trailing variable-length
// byte slice through tpmutil.Pack/Unpack and verifies the slice comes back
// byte-for-byte identical (i.e. the resizeable slice is resized and copied
// correctly on unpack).
func TestResizeableSlice(t *testing.T) {
// Set up an encoded slice with a byte array.
ra := &responseAuth{
NonceEven: [20]byte{},
ContSession: 1,
Auth: [20]byte{},
}
// 322 bytes of random payload to follow the fixed-size struct.
b := make([]byte, 322)
if _, err := rand.Read(b); err != nil {
t.Fatal("Couldn't read random bytes into the byte array")
}
bb, err := tpmutil.Pack(ra, b)
if err != nil {
t.Fatal("Couldn't pack the bytes:", err)
}
var ra2 responseAuth
var b2 []byte
if _, err := tpmutil.Unpack(bb, &ra2, &b2); err != nil {
t.Fatal("Couldn't unpack the resizeable values:", err)
}
if !bytes.Equal(b2, b) {
t.Fatal("ResizeableSlice was not resized or copied correctly")
}
} | explode_data.jsonl/75347 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 281
} | [
2830,
3393,
30561,
480,
33236,
1155,
353,
8840,
836,
8,
341,
197,
322,
2573,
705,
458,
20498,
15983,
448,
264,
4922,
1334,
624,
197,
956,
1669,
609,
2322,
5087,
515,
197,
197,
90528,
13159,
25,
256,
508,
17,
15,
90184,
38837,
197,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestAllConvertedEntriesAreSentAndReceived feeds batches of entries into a
// Converter and verifies that every entry is received on the output channel,
// and that no single flush exceeds the configured maxFlushCount.
func TestAllConvertedEntriesAreSentAndReceived(t *testing.T) {
t.Parallel()
testcases := []struct {
entries int
maxFlushCount uint
}{
{
entries: 10,
maxFlushCount: 10,
},
{
entries: 10,
maxFlushCount: 3,
},
{
entries: 100,
maxFlushCount: 20,
},
}
for i, tc := range testcases {
tc := tc // capture range variable for the parallel subtest closure
t.Run(strconv.Itoa(i), func(t *testing.T) {
t.Parallel()
converter := NewConverter(
WithWorkerCount(1),
WithMaxFlushCount(tc.maxFlushCount),
WithFlushInterval(10*time.Millisecond), // To minimize time spent in test
)
converter.Start()
defer converter.Stop()
// Producer: batch all entries into the converter concurrently.
go func() {
for _, ent := range complexEntries(tc.entries) {
assert.NoError(t, converter.Batch(ent))
}
}()
var (
actualCount int
timeoutTimer = time.NewTimer(10 * time.Second)
ch = converter.OutChannel()
)
defer timeoutTimer.Stop()
// Consumer: drain the output channel until all entries are counted,
// the channel closes, or the timeout fires.
forLoop:
for {
if tc.entries == actualCount {
break
}
select {
case pLogs, ok := <-ch:
if !ok {
break forLoop
}
rLogs := pLogs.ResourceLogs()
require.Equal(t, 1, rLogs.Len())
rLog := rLogs.At(0)
ills := rLog.InstrumentationLibraryLogs()
require.Equal(t, 1, ills.Len())
ill := ills.At(0)
actualCount += ill.Logs().Len()
assert.LessOrEqual(t, uint(ill.Logs().Len()), tc.maxFlushCount,
"Received more log records in one flush than configured by maxFlushCount",
)
case <-timeoutTimer.C:
break forLoop
}
}
assert.Equal(t, tc.entries, actualCount,
"didn't receive expected number of entries after conversion",
)
})
}
} | explode_data.jsonl/61121 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 797
} | [
2830,
3393,
2403,
61941,
24533,
11526,
31358,
3036,
23260,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
18185,
23910,
1669,
3056,
1235,
341,
197,
197,
12940,
981,
526,
198,
197,
22543,
46874,
2507,
2622,
198,
197,
59403,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestBuildServiceMapAddRemove verifies UpdateServiceMap behavior when
// services are added and then updated/removed: service-map size,
// healthcheck node ports for local-only load balancers, and the set of stale
// UDP cluster IPs produced by deletions.
func TestBuildServiceMapAddRemove(t *testing.T) {
fp := newFakeProxier()
services := []*v1.Service{
makeTestService("ns2", "cluster-ip", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 1234, 4321, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "UDP", 1235, 5321, 0)
}),
makeTestService("ns2", "node-port", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.ClusterIP = "172.16.55.10"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 345, 678, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "TCP", 344, 677, 0)
}),
makeTestService("ns1", "load-balancer", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.11"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
svc.Status.LoadBalancer = v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{IP: "10.1.2.4"},
},
}
}),
// Local external traffic policy: this is the only service expected to
// surface a healthcheck node port below.
makeTestService("ns1", "only-local-load-balancer", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.12"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
svc.Status.LoadBalancer = v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{IP: "10.1.2.3"},
},
}
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.HealthCheckNodePort = 345
}),
}
for i := range services {
fp.addService(services[i])
}
result := UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
// 8 entries: one per ServicePort across the four services above.
// NOTE(review): the message text says "length 2" but the check is for 8.
if len(fp.serviceMap) != 8 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
// The only-local-loadbalancer ones get added
if len(result.HCServiceNodePorts) != 1 {
t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts)
} else {
nsn := makeNSN("ns1", "only-local-load-balancer")
if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 {
t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts)
}
}
if len(result.UDPStaleClusterIP) != 0 {
// Services only added, so nothing stale yet
t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
}
// Remove some stuff
// oneService is a modification of services[0] with removed first port.
oneService := makeTestService("ns2", "cluster-ip", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "UDP", 1235, 5321, 0)
})
fp.updateService(services[0], oneService)
fp.deleteService(services[1])
fp.deleteService(services[2])
fp.deleteService(services[3])
result = UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
if len(fp.serviceMap) != 1 {
t.Errorf("expected service map length 1, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts)
}
// All services but one were deleted. While you'd expect only the ClusterIPs
// from the three deleted services here, we still have the ClusterIP for
// the not-deleted service, because one of it's ServicePorts was deleted.
expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"}
if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) {
t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.UnsortedList())
}
for _, ip := range expectedStaleUDPServices {
if !result.UDPStaleClusterIP.Has(ip) {
t.Errorf("expected stale UDP service service %s", ip)
}
}
} | explode_data.jsonl/22232 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1714
} | [
2830,
3393,
11066,
1860,
2227,
2212,
13021,
1155,
353,
8840,
836,
8,
341,
65219,
1669,
501,
52317,
1336,
87,
1268,
2822,
1903,
2161,
1669,
29838,
85,
16,
13860,
515,
197,
77438,
2271,
1860,
445,
4412,
17,
497,
330,
18855,
74732,
497,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestInvalidInstanceID verifies that DeleteInstanceID rejects an empty
// instance ID with a non-nil error.
func TestInvalidInstanceID(t *testing.T) {
ctx := context.Background()
client, err := NewClient(ctx, testIIDConfig)
if err != nil {
t.Fatal(err)
}
if err := client.DeleteInstanceID(ctx, ""); err == nil {
t.Errorf("DeleteInstanceID(empty) = nil; want error")
}
} | explode_data.jsonl/54622 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 101
} | [
2830,
3393,
7928,
2523,
915,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
19047,
741,
25291,
11,
1848,
1669,
1532,
2959,
7502,
11,
1273,
58948,
2648,
340,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
630,
743,
1848... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestConfigureSecurityGroupPermissionsSshOnly checks that when a security
// group already allows only the SSH port, configureSecurityGroupPermissions
// returns exactly one missing permission — the Docker port.
func TestConfigureSecurityGroupPermissionsSshOnly(t *testing.T) {
driver := NewTestDriver()
group := securityGroup
// Existing rule covers only SSH, so the Docker port should be reported
// as the single missing permission.
group.IpPermissions = []*ec2.IpPermission{
{
IpProtocol: aws.String("tcp"),
FromPort: aws.Int64(int64(testSSHPort)),
ToPort: aws.Int64(int64(testSSHPort)),
},
}
perms, err := driver.configureSecurityGroupPermissions(group)
assert.Nil(t, err)
assert.Len(t, perms, 1)
assert.Equal(t, testDockerPort, *perms[0].FromPort)
} | explode_data.jsonl/7404 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 195
} | [
2830,
3393,
28560,
15352,
2808,
23851,
50,
927,
7308,
1155,
353,
8840,
836,
8,
341,
33652,
1669,
1532,
2271,
11349,
741,
44260,
1669,
4763,
2808,
198,
44260,
2447,
79,
23851,
284,
29838,
757,
17,
2447,
79,
14966,
515,
197,
197,
515,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestType exercises the TYPE command against a miniredis server: string and
// hash keys, a missing key ("none"), argument-count errors, and the direct
// s.Type() accessor.
func TestType(t *testing.T) {
s, err := Run()
ok(t, err)
defer s.Close()
c, err := redis.Dial("tcp", s.Addr())
ok(t, err)
// String key
{
s.Set("foo", "bar!")
v, err := redis.String(c.Do("TYPE", "foo"))
ok(t, err)
equals(t, "string", v)
}
// Hash key
{
s.HSet("aap", "noot", "mies")
v, err := redis.String(c.Do("TYPE", "aap"))
ok(t, err)
equals(t, "hash", v)
}
// New key
{
v, err := redis.String(c.Do("TYPE", "nosuch"))
ok(t, err)
equals(t, "none", v)
}
// Wrong usage
{
_, err := redis.Int(c.Do("TYPE"))
assert(t, err != nil, "do TYPE error")
_, err = redis.Int(c.Do("TYPE", "spurious", "arguments"))
assert(t, err != nil, "do TYPE error")
}
// Direct usage:
{
equals(t, "hash", s.Type("aap"))
equals(t, "", s.Type("nokey"))
}
} | explode_data.jsonl/23147 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 395
} | [
2830,
3393,
929,
1155,
353,
8840,
836,
8,
341,
1903,
11,
1848,
1669,
6452,
741,
59268,
1155,
11,
1848,
340,
16867,
274,
10421,
741,
1444,
11,
1848,
1669,
20870,
98462,
445,
27161,
497,
274,
93626,
2398,
59268,
1155,
11,
1848,
692,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestDownloadFileContent stubs an HTTP endpoint with gock and verifies that
// downloadFileContent returns the response body and no error.
func TestDownloadFileContent(t *testing.T) {
url := "http://foo.com"
// Intercept outbound HTTP for this URL and restore networking afterwards.
defer gock.Off()
gock.New(url).
Get("/").
Reply(200).
BodyString("Foo")
resp, err := downloadFileContent(url)
if string(resp) != "Foo" {
t.Errorf(
"downloadFileContent(%s) was incorrect. Got: %s, want: %s",
url,
string(resp),
"Foo",
)
}
if err != nil {
t.Errorf("[FAILED] - Got error %v to download file of url %s", err, url)
}
} | explode_data.jsonl/7960 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 194
} | [
2830,
3393,
11377,
1703,
2762,
1155,
353,
8840,
836,
8,
341,
19320,
1669,
330,
1254,
1110,
7975,
905,
698,
16867,
728,
377,
13,
4596,
2822,
3174,
1176,
7121,
6522,
4292,
197,
37654,
4283,
38609,
197,
197,
20841,
7,
17,
15,
15,
4292,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestEtcdGetServiceNotFound verifies that GetService surfaces a NotFound
// error when the fake etcd client reports the key as missing.
func TestEtcdGetServiceNotFound(t *testing.T) {
ctx := api.NewDefaultContext()
fakeClient := tools.NewFakeEtcdClient(t)
key, _ := makeServiceKey(ctx, "foo")
// Seed the fake client so the key lookup fails with etcd's not-found error.
fakeClient.Data[key] = tools.EtcdResponseWithError{
R: &etcd.Response{
Node: nil,
},
E: tools.EtcdErrorNotFound,
}
registry := NewTestEtcdRegistry(fakeClient)
_, err := registry.GetService(ctx, "foo")
if !errors.IsNotFound(err) {
t.Errorf("Unexpected error returned: %#v", err)
}
} | explode_data.jsonl/8169 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 186
} | [
2830,
3393,
31860,
4385,
1949,
1860,
10372,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
6330,
7121,
3675,
1972,
741,
1166,
726,
2959,
1669,
7375,
7121,
52317,
31860,
4385,
2959,
1155,
340,
23634,
11,
716,
1669,
1281,
1860,
1592,
7502,
11... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestWriteZero checks websocket transport behavior for zero-length writes:
// 100 writes of a nil message must each report 0 bytes written, and the
// reader side must observe 0 bytes followed by io.EOF.
func TestWriteZero(t *testing.T) {
// "/tcp/0" lets the OS pick a free port for the listener.
zero, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0/ws")
if err != nil {
t.Fatal(err)
}
tpt := &WebsocketTransport{}
l, err := tpt.Listen(zero)
if err != nil {
t.Fatal(err)
}
defer l.Close()
msg := []byte(nil)
// Writer side runs concurrently with the Accept/Read below.
go func() {
d, _ := tpt.Dialer(nil)
c, err := d.Dial(l.Multiaddr())
defer c.Close()
if err != nil {
t.Error(err)
return
}
for i := 0; i < 100; i++ {
n, err := c.Write(msg)
if n != 0 {
t.Errorf("expected to write 0 bytes, wrote %d", n)
}
if err != nil {
t.Error(err)
return
}
}
}()
c, err := l.Accept()
defer c.Close()
if err != nil {
t.Fatal(err)
}
buf := make([]byte, 100)
n, err := c.Read(buf)
if n != 0 {
t.Errorf("read %d bytes, expected 0", n)
}
if err != io.EOF {
t.Errorf("expected EOF, got err: %s", err)
}
} | explode_data.jsonl/40774 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 427
} | [
2830,
3393,
7985,
17999,
1155,
353,
8840,
836,
8,
341,
197,
14154,
11,
1848,
1669,
7491,
7121,
20358,
6214,
4283,
573,
19,
14,
16,
17,
22,
13,
15,
13,
15,
13,
16,
95958,
14,
15,
91021,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestCalcMinRequiredTxRelayFee is a table-driven test for
// calcMinRequiredTxRelayFee, covering sub-1000-byte minimum fees, default
// relay fees, the max standard transaction size, satoshi overflow clamping,
// and proportional fee scaling.
func TestCalcMinRequiredTxRelayFee(t *testing.T) {
tests := []struct {
name string // test description.
size int64 // Transaction size in bytes.
relayFee btcutil.Amount // minimum relay transaction fee.
want int64 // Expected fee.
}{
{
// Ensure combination of size and fee that are less than 1000
// produce a non-zero fee.
"250 bytes with relay fee of 3",
250,
3,
3,
},
{
"100 bytes with default minimum relay fee",
100,
DefaultMinRelayTxFee,
100,
},
{
"max standard tx size with default minimum relay fee",
maxStandardTxWeight / 4,
DefaultMinRelayTxFee,
100000,
},
{
"max standard tx size with max satoshi relay fee",
maxStandardTxWeight / 4,
btcutil.MaxSatoshi / 100, // overflow on purpose
btcutil.MaxSatoshi,
},
{
"1500 bytes with 5000 relay fee",
1500,
5000,
7500,
},
{
"1500 bytes with 3000 relay fee",
1500,
3000,
4500,
},
{
"782 bytes with 5000 relay fee",
782,
5000,
3910,
},
{
"782 bytes with 3000 relay fee",
782,
3000,
2346,
},
{
"782 bytes with 2550 relay fee",
782,
2550,
1994,
},
}
for _, test := range tests {
got := calcMinRequiredTxRelayFee(test.size, test.relayFee)
if got != test.want {
t.Errorf("TestCalcMinRequiredTxRelayFee test '%s' "+
"failed: got %v want %v", test.name, got,
test.want)
continue
}
}
} | explode_data.jsonl/47308 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 661
} | [
2830,
3393,
47168,
6217,
8164,
31584,
6740,
352,
41941,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
260,
442,
1273,
4008,
624,
197,
13832,
257,
526,
21,
19,
688,
442,
17869,
1379,
304,
5820,
624,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestRereadKeyRing round-trips a keyring: parse from a hex fixture,
// serialize the first entity, re-parse the output, and confirm the primary
// key ID survives (0xC20C31BB).
func TestRereadKeyRing(t *testing.T) {
kring, err := ReadKeyRing(readerFromHex(testKeys1And2Hex))
if err != nil {
t.Errorf("error in initial parse: %s", err)
return
}
out := new(bytes.Buffer)
err = kring[0].Serialize(out)
if err != nil {
t.Errorf("error in serialization: %s", err)
return
}
kring, err = ReadKeyRing(out)
if err != nil {
t.Errorf("error in second parse: %s", err)
return
}
// Only the first entity was serialized, so the reread ring must contain
// exactly one key with the expected (truncated 32-bit) key ID.
if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB {
t.Errorf("bad keyring: %#v", kring)
}
} | explode_data.jsonl/2271 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 230
} | [
2830,
3393,
49,
485,
329,
1592,
43466,
1155,
353,
8840,
836,
8,
341,
197,
9855,
287,
11,
1848,
1669,
4457,
1592,
43466,
21987,
3830,
20335,
8623,
8850,
16,
3036,
17,
20335,
1171,
743,
1848,
961,
2092,
341,
197,
3244,
13080,
445,
841,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestCreateOrg stubs the Cloud Foundry v2 organizations endpoint and checks
// that CreateOrg returns the org GUID from the mocked 201 response.
func TestCreateOrg(t *testing.T) {
Convey("Create org", t, func() {
// Mock POST /v2/organizations to return the createOrgPayload fixture.
setup(MockRoute{"POST", "/v2/organizations", []string{createOrgPayload}, "", 201, "", nil}, t)
defer teardown()
c := &Config{
ApiAddress: server.URL,
Token: "foobar",
}
client, err := NewClient(c)
So(err, ShouldBeNil)
org, err := client.CreateOrg(OrgRequest{Name: "my-org"})
So(err, ShouldBeNil)
So(org.Guid, ShouldEqual, "22b3b0a0-6511-47e5-8f7a-93bbd2ff446e")
})
} | explode_data.jsonl/4436 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 216
} | [
2830,
3393,
4021,
42437,
1155,
353,
8840,
836,
8,
341,
93070,
5617,
445,
4021,
1240,
497,
259,
11,
2915,
368,
341,
197,
84571,
66436,
4899,
4913,
2946,
497,
3521,
85,
17,
14,
69253,
497,
3056,
917,
90,
3182,
42437,
29683,
2137,
7342,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIntLessThanOrEqualTo(t *testing.T) {
if err := (&validators.IntLessThanOrEqualTo{Value: 6, Target: 5}).Validate(); err == nil {
t.Errorf("validators.IntLessThanOrEqualTo faild")
}
if err := (&validators.IntLessThanOrEqualTo{Value: 6, Target: 6}).Validate(); err != nil {
t.Errorf("validators.IntLessThanOrEqualTo faild")
}
if err := (&validators.IntLessThanOrEqualTo{Value: 6, Target: 7}).Validate(); err != nil {
t.Errorf("validators.IntLessThanOrEqualTo faild")
}
} | explode_data.jsonl/73062 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 197
} | [
2830,
3393,
1072,
27451,
89387,
1155,
353,
8840,
836,
8,
341,
743,
1848,
1669,
15899,
59424,
7371,
27451,
89387,
90,
1130,
25,
220,
21,
11,
13483,
25,
220,
20,
16630,
17926,
2129,
1848,
621,
2092,
341,
197,
3244,
13080,
445,
59424,
73... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestReadIMsg(t *testing.T) {
// Store out the determined system endianness before manually manipulating it
systemEndianness := endianness
var (
tt imsgTest
buf *bytes.Reader
result *IMsg
err error
)
// First run tests for little endian systems
endianness = binary.LittleEndian
for _, tt = range unmarshalTests {
t.Run(
fmt.Sprintf("%s little endian", tt.name),
func(t *testing.T) {
buf = bytes.NewReader(tt.littleEndianBytes)
result, err = ReadIMsg(buf)
if tt.expectedErrorType == nil {
if err != nil {
t.Fatalf("unexpected ReadIMsg failure: %s", err)
}
if !reflect.DeepEqual(result, tt.imsg) {
t.Fatalf("result of ReadIMsg does not match expected output (%#v != %#v)", result, tt.imsg)
}
} else {
if err == nil {
t.Fatalf("incorrectly read imsg")
}
if !errors.As(err, &tt.expectedErrorType) {
t.Fatalf("failed to read imsg in unexpected way: %s", err)
}
}
})
}
// Next run tests for big endian systems
endianness = binary.BigEndian
for _, tt = range unmarshalTests {
t.Run(
fmt.Sprintf("%s big endian", tt.name),
func(t *testing.T) {
buf = bytes.NewReader(tt.bigEndianBytes)
result, err = ReadIMsg(buf)
if tt.expectedErrorType == nil {
if err != nil {
t.Fatalf("unexpected ReadIMsg failure: %s", err)
}
if !reflect.DeepEqual(result, tt.imsg) {
t.Fatalf("result of ReadIMsg does not match expected output (%#v != %#v)", result, tt.imsg)
}
} else {
if err == nil {
t.Fatalf("incorrectly read imsg")
}
if !errors.As(err, &tt.expectedErrorType) {
t.Fatalf("failed to read imsg in unexpected way: %s", err)
}
}
})
}
// Restore the determined system endianness
endianness = systemEndianness
} | explode_data.jsonl/1341 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 806
} | [
2830,
3393,
4418,
1791,
1991,
1155,
353,
8840,
836,
8,
341,
197,
322,
9129,
700,
279,
10838,
1849,
835,
72,
82033,
1573,
20083,
62514,
432,
198,
40293,
3727,
72,
82033,
1669,
835,
72,
82033,
271,
2405,
2399,
197,
3244,
83,
257,
732,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestToValueInterface(t *testing.T) {
f := func(i interface{}) bool {
return i == t
}
vm := New()
vm.Set("f", f)
vm.Set("t", t)
v, err := vm.RunString(`f(t)`)
if err != nil {
t.Fatal(err)
}
if v != valueTrue {
t.Fatalf("v: %v", v)
}
} | explode_data.jsonl/10484 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 127
} | [
2830,
3393,
1249,
1130,
5051,
1155,
353,
8840,
836,
8,
1476,
1166,
1669,
2915,
1956,
3749,
28875,
1807,
341,
197,
853,
600,
621,
259,
198,
197,
532,
54879,
1669,
1532,
741,
54879,
4202,
445,
69,
497,
282,
340,
54879,
4202,
445,
83,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUpdateUserActive(t *testing.T) {
th := Setup(t)
defer th.TearDown()
user := th.CreateUser()
EnableUserDeactivation := th.App.Config().TeamSettings.EnableUserDeactivation
defer func() {
th.App.UpdateConfig(func(cfg *model.Config) { cfg.TeamSettings.EnableUserDeactivation = EnableUserDeactivation })
}()
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.TeamSettings.EnableUserDeactivation = true
})
err := th.App.UpdateUserActive(user.Id, false)
assert.Nil(t, err)
} | explode_data.jsonl/31414 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 173
} | [
2830,
3393,
4289,
1474,
5728,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1155,
340,
16867,
270,
836,
682,
4454,
2822,
19060,
1669,
270,
7251,
1474,
2822,
197,
11084,
1474,
1912,
23002,
1669,
270,
5105,
10753,
1005,
14597,
6086,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDetailing(t *testing.T) {
var detailingTest = []struct {
baseReport *IncompatibilityReport
}{
{sudoGnosticFlowBaseReport(t, "../examples/petstore/petstore.yaml")},
{sudoGnosticFlowBaseReport(t, "oas-examples/petstore.json")},
{sudoGnosticFlowBaseReport(t, "../examples/bookstore/bookstore.yaml")},
{sudoGnosticFlowBaseReport(t, "oas-examples/openapi.yaml")},
{sudoGnosticFlowBaseReport(t, "oas-examples/adsense.yaml")},
}
for _, trial := range detailingTest {
t.Run(trial.baseReport.ReportIdentifier, func(tt *testing.T) {
numIncompatibilitiesBaseReport := len(trial.baseReport.Incompatibilities)
numIncompatibilitiesIDReport := len(detailReport(trial.baseReport).Incompatibilities)
if numIncompatibilitiesBaseReport != numIncompatibilitiesIDReport {
t.Errorf("len(IDReport(%s)): got: %d wanted: %d", trial.baseReport.ReportIdentifier,
numIncompatibilitiesIDReport,
numIncompatibilitiesBaseReport,
)
}
})
}
} | explode_data.jsonl/26491 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 366
} | [
2830,
3393,
10649,
287,
1155,
353,
8840,
836,
8,
341,
2405,
44193,
2271,
284,
3056,
1235,
341,
197,
24195,
10361,
353,
641,
18331,
3147,
10361,
198,
197,
59403,
197,
197,
90,
18881,
38,
86424,
18878,
3978,
10361,
1155,
11,
7005,
51668,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestParser_ParseDeck_CardCount(t *testing.T) {
s := `
%% This is a field
%% This is a field
---
%% This is a field
%% This is a field
---`
d, err := anki.NewParser(strings.NewReader(s)).ParseDeck()
if err != nil {
t.Fatalf("unexpected error: %s", err)
} else if len(d.Cards) != 2 {
t.Fatalf("unexpected card count: %d", len(d.Cards))
}
} | explode_data.jsonl/40547 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 150
} | [
2830,
3393,
6570,
77337,
39368,
920,
567,
2507,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
22074,
197,
2769,
1096,
374,
264,
2070,
271,
197,
2769,
1096,
374,
264,
2070,
271,
197,
44364,
197,
2769,
1096,
374,
264,
2070,
271,
197,
2769,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestGetBackend(t *testing.T) {
// Boilerplate mocking code
mockCtrl := gomock.NewController(t)
// Set fake values
backendName := "foobar"
backendUUID := "1234"
// Create the expected return object
expectedBackendExternal := &storage.BackendExternal{
Name: backendName,
BackendUUID: backendUUID,
}
// Create a mocked backend
mockBackend := mockstorage.NewMockBackend(mockCtrl)
// Set backend behavior we don't care about for this testcase
mockBackend.EXPECT().Name().Return(backendName).AnyTimes() // Always return the fake name
mockBackend.EXPECT().BackendUUID().Return(backendUUID).AnyTimes() // Always return the fake uuid
// Set backend behavior we do care about for this testcase
mockBackend.EXPECT().ConstructExternal(gomock.Any()).Return(expectedBackendExternal) // Return the expected object
// Create an instance of the orchestrator
orchestrator := getOrchestrator(t)
// Add the mocked backend to the orchestrator
orchestrator.backends[backendUUID] = mockBackend
// Run the test
actualBackendExternal, err := orchestrator.GetBackend(ctx(), backendName)
// Verify the results
assert.Nilf(t, err, "Error getting backend; %v", err)
assert.Equal(t, expectedBackendExternal, actualBackendExternal, "Did not get the expected backend object")
} | explode_data.jsonl/62763 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 408
} | [
2830,
3393,
1949,
29699,
1155,
353,
8840,
836,
8,
341,
197,
322,
45665,
1750,
66483,
2038,
198,
77333,
15001,
1669,
342,
316,
1176,
7121,
2051,
1155,
692,
197,
322,
2573,
12418,
2750,
198,
197,
20942,
675,
1669,
330,
50267,
698,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPushWithContentTypeDefault(t *testing.T) {
contentType := defaultChartsContentType
t.Logf("Test basic push action with default Content-Type '%s'", contentType)
name := "test-push"
dir := "charts"
setupRepo(t, name, dir)
defer teardownRepo(t, name)
key := dir + "/foo-1.2.3.tgz"
// set a cleanup in beforehand
defer removeObject(t, name, key)
cmd, stdout, stderr := command(fmt.Sprintf("helm s3 push testdata/foo-1.2.3.tgz %s", name))
if err := cmd.Run(); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if stdout.String() != "" {
t.Errorf("Expected stdout to be empty, but got %q", stdout.String())
}
if stderr.String() != "" {
t.Errorf("Expected stderr to be empty, but got %q", stderr.String())
}
assertContentType(t, contentType, name, key)
} | explode_data.jsonl/13705 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 305
} | [
2830,
3393,
16644,
2354,
29504,
3675,
1155,
353,
8840,
836,
8,
341,
27751,
929,
1669,
1638,
64878,
29504,
198,
3244,
98954,
445,
2271,
6770,
4484,
1917,
448,
1638,
8883,
10804,
7677,
82,
22772,
32103,
692,
11609,
1669,
330,
1944,
98643,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestCheckErrorUsingTab(t *testing.T) {
tests := []struct {
fileName string
in string
out []string
}{
{"1.c", "line", []string(nil)},
{"1.c", "line ||", []string(nil)},
{"1.c", "line &&", []string(nil)},
{"1.c", " || line", []string{"ERROR 1.c:1:Operator should be located at line end"}},
{"1.c", " && line", []string{"ERROR 1.c:1:Operator should be located at line end"}},
{"1.java", "line", []string(nil)},
{"1.java", "line ||", []string{"ERROR 1.java:1:Operator should be located at line start"}},
{"1.java", "line &&", []string{"ERROR 1.java:1:Operator should be located at line start"}},
{"1.java", " || line", []string(nil)},
{"1.java", " && line", []string(nil)},
{"1.c", "", []string(nil)},
}
for _, test := range tests {
records := check(test.fileName, test.in)
out := GetRecordsStr(records)
if !reflect.DeepEqual(out, test.out) {
t.Errorf("test fail, filename %#v input:%#v expected:%#v output:%#v",
test.fileName, test.in, test.out, out)
}
}
} | explode_data.jsonl/37640 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 436
} | [
2830,
3393,
3973,
1454,
16429,
8582,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
17661,
675,
914,
198,
197,
17430,
981,
914,
198,
197,
13967,
414,
3056,
917,
198,
197,
59403,
197,
197,
4913,
16,
520,
497,
330,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestConvert(t *testing.T) {
ent := func() *entry.Entry {
e := entry.New()
e.Severity = entry.Error
e.AddResourceKey("type", "global")
e.AddAttribute("one", "two")
e.AddAttribute("two", "three")
e.Body = map[string]interface{}{
"bool": true,
"int": 123,
"double": 12.34,
"string": "hello",
"bytes": []byte("asdf"),
}
return e
}()
pLogs := Convert(ent)
require.Equal(t, 1, pLogs.ResourceLogs().Len())
rls := pLogs.ResourceLogs().At(0)
require.Equal(t, 1, rls.Resource().Attributes().Len())
{
att, ok := rls.Resource().Attributes().Get("type")
if assert.True(t, ok) {
if assert.Equal(t, att.Type(), pdata.AttributeValueTypeString) {
assert.Equal(t, att.StringVal(), "global")
}
}
}
ills := rls.InstrumentationLibraryLogs()
require.Equal(t, 1, ills.Len())
logs := ills.At(0).Logs()
require.Equal(t, 1, logs.Len())
lr := logs.At(0)
assert.Equal(t, pdata.SeverityNumberERROR, lr.SeverityNumber())
assert.Equal(t, "Error", lr.SeverityText())
if atts := lr.Attributes(); assert.Equal(t, 2, atts.Len()) {
m := pdata.NewAttributeMap()
m.InsertString("one", "two")
m.InsertString("two", "three")
assert.EqualValues(t, m.Sort(), atts.Sort())
}
if assert.Equal(t, pdata.AttributeValueTypeMap, lr.Body().Type()) {
m := pdata.NewAttributeMap()
// Don't include a nested object because AttributeValueMap sorting
// doesn't sort recursively.
m.InsertBool("bool", true)
m.InsertInt("int", 123)
m.InsertDouble("double", 12.34)
m.InsertString("string", "hello")
m.InsertString("bytes", "asdf")
assert.EqualValues(t, m.Sort(), lr.Body().MapVal().Sort())
}
} | explode_data.jsonl/61120 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 692
} | [
2830,
3393,
12012,
1155,
353,
8840,
836,
8,
341,
77655,
1669,
2915,
368,
353,
4085,
22330,
341,
197,
7727,
1669,
4343,
7121,
741,
197,
7727,
808,
2054,
487,
284,
4343,
6141,
198,
197,
7727,
1904,
4783,
1592,
445,
1313,
497,
330,
9752,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSetValue(t *testing.T) {
a := assert.New(t)
obj := myStruct{InferredName: "Hello."}
var value interface{}
value = 10
meta := getCachedColumnCollectionFromInstance(obj)
pk := meta.Columns()[0]
a.Nil(pk.SetValue(&obj, value))
a.Equal(10, obj.PrimaryKeyCol)
} | explode_data.jsonl/58396 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 110
} | [
2830,
3393,
48068,
1155,
353,
8840,
836,
8,
341,
11323,
1669,
2060,
7121,
1155,
340,
22671,
1669,
847,
9422,
90,
641,
5554,
675,
25,
330,
9707,
1189,
630,
2405,
897,
3749,
16094,
16309,
284,
220,
16,
15,
198,
84004,
1669,
43099,
3854,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetEntry(t *testing.T) {
ds := fakedatastore.New(t)
test := setupServiceTest(t, ds)
defer test.Cleanup()
// Create fedeated bundles, that we use on "FederatesWith"
createFederatedBundles(t, test.ds)
parent := td.NewID("foo")
entry1SpiffeID := td.NewID("bar")
expiresAt := time.Now().Unix()
goodEntry, err := ds.CreateRegistrationEntry(ctx, &datastore.CreateRegistrationEntryRequest{
Entry: &common.RegistrationEntry{
ParentId: parent.String(),
SpiffeId: entry1SpiffeID.String(),
Ttl: 60,
Selectors: []*common.Selector{
{Type: "unix", Value: "uid:1000"},
{Type: "unix", Value: "gid:1000"},
},
FederatesWith: []string{
federatedTd.IDString(),
},
Admin: true,
EntryExpiry: expiresAt,
DnsNames: []string{"dns1", "dns2"},
Downstream: true,
},
})
require.NoError(t, err)
malformedEntry, err := ds.CreateRegistrationEntry(ctx, &datastore.CreateRegistrationEntryRequest{
Entry: &common.RegistrationEntry{
ParentId: parent.String(),
SpiffeId: "malformed id",
Selectors: []*common.Selector{
{Type: "unix", Value: "uid:1000"},
},
EntryExpiry: expiresAt,
},
})
require.NoError(t, err)
for _, tt := range []struct {
name string
code codes.Code
dsError error
entryID string
err string
expectEntry *types.Entry
expectLogs []spiretest.LogEntry
outputMask *types.EntryMask
}{
{
name: "success",
entryID: goodEntry.Entry.EntryId,
expectEntry: &types.Entry{
Id: goodEntry.Entry.EntryId,
ParentId: api.ProtoFromID(parent),
SpiffeId: api.ProtoFromID(entry1SpiffeID),
},
outputMask: &types.EntryMask{
ParentId: true,
SpiffeId: true,
},
},
{
name: "no outputMask",
entryID: goodEntry.Entry.EntryId,
expectEntry: &types.Entry{
Id: goodEntry.Entry.EntryId,
ParentId: api.ProtoFromID(parent),
SpiffeId: api.ProtoFromID(entry1SpiffeID),
Ttl: 60,
Selectors: []*types.Selector{
{Type: "unix", Value: "uid:1000"},
{Type: "unix", Value: "gid:1000"},
},
FederatesWith: []string{federatedTd.String()},
Admin: true,
DnsNames: []string{"dns1", "dns2"},
Downstream: true,
ExpiresAt: expiresAt,
},
},
{
name: "outputMask all false",
entryID: goodEntry.Entry.EntryId,
expectEntry: &types.Entry{Id: goodEntry.Entry.EntryId},
outputMask: &types.EntryMask{},
},
{
name: "missing ID",
code: codes.InvalidArgument,
err: "missing ID",
expectLogs: []spiretest.LogEntry{
{
Level: logrus.ErrorLevel,
Message: "Invalid argument: missing ID",
},
},
},
{
name: "fetch fails",
code: codes.Internal,
entryID: goodEntry.Entry.EntryId,
err: "failed to fetch entry: ds error",
expectLogs: []spiretest.LogEntry{
{
Level: logrus.ErrorLevel,
Message: "Failed to fetch entry",
Data: logrus.Fields{
telemetry.RegistrationID: goodEntry.Entry.EntryId,
logrus.ErrorKey: "ds error",
},
},
},
dsError: errors.New("ds error"),
},
{
name: "entry not found",
code: codes.NotFound,
entryID: "invalidEntryID",
err: "entry not found",
expectLogs: []spiretest.LogEntry{
{
Level: logrus.ErrorLevel,
Message: "Entry not found",
Data: logrus.Fields{
telemetry.RegistrationID: "invalidEntryID",
},
},
},
},
{
name: "malformed entry",
code: codes.Internal,
entryID: malformedEntry.Entry.EntryId,
err: "failed to convert entry: invalid SPIFFE ID: spiffeid: invalid scheme",
expectLogs: []spiretest.LogEntry{
{
Level: logrus.ErrorLevel,
Message: "Failed to convert entry",
Data: logrus.Fields{
telemetry.RegistrationID: malformedEntry.Entry.EntryId,
logrus.ErrorKey: "invalid SPIFFE ID: spiffeid: invalid scheme",
},
},
},
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
test.logHook.Reset()
ds.SetNextError(tt.dsError)
resp, err := test.client.GetEntry(ctx, &entryv1.GetEntryRequest{
Id: tt.entryID,
OutputMask: tt.outputMask,
})
spiretest.AssertLogs(t, test.logHook.AllEntries(), tt.expectLogs)
if tt.err != "" {
spiretest.RequireGRPCStatusContains(t, err, tt.code, tt.err)
require.Nil(t, resp)
return
}
require.NoError(t, err)
require.NotNil(t, resp)
spiretest.AssertProtoEqual(t, tt.expectEntry, resp)
})
}
} | explode_data.jsonl/65674 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2129
} | [
2830,
3393,
1949,
5874,
1155,
353,
8840,
836,
8,
341,
83336,
1669,
282,
7741,
459,
4314,
7121,
1155,
340,
18185,
1669,
6505,
1860,
2271,
1155,
11,
11472,
340,
16867,
1273,
727,
60639,
2822,
197,
322,
4230,
282,
15326,
657,
48607,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFull(t *testing.T) {
expectedPath := "/path/to/whatever/*"
expectedRanges := []agerotate.Range{
agerotate.Range{
Age: 3600 * time.Second,
Interval: 0 * time.Second,
},
agerotate.Range{
Age: 21600 * time.Second,
Interval: 2 * time.Hour,
},
agerotate.Range{
Age: 604800 * time.Second,
Interval: 43200 * time.Second,
},
}
in := strings.NewReader(fullInput)
files, ranges, err := Parse(in, ":")
if err != nil {
t.Fatalf("Got unexpected error %q", err)
}
if string(files) != expectedPath {
t.Fatalf("Expected files path %q, got %q", expectedPath, files)
}
if len(ranges) != len(expectedRanges) {
t.Fatalf("Expected %d ranges, got %d", len(expectedRanges), len(ranges))
}
for i := range expectedRanges {
if expectedRanges[i] != ranges[i] {
t.Fatalf("Expected range %d to be %q, got %q", i, expectedRanges[i], ranges[i])
}
}
} | explode_data.jsonl/54254 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 384
} | [
2830,
3393,
9432,
1155,
353,
8840,
836,
8,
341,
42400,
1820,
1669,
3521,
2343,
32429,
14,
68286,
1057,
698,
42400,
74902,
1669,
3056,
1409,
20223,
24783,
515,
197,
197,
1409,
20223,
24783,
515,
298,
197,
16749,
25,
414,
220,
18,
21,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestMsgFilter_SetPrefix(t *testing.T) {
filter, _ := newMsgFilter(context.Background(), &clientRESTMock{})
if filter.prefix != "" {
t.Fatal("expected prefix to be empty")
}
filter.SetPrefix("!")
if filter.prefix != "!" {
t.Errorf("wrong prefix. Got %s, wants %s", filter.prefix, "!")
}
} | explode_data.jsonl/1333 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 115
} | [
2830,
3393,
6611,
5632,
14812,
14335,
1155,
353,
8840,
836,
8,
341,
50108,
11,
716,
1669,
501,
6611,
5632,
5378,
19047,
1507,
609,
2972,
38307,
11571,
37790,
743,
4051,
38543,
961,
1591,
341,
197,
3244,
26133,
445,
7325,
9252,
311,
387,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func Test_SaveKey(t *testing.T) {
t.Log("Expecting SaveKey to properly save the secret key.")
s := NewSecrets()
if s == nil {
t.Error("Test context failure. secret is nil")
return
}
err := s.GenerateKey()
if err != nil {
t.Errorf("Test context failure. key generate error: %s", err)
return
}
var fd *os.File
fd, err = ioutil.TempFile("", "readKey-")
fileName := fd.Name()
fd.Close()
os.Remove(fileName)
// ------------- call the function
err = s.SaveKey(fileName)
defer os.Remove(fileName)
// -------------- testing
if err != nil {
t.Errorf("Expected SaveKey() to have no error. Got '%s'", err)
} else if info, err1 := os.Stat(fileName); err != nil {
t.Errorf("Expected SaveKey() to return nil. Got %s", err1)
} else if int(info.Size()) != len(s.key64) {
}
} | explode_data.jsonl/59958 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 305
} | [
2830,
3393,
78746,
1592,
1155,
353,
8840,
836,
8,
341,
3244,
5247,
445,
17536,
287,
10255,
1592,
311,
10277,
3581,
279,
6234,
1376,
31225,
1903,
1669,
1532,
19773,
82,
741,
743,
274,
621,
2092,
341,
197,
3244,
6141,
445,
2271,
2266,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestPointer(t *testing.T) {
type X struct {
A int16
B string
}
x := X{A: 12345, B: "test"}
if ValueOf(&x).Pointer() != reflect.ValueOf(&x).Pointer() {
t.FailNow()
}
elemPtr := ValueOf(x).Pointer()
a := *(*int16)(unsafe.Pointer(elemPtr))
if a != x.A {
t.FailNow()
}
b := *(*string)(unsafe.Pointer(elemPtr + unsafe.Offsetof(x.B)))
if b != x.B {
t.FailNow()
}
s := []string{""}
if ValueOf(s).Pointer() != reflect.ValueOf(s).Pointer() {
t.FailNow()
}
f := func() bool { return true }
prt := ValueOf(f).Pointer()
f = *(*func() bool)(unsafe.Pointer(&prt))
if !f() {
t.FailNow()
}
t.Log(ValueOf(f).FuncForPC().Name())
prt = ValueOf(t.Name).Pointer()
tName := *(*func() string)(unsafe.Pointer(&prt))
if tName() != "TestPointer" {
t.FailNow()
}
t.Log(ValueOf(t.Name).FuncForPC().Name())
t.Log(ValueOf(s).FuncForPC() == nil)
} | explode_data.jsonl/29655 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 404
} | [
2830,
3393,
9084,
1155,
353,
8840,
836,
8,
341,
13158,
1599,
2036,
341,
197,
22985,
526,
16,
21,
198,
197,
12791,
914,
198,
197,
532,
10225,
1669,
1599,
90,
32,
25,
220,
16,
17,
18,
19,
20,
11,
425,
25,
330,
1944,
16707,
743,
51... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestServeHTTP(t *testing.T) {
pr1 := PullRequest{}
pr1.Commits.Nodes = append(pr1.Commits.Nodes, struct{ Commit Commit }{})
pr1.Commits.Nodes[0].Commit.Status.Contexts = []Context{{
Context: githubql.String("coverage/coveralls"),
Description: githubql.String("Coverage increased (+0.1%) to 27.599%"),
}}
hist, err := history.New(100, nil, "")
if err != nil {
t.Fatalf("Failed to create history client: %v", err)
}
cfg := func() *config.Config { return &config.Config{} }
c := &Controller{
pools: []Pool{
{
MissingPRs: []PullRequest{pr1},
Action: Merge,
},
},
mergeChecker: newMergeChecker(cfg, &fgc{}),
History: hist,
}
s := httptest.NewServer(c)
defer s.Close()
resp, err := http.Get(s.URL)
if err != nil {
t.Errorf("GET error: %v", err)
}
defer resp.Body.Close()
var pools []Pool
if err := json.NewDecoder(resp.Body).Decode(&pools); err != nil {
t.Fatalf("JSON decoding error: %v", err)
}
if !reflect.DeepEqual(c.pools, pools) {
t.Errorf("Received pools %v do not match original pools %v.", pools, c.pools)
}
} | explode_data.jsonl/42786 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 456
} | [
2830,
3393,
60421,
9230,
1155,
353,
8840,
836,
8,
341,
25653,
16,
1669,
31828,
1900,
16094,
25653,
16,
2961,
44703,
52184,
284,
8737,
24974,
16,
2961,
44703,
52184,
11,
2036,
90,
9205,
9205,
335,
37790,
25653,
16,
2961,
44703,
52184,
58... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInvalidCases(t *testing.T) {
Convey("Invalid repo dir", t, func() {
port := test.GetFreePort()
baseURL := test.GetBaseURL(port)
conf := config.New()
conf.HTTP.Port = port
htpasswdPath := test.MakeHtpasswdFileFromString(getCredString(username, passphrase))
defer os.Remove(htpasswdPath)
conf.HTTP.Auth = &config.AuthConfig{
HTPasswd: config.AuthHTPasswd{
Path: htpasswdPath,
},
}
ctlr := api.NewController(conf)
err := os.Mkdir("oci-repo-test", 0o000)
if err != nil {
panic(err)
}
ctlr.Config.Storage.RootDirectory = "oci-repo-test"
go startServer(ctlr)
defer func(ctrl *api.Controller) {
err := ctrl.Server.Shutdown(context.Background())
if err != nil {
panic(err)
}
err = os.RemoveAll(ctrl.Config.Storage.RootDirectory)
if err != nil {
panic(err)
}
}(ctlr)
test.WaitTillServerReady(baseURL)
digest := "sha256:8dd57e171a61368ffcfde38045ddb6ed74a32950c271c1da93eaddfb66a77e78"
name := "zot-c-test"
client := resty.New()
params := make(map[string]string)
params["from"] = "zot-cveid-test"
params["mount"] = digest
postResponse, err := client.R().
SetBasicAuth(username, passphrase).SetQueryParams(params).
Post(fmt.Sprintf("%s/v2/%s/blobs/uploads/", baseURL, name))
So(err, ShouldBeNil)
So(postResponse.StatusCode(), ShouldEqual, http.StatusInternalServerError)
})
} | explode_data.jsonl/77699 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 584
} | [
2830,
3393,
7928,
37302,
1155,
353,
8840,
836,
8,
341,
93070,
5617,
445,
7928,
15867,
5419,
497,
259,
11,
2915,
368,
341,
197,
52257,
1669,
1273,
2234,
10940,
7084,
741,
197,
24195,
3144,
1669,
1273,
2234,
3978,
3144,
21230,
692,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestInfoStream_closedMidstream(t *testing.T) {
assert := assert.New(t)
_, log, cleanup := setup(t)
defer cleanup()
infoCh := make(chan *blog.Info)
wait := parallel.Go(func() {
for range infoCh {
cleanup()
}
})
err := log.InfoStream(context.Background(), infoCh)
wait()
assert.True(blog.IsClosedError(err))
} | explode_data.jsonl/65 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 127
} | [
2830,
3393,
1731,
3027,
44697,
33648,
4027,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
340,
197,
6878,
1487,
11,
21290,
1669,
6505,
1155,
340,
16867,
21290,
741,
27043,
1143,
1669,
1281,
35190,
353,
11659,
20132,
340,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestDequeueIndexEmpty(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
_, tx, ok, err := db.DequeueIndex(context.Background())
if err != nil {
t.Fatalf("unexpected error dequeueing index: %s", err)
}
if ok {
_ = tx.Done(nil)
t.Fatalf("unexpected dequeue")
}
} | explode_data.jsonl/11075 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 136
} | [
2830,
3393,
1912,
4584,
1552,
3522,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
3244,
57776,
741,
197,
532,
20939,
8840,
39820,
11646,
2271,
3506,
1155,
340,
20939,
1669,
1273,
3506,
2822,
197,
6878,
9854,
11,
5394,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestCapability_JobCapabilities(t *testing.T) {
assert := assert.New(t)
// TODO We really want to ensure that the *invariant* of having an
// invalid configuration will make all other capabilities fail out,
// regardless of the position in which the invalid setting is present.
//
// Similarly for valid configurations; most importantly, we need
// to ensure that later configurations don't overwrite earlier
// valid configs. Eg. one rendition has a format of MP4, another
// has a format of mpegts, and a third has no format specified;
// the output capability string should have both mpegts and mp4.
// Regardless of ordering of the original inputs.
//
// Use a rapid check to facilitate this.
checkSuccess := func(params *StreamParameters, caps []Capability) bool {
jobCaps, err := JobCapabilities(params)
ret := assert.Nil(err)
expectedCaps := &Capabilities{bitstring: NewCapabilityString(caps)}
ret = assert.Equal(expectedCaps, jobCaps) && ret
return ret
}
// check with everything empty
assert.True(checkSuccess(&StreamParameters{}, []Capability{
Capability_H264,
Capability_AuthToken,
}), "failed with empty params")
// check with everything enabled
profs := []ffmpeg.VideoProfile{
{Format: ffmpeg.FormatMPEGTS},
{Format: ffmpeg.FormatMP4},
{FramerateDen: 1},
{Profile: ffmpeg.ProfileH264Main},
{Profile: ffmpeg.ProfileH264High},
{GOP: 1},
}
detector := DetectionConfig{
Freq: 1,
Profiles: []ffmpeg.DetectorProfile{&ffmpeg.SceneClassificationProfile{}},
}
storageURI := "s3+http://K:P@localhost:9000/bucket"
os, err := drivers.ParseOSURL(storageURI, false)
assert.Nil(err)
params := &StreamParameters{Profiles: profs, OS: os.NewSession(""), Detection: detector}
assert.True(checkSuccess(params, []Capability{
Capability_H264,
Capability_MP4,
Capability_MPEGTS,
Capability_FractionalFramerates,
Capability_StorageS3,
Capability_ProfileH264Main,
Capability_ProfileH264High,
Capability_GOP,
Capability_AuthToken,
Capability_SceneClassification,
}), "failed with everything enabled")
// check fractional framerates
params.Profiles = []ffmpeg.VideoProfile{{FramerateDen: 1}}
params.OS = nil
params.Detection = DetectionConfig{}
assert.True(checkSuccess(params, []Capability{
Capability_H264,
Capability_MPEGTS,
Capability_FractionalFramerates,
Capability_AuthToken,
}), "failed with fractional framerates")
// check error case with format
params.Profiles = []ffmpeg.VideoProfile{{Format: -1}}
_, err = JobCapabilities(params)
assert.Equal(capFormatConv, err)
// check error case with profiles
params.Profiles = []ffmpeg.VideoProfile{{Profile: -1}}
_, err = JobCapabilities(params)
assert.Equal(capProfileConv, err)
// check error case with storage
params.Profiles = nil
params.OS = &stubOS{storageType: -1}
_, err = JobCapabilities(params)
assert.Equal(capStorageConv, err)
} | explode_data.jsonl/74079 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 956
} | [
2830,
3393,
63746,
10598,
674,
55315,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
197,
322,
5343,
1205,
2167,
1366,
311,
5978,
429,
279,
353,
258,
15969,
9,
315,
3432,
458,
198,
197,
322,
8318,
6546,
686,
1281,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestDoltTransactionCommitTwoClients(t *testing.T) {
if types.IsFormat_DOLT_1(types.Format_Default) {
t.Skip()
}
// In this test, we're setting both clients to match transaction commits to dolt commits.
// Autocommit is disabled, as it's the recommended way to use this feature.
harness := newDoltHarness(t)
enginetest.TestTransactionScript(t, harness, enginetest.TransactionTest{
Name: "dolt commit on transaction commit two clients",
SetUpScript: []string{
"CREATE TABLE x (y BIGINT PRIMARY KEY, z BIGINT);",
"INSERT INTO x VALUES (1,1);",
},
Assertions: []enginetest.ScriptTestAssertion{
{
Query: "/* client a */ SET @@autocommit=0;",
Expected: []sql.Row{{}},
},
{
Query: "/* client b */ SET @@autocommit=0;",
Expected: []sql.Row{{}},
},
// start transaction implicitly commits the current transaction, so we have to do so before we turn on dolt commits
{
Query: "/* client a */ START TRANSACTION;",
Expected: []sql.Row{},
},
{
Query: "/* client b */ START TRANSACTION;",
Expected: []sql.Row{},
},
{
Query: "/* client a */ SET @@dolt_transaction_commit=1;",
Expected: []sql.Row{{}},
},
{
Query: "/* client b */ SET @@dolt_transaction_commit=1;",
Expected: []sql.Row{{}},
},
{
Query: "/* client a */ SET @initial_head=@@mydb_head;",
Expected: []sql.Row{{}},
},
{
Query: "/* client b */ SET @initial_head=@@mydb_head;",
Expected: []sql.Row{{}},
},
{
Query: "/* client a */ INSERT INTO x VALUES (2,2);",
Expected: []sql.Row{{sql.NewOkResult(1)}},
},
{
Query: "/* client b */ INSERT INTO x VALUES (3,3);",
Expected: []sql.Row{{sql.NewOkResult(1)}},
},
{
Query: "/* client a */ SELECT * FROM x ORDER BY y;",
Expected: []sql.Row{{1, 1}, {2, 2}},
},
{
Query: "/* client b */ SELECT * FROM x ORDER BY y;",
Expected: []sql.Row{{1, 1}, {3, 3}},
},
{
Query: "/* client a */ SELECT @@mydb_head like @initial_head;",
Expected: []sql.Row{{true}},
},
{
Query: "/* client b */ SELECT @@mydb_head like @initial_head;",
Expected: []sql.Row{{true}},
},
{
Query: "/* client b */ COMMIT;",
Expected: []sql.Row{},
},
{
Query: "/* client a */ SELECT @@mydb_head like @initial_head;",
Expected: []sql.Row{{true}},
},
{
Query: "/* client a */ COMMIT;",
Expected: []sql.Row{},
},
{
Query: "/* client a */ SELECT @@mydb_head like @initial_head;",
Expected: []sql.Row{{false}},
},
{
Query: "/* client b */ SELECT @@mydb_head like @initial_head;",
Expected: []sql.Row{{false}},
},
{
Query: "/* client a */ SELECT * FROM x ORDER BY y;",
Expected: []sql.Row{{1, 1}, {2, 2}, {3, 3}},
},
{
Query: "/* client b */ SELECT * FROM x ORDER BY y;",
Expected: []sql.Row{{1, 1}, {2, 2}, {3, 3}},
},
{
Query: "/* client c */ SELECT * FROM x ORDER BY y;",
Expected: []sql.Row{{1, 1}, {2, 2}, {3, 3}},
},
},
})
db := harness.databases[0].GetDoltDB()
cs, err := doltdb.NewCommitSpec("HEAD")
require.NoError(t, err)
headRefs, err := db.GetHeadRefs(context.Background())
require.NoError(t, err)
commit2, err := db.Resolve(context.Background(), cs, headRefs[0])
require.NoError(t, err)
cm2, err := commit2.GetCommitMeta(context.Background())
require.NoError(t, err)
require.Contains(t, cm2.Description, "Transaction commit")
as, err := doltdb.NewAncestorSpec("~1")
require.NoError(t, err)
commit1, err := commit2.GetAncestor(context.Background(), as)
require.NoError(t, err)
cm1, err := commit1.GetCommitMeta(context.Background())
require.NoError(t, err)
require.Contains(t, cm1.Description, "Transaction commit")
commit0, err := commit1.GetAncestor(context.Background(), as)
require.NoError(t, err)
cm0, err := commit0.GetCommitMeta(context.Background())
require.NoError(t, err)
require.Equal(t, "Initialize data repository", cm0.Description)
} | explode_data.jsonl/5288 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1771
} | [
2830,
3393,
35,
6181,
8070,
33441,
11613,
47174,
1155,
353,
8840,
836,
8,
341,
743,
4494,
4506,
4061,
1557,
35320,
62,
16,
52613,
9978,
60336,
8,
341,
197,
3244,
57776,
741,
197,
630,
197,
322,
758,
419,
1273,
11,
582,
2299,
6243,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestObjectWithScalars(t *testing.T) {
assertParseOneTerm(t, "number", "{\"abc\": 7, \"def\": 8}", ObjectTerm(Item(StringTerm("abc"), IntNumberTerm(7)), Item(StringTerm("def"), IntNumberTerm(8))))
assertParseOneTerm(t, "bool", "{\"abc\": false, \"def\": true}", ObjectTerm(Item(StringTerm("abc"), BooleanTerm(false)), Item(StringTerm("def"), BooleanTerm(true))))
assertParseOneTerm(t, "string", "{\"abc\": \"foo\", \"def\": \"bar\"}", ObjectTerm(Item(StringTerm("abc"), StringTerm("foo")), Item(StringTerm("def"), StringTerm("bar"))))
assertParseOneTerm(t, "mixed", "{\"abc\": 7, \"def\": null}", ObjectTerm(Item(StringTerm("abc"), IntNumberTerm(7)), Item(StringTerm("def"), NullTerm())))
assertParseOneTerm(t, "number key", "{8: 7, \"def\": null}", ObjectTerm(Item(IntNumberTerm(8), IntNumberTerm(7)), Item(StringTerm("def"), NullTerm())))
assertParseOneTerm(t, "number key 2", "{8.5: 7, \"def\": null}", ObjectTerm(Item(FloatNumberTerm(8.5), IntNumberTerm(7)), Item(StringTerm("def"), NullTerm())))
assertParseOneTerm(t, "bool key", "{true: false}", ObjectTerm(Item(BooleanTerm(true), BooleanTerm(false))))
assertParseOneTerm(t, "trailing comma", `{"a": "bar", "b": 64, }`, ObjectTerm(Item(StringTerm("a"), StringTerm("bar")), Item(StringTerm("b"), IntNumberTerm(64))))
assertParseOneTerm(t, "leading comma", `{, "a": "bar", "b": 64 }`, ObjectTerm(Item(StringTerm("a"), StringTerm("bar")), Item(StringTerm("b"), IntNumberTerm(64))))
assertParseOneTerm(t, "leading comma not comprehension", `{, 1 | 1: "bar"}`, ObjectTerm(Item(CallTerm(RefTerm(VarTerm("or")), NumberTerm("1"), NumberTerm("1")), StringTerm("bar"))))
} | explode_data.jsonl/50452 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 557
} | [
2830,
3393,
1190,
2354,
54005,
1155,
353,
8840,
836,
8,
341,
6948,
14463,
3966,
17249,
1155,
11,
330,
4082,
497,
54734,
13683,
11693,
220,
22,
11,
7245,
750,
11693,
220,
23,
9545,
3002,
17249,
29771,
2242,
17249,
445,
13683,
3975,
1333,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPinRecursiveFail(t *testing.T) {
ctx := context.Background()
dstore := dssync.MutexWrap(ds.NewMapDatastore())
bstore := blockstore.NewBlockstore(dstore)
bserv := bs.New(bstore, offline.Exchange(bstore))
dserv := mdag.NewDAGService(bserv)
p := NewPinner(dstore, dserv, dserv)
a, _ := randNode()
b, _ := randNode()
err := a.AddNodeLink("child", b)
if err != nil {
t.Fatal(err)
}
// NOTE: This isnt a time based test, we expect the pin to fail
mctx, cancel := context.WithTimeout(ctx, time.Millisecond)
defer cancel()
err = p.Pin(mctx, a, true)
if err == nil {
t.Fatal("should have failed to pin here")
}
err = dserv.Add(ctx, b)
if err != nil {
t.Fatal(err)
}
err = dserv.Add(ctx, a)
if err != nil {
t.Fatal(err)
}
// this one is time based... but shouldnt cause any issues
mctx, cancel = context.WithTimeout(ctx, time.Second)
defer cancel()
err = p.Pin(mctx, a, true)
if err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/31306 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 392
} | [
2830,
3393,
19861,
78542,
19524,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
19047,
741,
2698,
4314,
1669,
294,
778,
1721,
99014,
26787,
33783,
7121,
2227,
1043,
4314,
2398,
2233,
4314,
1669,
2504,
4314,
7121,
4713,
4314,
1500,
4314,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestX(t *testing.T) {
t.Skip()
var n yaml.Node
yaml.Unmarshal([]byte(`
# map
map: {
# doc
} # line 1
`), &n)
pretty.Print(n)
t.Fail()
} | explode_data.jsonl/12143 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 78
} | [
2830,
3393,
55,
1155,
353,
8840,
836,
8,
341,
3244,
57776,
741,
2405,
308,
32246,
21714,
198,
14522,
9467,
38097,
10556,
3782,
61528,
2,
2415,
198,
2186,
25,
341,
197,
2,
4629,
198,
92,
671,
1555,
220,
16,
271,
63,
701,
609,
77,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func Test_ObjectTracker_Unpopulated_Is_Unsatisfied(t *testing.T) {
g := gomega.NewWithT(t)
ot := newObjTracker(schema.GroupVersionKind{}, nil)
g.Expect(ot.Satisfied()).NotTo(gomega.BeTrue(), "unpopulated tracker should not be satisfied")
} | explode_data.jsonl/52313 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 92
} | [
2830,
3393,
27839,
31133,
40687,
8374,
7757,
31879,
40687,
82,
46265,
1155,
353,
8840,
836,
8,
341,
3174,
1669,
342,
32696,
7121,
2354,
51,
1155,
340,
197,
354,
1669,
74259,
31133,
42735,
5407,
5637,
10629,
22655,
2092,
340,
3174,
81893,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestMapPinning(t *testing.T) {
tmp := testutils.TempBPFFS(t)
c := qt.New(t)
spec := &MapSpec{
Name: "test",
Type: Hash,
KeySize: 4,
ValueSize: 4,
MaxEntries: 1,
Pinning: PinByName,
}
m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp})
if err != nil {
t.Fatal("Can't create map:", err)
}
defer m1.Close()
pinned := m1.IsPinned()
c.Assert(pinned, qt.Equals, true)
if err := m1.Put(uint32(0), uint32(42)); err != nil {
t.Fatal("Can't write value:", err)
}
// This is a terrible hack: if loading a pinned map tries to load BTF,
// it will get a nil *btf.Spec from this *btf.Map. This is turn will make
// btf.NewHandle fail.
spec.BTF = new(btf.Map)
m2, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp})
if err != nil {
t.Fatal("Can't create map:", err)
}
defer m2.Close()
var value uint32
if err := m2.Lookup(uint32(0), &value); err != nil {
t.Fatal("Can't read from map:", err)
}
if value != 42 {
t.Fatal("Pinning doesn't use pinned maps")
}
spec.KeySize = 8
m3, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp})
if err == nil {
m3.Close()
t.Fatalf("Opening a pinned map with a mismatching spec did not fail")
}
if !errors.Is(err, ErrMapIncompatible) {
t.Fatalf("Opening a pinned map with a mismatching spec failed with the wrong error")
}
} | explode_data.jsonl/21678 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 563
} | [
2830,
3393,
2227,
47,
19770,
1155,
353,
8840,
836,
8,
341,
20082,
1669,
1273,
6031,
65009,
26095,
1748,
50,
1155,
340,
1444,
1669,
38949,
7121,
1155,
692,
98100,
1669,
609,
2227,
8327,
515,
197,
21297,
25,
981,
330,
1944,
756,
197,
27... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestDuplicateHeader(t *testing.T) {
resp := testGet("/dupeheader", nil)
if len(resp.headers["Server"]) > 1 {
t.Fatalf("Expected only one header, got %#v", resp.headers["Server"])
}
if resp.headers["Server"][0] != "myserver" {
t.Fatalf("Incorrect header, exp 'myserver', got %q", resp.headers["Server"][0])
}
} | explode_data.jsonl/40630 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 145
} | [
2830,
3393,
53979,
4047,
1155,
353,
8840,
836,
8,
341,
262,
9039,
1669,
1273,
1949,
4283,
1054,
375,
2708,
497,
2092,
340,
262,
421,
2422,
20267,
18022,
1183,
5475,
14013,
861,
220,
16,
341,
286,
259,
30762,
445,
18896,
1172,
825,
424... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestReaderDiscard(t *testing.T) {
tests := []struct {
name string
r io.Reader
bufSize int // 0 means 16
peekSize int
n int // input to Discard
want int // from Discard
wantErr error // from Discard
wantBuffered int
}{
{
name: "normal case",
r: strings.NewReader("abcdefghijklmnopqrstuvwxyz"),
peekSize: 16,
n: 6,
want: 6,
wantBuffered: 10,
},
{
name: "discard causing read",
r: strings.NewReader("abcdefghijklmnopqrstuvwxyz"),
n: 6,
want: 6,
wantBuffered: 10,
},
{
name: "discard all without peek",
r: strings.NewReader("abcdefghijklmnopqrstuvwxyz"),
n: 26,
want: 26,
wantBuffered: 0,
},
{
name: "discard more than end",
r: strings.NewReader("abcdefghijklmnopqrstuvwxyz"),
n: 27,
want: 26,
wantErr: io.EOF,
wantBuffered: 0,
},
// Any error from filling shouldn't show up until we
// get past the valid bytes. Here we return we return 5 valid bytes at the same time
// as an error, but test that we don't see the error from Discard.
{
name: "fill error, discard less",
r: newScriptedReader(func(p []byte) (n int, err error) {
if len(p) < 5 {
panic("unexpected small read")
}
return 5, errors.New("5-then-error")
}),
n: 4,
want: 4,
wantErr: nil,
wantBuffered: 1,
},
{
name: "fill error, discard equal",
r: newScriptedReader(func(p []byte) (n int, err error) {
if len(p) < 5 {
panic("unexpected small read")
}
return 5, errors.New("5-then-error")
}),
n: 5,
want: 5,
wantErr: nil,
wantBuffered: 0,
},
{
name: "fill error, discard more",
r: newScriptedReader(func(p []byte) (n int, err error) {
if len(p) < 5 {
panic("unexpected small read")
}
return 5, errors.New("5-then-error")
}),
n: 6,
want: 5,
wantErr: errors.New("5-then-error"),
wantBuffered: 0,
},
// Discard of 0 shouldn't cause a read:
{
name: "discard zero",
r: newScriptedReader(), // will panic on Read
n: 0,
want: 0,
wantErr: nil,
wantBuffered: 0,
},
{
name: "discard negative",
r: newScriptedReader(), // will panic on Read
n: -1,
want: 0,
wantErr: ErrNegativeCount,
wantBuffered: 0,
},
}
for _, tt := range tests {
br := NewReaderSize(tt.r, tt.bufSize)
if tt.peekSize > 0 {
peekBuf, err := br.Peek(tt.peekSize)
if err != nil {
t.Errorf("%s: Peek(%d): %v", tt.name, tt.peekSize, err)
continue
}
if len(peekBuf) != tt.peekSize {
t.Errorf("%s: len(Peek(%d)) = %v; want %v", tt.name, tt.peekSize, len(peekBuf), tt.peekSize)
continue
}
}
discarded, err := br.Discard(tt.n)
if ge, we := fmt.Sprint(err), fmt.Sprint(tt.wantErr); discarded != tt.want || ge != we {
t.Errorf("%s: Discard(%d) = (%v, %v); want (%v, %v)", tt.name, tt.n, discarded, ge, tt.want, we)
continue
}
if bn := br.Buffered(); bn != tt.wantBuffered {
t.Errorf("%s: after Discard, Buffered = %d; want %d", tt.name, bn, tt.wantBuffered)
}
}
} | explode_data.jsonl/2896 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1778
} | [
2830,
3393,
5062,
23477,
567,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
198,
197,
7000,
286,
6399,
47431,
198,
197,
26398,
1695,
220,
526,
442,
220,
15,
3363,
220,
16,
21,
198,
197,
197,
29107... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestApplicationContext_RegisterBeanFn(t *testing.T) {
c, ch := container()
c.Property("room", "Class 3 Grade 1")
// 用接口注册时实际使用的是原始类型
c.Object(Teacher(newHistoryTeacher(""))).Export((*Teacher)(nil))
c.Provide(NewStudent, "", "${room}").Name("st1")
c.Provide(NewPtrStudent, "", "${room}").Name("st2")
c.Provide(NewStudent, "?", "${room:=https://}").Name("st3")
c.Provide(NewPtrStudent, "?", "${room:=4567}").Name("st4")
err := c.Refresh()
assert.Nil(t, err)
p := <-ch
var st1 *Student
err = p.BeanRegistry().Get(&st1, "st1")
assert.Nil(t, err)
fmt.Println(json.ToString(st1))
assert.Equal(t, st1.Room, p.Properties().Get("room"))
var st2 *Student
err = p.BeanRegistry().Get(&st2, "st2")
assert.Nil(t, err)
fmt.Println(json.ToString(st2))
assert.Equal(t, st2.Room, p.Properties().Get("room"))
fmt.Printf("%x\n", reflect.ValueOf(st1).Pointer())
fmt.Printf("%x\n", reflect.ValueOf(st2).Pointer())
var st3 *Student
err = p.BeanRegistry().Get(&st3, "st3")
assert.Nil(t, err)
fmt.Println(json.ToString(st3))
assert.Equal(t, st3.Room, p.Properties().Get("room"))
var st4 *Student
err = p.BeanRegistry().Get(&st4, "st4")
assert.Nil(t, err)
fmt.Println(json.ToString(st4))
assert.Equal(t, st4.Room, p.Properties().Get("room"))
} | explode_data.jsonl/17405 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 583
} | [
2830,
3393,
19736,
73124,
10437,
24911,
1155,
353,
8840,
836,
8,
341,
1444,
11,
521,
1669,
5476,
741,
1444,
15727,
445,
2966,
497,
330,
1957,
220,
18,
23812,
220,
16,
5130,
197,
322,
220,
11622,
107736,
61689,
13343,
99912,
37029,
10014... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInMemory_DeleteExpiredData(t *testing.T) {
db := GetInMemoryDB()
i1 := burner.Inbox{
ID: "1234",
TTL: time.Now().Add(-1 * time.Second).Unix(),
}
i2 := burner.Inbox{
ID: "5678",
TTL: time.Now().Add(1 * time.Hour).Unix(),
}
_ = db.SaveNewInbox(i1)
_ = db.SaveNewInbox(i2)
m1 := burner.Message{
InboxID: "1234",
ID: "1234",
TTL: time.Now().Add(-1 * time.Second).Unix(),
}
m2 := burner.Message{
InboxID: "5678",
ID: "5678",
TTL: time.Now().Add(1 * time.Hour).Unix(),
}
_ = db.SaveNewMessage(m1)
_ = db.SaveNewMessage(m2)
db.DeleteExpiredData()
inboxTests := []struct {
ID string
ExpectedErr error
}{
{
ID: "1234",
ExpectedErr: errInboxDoesntExist,
},
{
ID: "5678",
ExpectedErr: nil,
},
}
for _, test := range inboxTests {
_, err := db.GetInboxByID(test.ID)
if err != test.ExpectedErr {
t.Errorf("TestInMemory_DeleteExpiredData: inbox test failed. Expected error - %v, got %v", test.ExpectedErr, err)
}
}
msgTests := []struct {
ID string
ExpectedErr error
}{
{
ID: "1234",
ExpectedErr: burner.ErrMessageDoesntExist,
},
{
ID: "5678",
ExpectedErr: nil,
},
}
for _, test := range msgTests {
_, err := db.GetMessageByID(test.ID, test.ID)
if err != test.ExpectedErr {
t.Errorf("TestInMemory_DeleteExpiredData: message test failed. Expected error - %v, got %v", test.ExpectedErr, err)
}
}
} | explode_data.jsonl/41284 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 729
} | [
2830,
3393,
641,
10642,
57418,
54349,
1043,
1155,
353,
8840,
836,
8,
341,
20939,
1669,
2126,
641,
10642,
3506,
2822,
8230,
16,
1669,
64719,
5337,
2011,
515,
197,
29580,
25,
220,
330,
16,
17,
18,
19,
756,
197,
10261,
13470,
25,
882,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
config := configSetup(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cs1, vss := makeState(ctx, t, makeStateArgs{config: config})
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
height, round := cs1.Height, cs1.Round
timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose)
newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
pv1, err := cs1.privValidator.GetPubKey(ctx)
require.NoError(t, err)
addr := pv1.Address()
voteCh := subscribeToVoter(ctx, t, cs1, addr)
// start round
startTestRound(ctx, cs1, height, round)
ensureNewRound(t, newRoundCh, height, round)
ensurePrevote(t, voteCh, height, round)
incrementRound(vss[1:]...)
signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4)
round++ // moving to the next round
ensureNewRound(t, newRoundCh, height, round)
rs := cs1.GetRoundState()
assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires
ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.proposeTimeout(round).Nanoseconds())
ensurePrevoteMatch(t, voteCh, height, round, nil)
} | explode_data.jsonl/54285 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 457
} | [
2830,
3393,
42104,
7636,
2008,
960,
1925,
3564,
27497,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
2193,
21821,
1155,
340,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
16867,
9121,
2822,
71899,
16,
11,
348,
778,
1669,
128... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReconcile_PipelineSpecTaskSpec(t *testing.T) {
// TestReconcile_PipelineSpecTaskSpec runs "Reconcile" on a PipelineRun that has an embedded PipelineSpec that has an embedded TaskSpec.
// It verifies that a TaskRun is created, it checks the resulting API actions, status and events.
names.TestingSeed()
prs := []*v1beta1.PipelineRun{{
ObjectMeta: baseObjectMeta("test-pipeline-run-success", "foo"),
Spec: v1beta1.PipelineRunSpec{
PipelineRef: &v1beta1.PipelineRef{
Name: "test-pipeline",
},
},
}}
ps := []*v1beta1.Pipeline{{
ObjectMeta: baseObjectMeta("test-pipeline", "foo"),
Spec: v1beta1.PipelineSpec{
Tasks: []v1beta1.PipelineTask{{
Name: "unit-test-task-spec",
TaskSpec: &v1beta1.EmbeddedTask{
TaskSpec: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "mystep",
Image: "myimage"}}},
},
},
}},
},
}}
d := test.Data{
PipelineRuns: prs,
Pipelines: ps,
}
prt := newPipelineRunTest(d, t)
defer prt.Cancel()
wantEvents := []string{
"Normal Started",
"Normal Running Tasks Completed: 0",
}
reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run-success", wantEvents, false)
actions := clients.Pipeline.Actions()
if len(actions) < 2 {
t.Fatalf("Expected client to have at least two action implementation but it has %d", len(actions))
}
// Check that the expected TaskRun was created
actual := getTaskRunCreations(t, actions)[0]
expectedTaskRun := &v1beta1.TaskRun{
ObjectMeta: taskRunObjectMeta("test-pipeline-run-success-unit-test-task-spec-9l9zj", "foo", "test-pipeline-run-success", "test-pipeline", "unit-test-task-spec", false),
Spec: v1beta1.TaskRunSpec{
TaskSpec: &v1beta1.TaskSpec{
Steps: []v1beta1.Step{{
Container: corev1.Container{
Name: "mystep",
Image: "myimage",
},
}},
},
ServiceAccountName: config.DefaultServiceAccountValue,
Resources: &v1beta1.TaskRunResources{},
Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute},
},
}
// ignore IgnoreUnexported ignore both after and before steps fields
if d := cmp.Diff(expectedTaskRun, actual, cmpopts.SortSlices(func(x, y v1beta1.TaskSpec) bool { return len(x.Steps) == len(y.Steps) })); d != "" {
t.Errorf("expected to see TaskRun %v created. Diff %s", expectedTaskRun, diff.PrintWantGot(d))
}
// test taskrun is able to recreate correct pipeline-pvc-name
if expectedTaskRun.GetPipelineRunPVCName() != "test-pipeline-run-success-pvc" {
t.Errorf("expected to see TaskRun PVC name set to %q created but got %s", "test-pipeline-run-success-pvc", expectedTaskRun.GetPipelineRunPVCName())
}
if len(reconciledRun.Status.TaskRuns) != 1 {
t.Errorf("Expected PipelineRun status to include both TaskRun status items that can run immediately: %v", reconciledRun.Status.TaskRuns)
}
if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-task-spec-9l9zj"]; !exists {
t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns)
}
} | explode_data.jsonl/68250 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1241
} | [
2830,
3393,
693,
40446,
457,
1088,
8790,
8327,
6262,
8327,
1155,
353,
8840,
836,
8,
341,
197,
322,
3393,
693,
40446,
457,
1088,
8790,
8327,
6262,
8327,
8473,
330,
693,
40446,
457,
1,
389,
264,
40907,
6727,
429,
702,
458,
22864,
40907,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUseSecure(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) })
gcs := &GRPCClientSettings{
Headers: nil,
Endpoint: "",
Compression: "",
TLSSetting: configtls.TLSClientSetting{},
Keepalive: nil,
}
dialOpts, err := gcs.ToDialOptions(componenttest.NewNopHost(), tt.TelemetrySettings)
assert.NoError(t, err)
assert.Len(t, dialOpts, 3)
} | explode_data.jsonl/80329 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 197
} | [
2830,
3393,
10253,
49813,
1155,
353,
8840,
836,
8,
341,
3244,
83,
11,
1848,
1669,
7448,
11736,
1944,
39820,
6639,
35958,
741,
17957,
35699,
1155,
11,
1848,
340,
3244,
727,
60639,
18552,
368,
314,
1373,
35699,
1155,
11,
17853,
10849,
184... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestClient_Handle_NilArgs(t *testing.T) {
rng := test.Prng(t)
backend := &ctest.MockBackend{}
c, err := client.New(wtest.NewRandomAddress(rng), &DummyBus{t}, backend, backend, wtest.RandomWallet())
require.NoError(t, err)
dummyUH := client.UpdateHandlerFunc(func(*channel.State, client.ChannelUpdate, *client.UpdateResponder) {})
assert.Panics(t, func() { c.Handle(nil, dummyUH) })
dummyPH := client.ProposalHandlerFunc(func(client.ChannelProposal, *client.ProposalResponder) {})
assert.Panics(t, func() { c.Handle(dummyPH, nil) })
} | explode_data.jsonl/2342 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 206
} | [
2830,
3393,
2959,
42714,
1604,
321,
4117,
1155,
353,
8840,
836,
8,
341,
7000,
968,
1669,
1273,
17947,
968,
1155,
340,
197,
20942,
1669,
609,
67880,
24664,
29699,
16094,
1444,
11,
1848,
1669,
2943,
7121,
3622,
1944,
7121,
13999,
4286,
87... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestArchiveReaderLZ4SlowCompressed(t *testing.T) {
for _, tt := range []archiveReaderLZ4Case{
{
name: "regular larger data with medium compression",
setup: func(w io.Writer) *lz4.Writer {
lz4w := lz4.NewWriter(w)
lz4w.Apply(lz4.CompressionLevelOption(lz4.Level5))
return lz4w
},
dataStr: randomString(5 * 1024),
},
{
name: "regular larger data with slow compression",
setup: func(w io.Writer) *lz4.Writer {
lz4w := lz4.NewWriter(w)
lz4w.Apply(lz4.CompressionLevelOption(lz4.Level9))
return lz4w
},
dataStr: randomString(5 * 1024),
},
{
name: "legacy larger data with medium compression",
setup: func(w io.Writer) *lz4.Writer {
lz4w := lz4.NewWriter(w)
lz4w.Apply(lz4.LegacyOption(true))
lz4w.Apply(lz4.CompressionLevelOption(lz4.Level5))
return lz4w
},
dataStr: randomString(5 * 1024),
},
{
name: "legacy larger data with slow compression",
setup: func(w io.Writer) *lz4.Writer {
lz4w := lz4.NewWriter(w)
lz4w.Apply(lz4.LegacyOption(true))
lz4w.Apply(lz4.CompressionLevelOption(lz4.Level9))
return lz4w
},
dataStr: randomString(5 * 1024),
},
} {
t.Run(tt.name, func(t *testing.T) {
checkArchiveReaderLZ4(t, tt)
})
}
} | explode_data.jsonl/56416 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 604
} | [
2830,
3393,
42502,
5062,
43,
57,
19,
58289,
1092,
14318,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
17853,
1669,
2088,
3056,
16019,
5062,
43,
57,
19,
4207,
515,
197,
197,
515,
298,
11609,
25,
330,
22308,
8131,
821,
448,
11051,
25111,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLogInfo(t *testing.T) {
// TODO: figure out some way to check the length, add something, and make sure the length increments!
out := runCli(t, "loginfo")
outputContains(t, out, "Verification Successful!")
} | explode_data.jsonl/20315 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 70
} | [
2830,
3393,
2201,
1731,
1155,
353,
8840,
836,
8,
341,
197,
322,
5343,
25,
7071,
700,
1045,
1616,
311,
1779,
279,
3084,
11,
912,
2494,
11,
323,
1281,
2704,
279,
3084,
61600,
4894,
13967,
1669,
1598,
87014,
1155,
11,
330,
839,
2733,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestSchemaParser_SimpleInputObject(t *testing.T) {
body := `
input Hello {
world: String
}`
astDoc := parse(t, body)
expected := &ast.Document{
Loc: testLoc(1, 32),
Definitions: []ast.Node{
&ast.InputObjectDefinition{
Loc: testLoc(1, 32),
Name: &ast.Name{
Value: "Hello",
Loc: testLoc(7, 12),
},
Fields: []*ast.InputValueDefinition{
{
Loc: testLoc(17, 30),
Name: &ast.Name{
Value: "world",
Loc: testLoc(17, 22),
},
Type: &ast.Named{
Loc: testLoc(24, 30),
Name: &ast.Name{
Value: "String",
Loc: testLoc(24, 30),
},
},
DefaultValue: nil,
},
},
},
},
}
if !reflect.DeepEqual(astDoc, expected) {
t.Fatalf("unexpected document, expected: %v, got: %v", expected, astDoc)
}
} | explode_data.jsonl/51232 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 424
} | [
2830,
3393,
8632,
6570,
1098,
6456,
2505,
1190,
1155,
353,
8840,
836,
8,
341,
35402,
1669,
22074,
1355,
21927,
341,
220,
1879,
25,
923,
198,
31257,
88836,
9550,
1669,
4715,
1155,
11,
2487,
340,
42400,
1669,
609,
559,
26256,
515,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestDistributionSummary_Measure(t *testing.T) {
c := getDistributionSummary("measure")
c.Record(100)
c.Record(200)
assertDistributionSummary(t, c, 2, 300, 100*100+200*200, 200)
} | explode_data.jsonl/8500 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 70
} | [
2830,
3393,
62377,
19237,
1245,
68,
3970,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
633,
62377,
19237,
445,
47799,
1138,
1444,
49959,
7,
16,
15,
15,
340,
1444,
49959,
7,
17,
15,
15,
340,
6948,
62377,
19237,
1155,
11,
272,
11,
220,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_fsSource_ReadDown(t *testing.T) {
s := getTestSource(t, "sample-migrations")
down, identifier, err := s.ReadDown(1)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if identifier != "some-text" {
t.Fatalf("expected identifier to be some-text, got: %s", identifier)
}
defer down.Close()
contents, _ := ioutil.ReadAll(down)
if bytes.Compare(contents, []byte("{\"1\": \"down\"}")) != 0 {
t.Fatalf("unexpected contents, got: %s", contents)
}
} | explode_data.jsonl/81889 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 185
} | [
2830,
3393,
34470,
3608,
38381,
4454,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
633,
2271,
3608,
1155,
11,
330,
13611,
1448,
17824,
1138,
2698,
779,
11,
12816,
11,
1848,
1669,
274,
6503,
4454,
7,
16,
340,
743,
1848,
961,
2092,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestMinimal(t *testing.T) {
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1, true, false, nil)
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1, true, false, nil)
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true, false, nil)
} | explode_data.jsonl/17482 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 125
} | [
2830,
3393,
88328,
1155,
353,
8840,
836,
8,
341,
56742,
2271,
36136,
1155,
11,
330,
92607,
7724,
905,
497,
330,
92607,
497,
330,
85,
16,
7141,
15,
497,
895,
11,
220,
16,
11,
830,
11,
895,
11,
2092,
340,
56742,
2271,
36136,
1155,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestName(t *testing.T) {
tags := []language.Tag{
language.AmericanEnglish,
language.CanadianFrench,
language.Dutch,
language.French,
language.German,
language.Italian,
language.Japanese,
language.Korean,
language.LatinAmericanSpanish,
language.Russian,
language.Spanish,
language.SimplifiedChinese,
language.TraditionalChinese}
for _, tag := range tags {
name, ok := name[tag]
s := tag.String()
if !ok {
t.Fatalf("name[%s] != true", s)
}
if ok := len(name) > 0; !ok {
t.Skipf("len(name[%s]) == 0", s)
}
}
} | explode_data.jsonl/1548 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 226
} | [
2830,
3393,
675,
1155,
353,
8840,
836,
8,
341,
3244,
2032,
1669,
3056,
11528,
23676,
515,
197,
8810,
2616,
875,
15717,
22574,
345,
197,
8810,
2616,
53280,
10066,
43197,
345,
197,
8810,
2616,
909,
14061,
345,
197,
8810,
2616,
991,
6655,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestJobMarshal(t *testing.T) {
testCases := []struct {
value *Job
expected string
}{
{
value: &Job{
Name: "simple",
},
expected: `---
name: simple
`,
},
{
value: &Job{
Name: "release-name",
Release: &Release{
Name: "some-release",
},
},
expected: `---
name: release-name
release: some-release
`,
},
{
value: &Job{
Name: "templates",
Templates: []*JobTemplate{
&JobTemplate{
SourcePath: "/source",
Content: "<content>",
Job: &Job{Name: "templates"}, // fake a loop
},
},
},
expected: `---
name: templates
templates:
- sourcePath: /source
content: <content>
`,
},
{
value: &Job{
Name: "packages",
Packages: []*Package{
{
Fingerprint: "abc",
},
},
},
expected: `---
name: packages
packages:
- abc # only list the fingerprint, not the whole object
`,
},
{
value: &Job{
Name: "filled-out",
Description: "a filled-out job",
Path: "/path/to/thing",
Fingerprint: "abc123",
SHA1: "def456",
Properties: []*JobProperty{
&JobProperty{
Name: "property",
Description: "some job property",
Default: 1,
Job: &Job{Name: "filled-out"}, // fake a loop
},
},
Version: "v123",
},
expected: `---
name: filled-out
description: a filled-out job
path: /path/to/thing
fingerprint: abc123
sha1: def456
properties:
- name: property
description: some job property
default: 1
job: filled-out
version: v123
`,
},
}
for _, testCase := range testCases {
t.Run(testCase.value.Name, func(t *testing.T) {
assert := assert.New(t)
adapter := util.NewMarshalAdapter(testCase.value)
actual, err := yaml.Marshal(adapter)
if !assert.NoError(err) {
return
}
var unmarshalled, expected interface{}
if !assert.NoError(yaml.Unmarshal(actual, &unmarshalled), "Error unmarshalling result") {
return
}
expectedBytes := []byte(strings.Replace(testCase.expected, "\t", " ", -1))
if !assert.NoError(yaml.Unmarshal(expectedBytes, &expected), "Error in expected input") {
return
}
testhelpers.IsYAMLSubset(assert, expected, unmarshalled)
})
}
} | explode_data.jsonl/80863 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1126
} | [
2830,
3393,
12245,
55438,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
16309,
262,
353,
12245,
198,
197,
42400,
914,
198,
197,
59403,
197,
197,
515,
298,
16309,
25,
609,
12245,
515,
571,
21297,
25,
330,
2294... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestParsePointMissingQuote(t *testing.T) {
expectedSuffix := "unbalanced quotes"
examples := []string{
`cpu,host=serverA value="test`,
`cpu,host=serverA value="test""`,
}
for i, example := range examples {
_, err := models.ParsePointsString(example)
if err == nil {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example)
} else if !strings.HasSuffix(err.Error(), expectedSuffix) {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)
}
}
} | explode_data.jsonl/16900 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 212
} | [
2830,
3393,
14463,
2609,
25080,
19466,
1155,
353,
8840,
836,
8,
341,
42400,
40177,
1669,
330,
359,
58402,
17194,
698,
8122,
4023,
1669,
3056,
917,
515,
197,
197,
63,
16475,
11,
3790,
28,
4030,
32,
897,
428,
1944,
12892,
197,
197,
63,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestListDocks(t *testing.T) {
m := map[string][]string{
"offset": {"0"},
"limit": {"732"},
"sortDir": {"desc"},
"sortKey": {"id"},
"Name": {"sample"},
"DriverName": {"sample"},
}
dcks, err := fc.ListDocksWithFilter(c.NewAdminContext(), m)
if err != nil {
t.Error("List docks failed:", err)
}
var expected []*model.DockSpec
expected = append(expected, &SampleDocks[0])
if !reflect.DeepEqual(dcks, expected) {
t.Errorf("Expected %+v, got %+v\n", expected, dcks)
}
} | explode_data.jsonl/50708 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 227
} | [
2830,
3393,
852,
35,
25183,
1155,
353,
8840,
836,
8,
341,
2109,
1669,
2415,
14032,
45725,
917,
515,
197,
197,
1,
3176,
788,
257,
5212,
15,
7115,
197,
197,
1,
9506,
788,
414,
5212,
22,
18,
17,
7115,
197,
197,
1,
6860,
6184,
788,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestRepeat(t *testing.T) {
for _, tc := range []struct {
name string
v interface{}
sz int
}{
{
name: "Int",
v: int64(4),
sz: 192, // 128 bytes (ints), 64 bytes (nulls)
},
{
name: "Uint",
v: uint64(4),
sz: 192, // 128 bytes (ints), 64 bytes (nulls)
},
{
name: "Float",
v: float64(4),
sz: 192, // 128 bytes (ints), 64 bytes (nulls)
},
{
name: "String",
v: "a",
sz: 0, // optimized away
},
{
name: "Boolean",
v: true,
sz: 128, // 64 bytes (bools), 64 bytes (nulls)
},
} {
t.Run(tc.name, func(t *testing.T) {
mem := memory.NewCheckedAllocator(memory.DefaultAllocator)
defer mem.AssertSize(t, 0)
arr := array.Repeat(tc.v, 10, mem)
mem.AssertSize(t, tc.sz)
arr.Release()
})
}
} | explode_data.jsonl/64589 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 421
} | [
2830,
3393,
38718,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
17130,
1669,
2088,
3056,
1235,
341,
197,
11609,
914,
198,
197,
5195,
262,
3749,
16094,
197,
1903,
89,
256,
526,
198,
197,
59403,
197,
197,
515,
298,
11609,
25,
330,
1072,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestConfigUtil_Visit(t *testing.T) {
t.Parallel()
var trail []string
visitor := func(path string) error {
trail = append(trail, path)
return nil
}
basePath := "../../test/command/merge"
if err := Visit(basePath, visitor); err != nil {
t.Fatalf("err: %v", err)
}
if err := Visit(path.Join(basePath, "subdir", "c.json"), visitor); err != nil {
t.Fatalf("err: %v", err)
}
expected := []string{
path.Join(basePath, "a.json"),
path.Join(basePath, "b.json"),
path.Join(basePath, "nope"),
path.Join(basePath, "zero.json"),
path.Join(basePath, "subdir", "c.json"),
}
if !reflect.DeepEqual(trail, expected) {
t.Fatalf("bad: %#v", trail)
}
} | explode_data.jsonl/49064 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 282
} | [
2830,
3393,
2648,
2742,
2334,
285,
275,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
2405,
8849,
3056,
917,
198,
197,
39985,
1669,
2915,
5581,
914,
8,
1465,
341,
197,
25583,
604,
284,
8737,
7624,
604,
11,
1815,
340,
197,
85... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReceive(t *testing.T) {
if testing.Short() {
t.Skip("Skip on short mode")
}
// Producer
cfg := gonsq.NewConfig()
prod, err := gonsq.NewProducer(*flagAddr, cfg)
if err != nil {
t.Fatal("err:", err)
}
defer prod.Stop()
testMessage := fmt.Sprintf("Hello World [%s]", time.Now().Format(time.RFC3339Nano))
err = prod.Publish(*flagTopic, []byte(testMessage))
if err != nil {
t.Fatal("err:", err)
}
// Receiver
recv, err := nsq.NewReceiver(*flagTopic, *flagChannel, *flagLookupdAddr, cfg)
if err != nil {
t.Fatal("err:", err)
}
msgCh := make(chan string)
errCh := make(chan error, 1)
go func() {
err := recv.Listen(mq.HandlerFunc(func(msg mq.Message) {
if ackErr := msg.Ack(); ackErr != nil {
t.Error("err:", ackErr)
}
msgCh <- string(msg.Body())
}))
if err != nil {
errCh <- err
}
close(errCh)
}()
defer func() {
if stopErr := recv.Stop(); stopErr != nil {
t.Error("err:", stopErr)
}
}()
expectMessage(t, msgCh, testMessage, errCh, 5*time.Second)
} | explode_data.jsonl/77805 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 456
} | [
2830,
3393,
14742,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
3244,
57776,
445,
35134,
389,
2805,
3856,
1138,
197,
630,
197,
322,
43359,
198,
50286,
1669,
342,
2382,
80,
7121,
2648,
741,
197,
19748,
11,
1848,
1669,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestJoinsForSlice(t *testing.T) {
users := []User{
*GetUser("slice-joins-1", Config{Company: true, Manager: true, Account: true}),
*GetUser("slice-joins-2", Config{Company: true, Manager: true, Account: true}),
*GetUser("slice-joins-3", Config{Company: true, Manager: true, Account: true}),
}
DB.Create(&users)
var userIDs []uint
for _, user := range users {
userIDs = append(userIDs, user.ID)
}
var users2 []User
if err := DB.Joins("Company").Joins("Manager").Joins("Account").Find(&users2, "users.id IN ?", userIDs).Error; err != nil {
t.Fatalf("Failed to load with joins, got error: %v", err)
} else if len(users2) != len(users) {
t.Fatalf("Failed to load join users, got: %v, expect: %v", len(users2), len(users))
}
sort.Slice(users2, func(i, j int) bool {
return users2[i].ID > users2[j].ID
})
sort.Slice(users, func(i, j int) bool {
return users[i].ID > users[j].ID
})
for idx, user := range users {
CheckUser(t, user, users2[idx])
}
} | explode_data.jsonl/20074 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 388
} | [
2830,
3393,
22493,
1330,
2461,
33236,
1155,
353,
8840,
836,
8,
341,
90896,
1669,
3056,
1474,
515,
197,
197,
9,
1949,
1474,
445,
24963,
12,
7305,
1330,
12,
16,
497,
5532,
90,
14491,
25,
830,
11,
10567,
25,
830,
11,
8615,
25,
830,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestScopesRequestsOptimized(t *testing.T) {
runTestBuildFlags(t, "testvariables", func(client *daptest.Client, fixture protest.Fixture) {
runDebugSessionWithBPs(t, client, "launch",
// Launch
func() {
client.LaunchRequestWithArgs(map[string]interface{}{
"mode": "exec", "program": fixture.Path, "showGlobalVariables": true,
})
},
// Breakpoints are set within the program
fixture.Source, []int{},
[]onBreakpoint{{
// Stop at first breakpoint
execute: func() {
client.StackTraceRequest(1, 0, 20)
stack := client.ExpectStackTraceResponse(t)
startLineno := 66
if runtime.GOOS == "windows" && goversion.VersionAfterOrEqual(runtime.Version(), 1, 15) {
// Go1.15 on windows inserts a NOP after the call to
// runtime.Breakpoint and marks it same line as the
// runtime.Breakpoint call, making this flaky, so skip the line check.
startLineno = -1
}
checkStackFrames(t, stack, "main.foobar", startLineno, 1000, 4, 4)
client.ScopesRequest(1000)
scopes := client.ExpectScopesResponse(t)
checkScope(t, scopes, 0, "Arguments (warning: optimized function)", 1000)
checkScope(t, scopes, 1, "Locals (warning: optimized function)", 1001)
checkScope(t, scopes, 2, "Globals (package main)", 1002)
},
disconnect: false,
}, {
// Stop at second breakpoint
execute: func() {
// Frame ids get reset at each breakpoint.
client.StackTraceRequest(1, 0, 20)
stack := client.ExpectStackTraceResponse(t)
checkStackFrames(t, stack, "main.barfoo", 27, 1000, 5, 5)
client.ScopesRequest(1000)
scopes := client.ExpectScopesResponse(t)
checkScope(t, scopes, 0, "Arguments (warning: optimized function)", 1000)
checkScope(t, scopes, 1, "Locals (warning: optimized function)", 1001)
checkScope(t, scopes, 2, "Globals (package main)", 1002)
},
disconnect: false,
}})
},
protest.EnableOptimization)
} | explode_data.jsonl/17318 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 783
} | [
2830,
3393,
3326,
18523,
35295,
21367,
45706,
1155,
353,
8840,
836,
8,
341,
56742,
2271,
11066,
9195,
1155,
11,
330,
1944,
18616,
497,
2915,
12805,
353,
91294,
1944,
11716,
11,
12507,
8665,
991,
12735,
8,
341,
197,
56742,
7939,
5283,
23... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDisplayTaskUpdates(t *testing.T) {
require.NoError(t, db.ClearCollections(task.Collection, event.AllLogCollection), "error clearing collection")
assert := assert.New(t)
dt := task.Task{
Id: "dt",
DisplayOnly: true,
Status: evergreen.TaskUndispatched,
Activated: false,
ExecutionTasks: []string{
"task1",
"task2",
"task3",
"task4",
},
}
assert.NoError(dt.Insert())
dt2 := task.Task{
Id: "dt2",
DisplayOnly: true,
Status: evergreen.TaskUndispatched,
Activated: false,
ExecutionTasks: []string{
"task5",
"task6",
},
}
assert.NoError(dt2.Insert())
task1 := task.Task{
Id: "task1",
Status: evergreen.TaskFailed,
Details: apimodels.TaskEndDetail{
Status: evergreen.TaskFailed,
TimedOut: true,
},
TimeTaken: 3 * time.Minute,
StartTime: time.Date(2000, 0, 0, 1, 1, 1, 0, time.Local),
FinishTime: time.Date(2000, 0, 0, 1, 9, 1, 0, time.Local),
}
assert.NoError(task1.Insert())
task2 := task.Task{
Id: "task2",
Status: evergreen.TaskSucceeded,
TimeTaken: 2 * time.Minute,
StartTime: time.Date(2000, 0, 0, 0, 30, 0, 0, time.Local), // this should end up as the start time for dt1
FinishTime: time.Date(2000, 0, 0, 1, 0, 5, 0, time.Local),
}
assert.NoError(task2.Insert())
task3 := task.Task{
Id: "task3",
Activated: true,
Status: evergreen.TaskSystemUnresponse,
TimeTaken: 5 * time.Minute,
StartTime: time.Date(2000, 0, 0, 0, 44, 0, 0, time.Local),
FinishTime: time.Date(2000, 0, 0, 1, 0, 1, 0, time.Local),
}
assert.NoError(task3.Insert())
task4 := task.Task{
Id: "task4",
Activated: true,
Status: evergreen.TaskSystemUnresponse,
TimeTaken: 1 * time.Minute,
StartTime: time.Date(2000, 0, 0, 1, 0, 20, 0, time.Local),
FinishTime: time.Date(2000, 0, 0, 1, 22, 0, 0, time.Local), // this should end up as the end time for dt1
}
assert.NoError(task4.Insert())
task5 := task.Task{
Id: "task5",
Activated: true,
Status: evergreen.TaskUndispatched,
}
assert.NoError(task5.Insert())
task6 := task.Task{
Id: "task6",
Activated: true,
Status: evergreen.TaskSucceeded,
}
assert.NoError(task6.Insert())
// test that updating the status + activated from execution tasks works
assert.NoError(UpdateDisplayTask(&dt))
dbTask, err := task.FindOne(task.ById(dt.Id))
assert.NoError(err)
assert.NotNil(dbTask)
assert.Equal(evergreen.TaskFailed, dbTask.Status)
assert.True(dbTask.Details.TimedOut)
assert.True(dbTask.Activated)
assert.Equal(11*time.Minute, dbTask.TimeTaken)
assert.Equal(task2.StartTime, dbTask.StartTime)
assert.Equal(task4.FinishTime, dbTask.FinishTime)
// test that you can't update an execution task
assert.Error(UpdateDisplayTask(&task1))
// test that a display task with a finished + unstarted task is "scheduled"
assert.NoError(UpdateDisplayTask(&dt2))
dbTask, err = task.FindOne(task.ById(dt2.Id))
assert.NoError(err)
assert.NotNil(dbTask)
assert.Equal(evergreen.TaskStarted, dbTask.Status)
// check that the updates above logged an event for the first one
events, err := event.Find(event.AllLogCollection, event.TaskEventsForId(dt.Id))
assert.NoError(err)
assert.Len(events, 1)
events, err = event.Find(event.AllLogCollection, event.TaskEventsForId(dt2.Id))
assert.NoError(err)
assert.Len(events, 0)
} | explode_data.jsonl/60447 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1419
} | [
2830,
3393,
7020,
6262,
37091,
1155,
353,
8840,
836,
8,
341,
17957,
35699,
1155,
11,
2927,
13524,
52730,
17483,
28629,
11,
1538,
16764,
2201,
6482,
701,
330,
841,
32750,
4426,
1138,
6948,
1669,
2060,
7121,
1155,
340,
97980,
1669,
3383,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestQueryGetBlockByNumber(t *testing.T) {
chainid := "mytestchainid3"
path := tempDir(t, "test3")
defer os.RemoveAll(path)
stub, err := setupTestLedger(chainid, path)
if err != nil {
t.Fatalf(err.Error())
}
// block number 0 (genesis block) would already be present in the ledger
args := [][]byte{[]byte(GetBlockByNumber), []byte(chainid), []byte("0")}
prop := resetProvider(resources.Qscc_GetBlockByNumber, chainid, &peer2.SignedProposal{}, nil)
res := stub.MockInvokeWithSignedProposal("1", args, prop)
assert.Equal(t, int32(shim.OK), res.Status, "GetBlockByNumber should have succeeded for block number: 0")
// block number 1 should not be present in the ledger
args = [][]byte{[]byte(GetBlockByNumber), []byte(chainid), []byte("1")}
res = stub.MockInvoke("2", args)
assert.Equal(t, int32(shim.ERROR), res.Status, "GetBlockByNumber should have failed with invalid number: 1")
// block number cannot be nil
args = [][]byte{[]byte(GetBlockByNumber), []byte(chainid), []byte(nil)}
res = stub.MockInvoke("3", args)
assert.Equal(t, int32(shim.ERROR), res.Status, "GetBlockByNumber should have failed with nil block number")
} | explode_data.jsonl/18830 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 406
} | [
2830,
3393,
2859,
1949,
4713,
1359,
2833,
1155,
353,
8840,
836,
8,
341,
197,
8819,
307,
1669,
330,
2408,
1944,
8819,
307,
18,
698,
26781,
1669,
2730,
6184,
1155,
11,
330,
1944,
18,
1138,
16867,
2643,
84427,
5581,
692,
18388,
392,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestPublishWithoutEventTime(t *testing.T) {
payload := buildDefaultTestPayloadWithoutEventTime()
body, statusCode := performPublishRequest(t, publishServer.URL, payload)
assertExpectedError(t, body, statusCode, http.StatusBadRequest, api.FieldEventTime, api.ErrorTypeValidationViolation)
} | explode_data.jsonl/74414 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 87
} | [
2830,
3393,
50145,
26040,
1556,
1462,
1155,
353,
8840,
836,
8,
341,
76272,
1669,
1936,
3675,
2271,
29683,
26040,
1556,
1462,
741,
35402,
11,
35532,
1669,
2736,
50145,
1900,
1155,
11,
3415,
5475,
20893,
11,
7729,
340,
6948,
18896,
1454,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestNewV1EnvsnapResult(t *testing.T) {
v1 := NewV1EnvsnapResult()
assert.Equal(t, os.Stdout, v1.out)
assert.Nil(t, v1.Environment)
assert.Nil(t, v1.Exec)
assert.Nil(t, v1.Golang)
assert.Nil(t, v1.Python)
assert.Nil(t, v1.System)
} | explode_data.jsonl/62953 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 126
} | [
2830,
3393,
3564,
53,
16,
1702,
11562,
6861,
2077,
1155,
353,
8840,
836,
8,
341,
5195,
16,
1669,
1532,
53,
16,
1702,
11562,
6861,
2077,
741,
6948,
12808,
1155,
11,
2643,
83225,
11,
348,
16,
2532,
340,
6948,
59678,
1155,
11,
348,
16,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetConfigTxFailure(t *testing.T) {
rl := NewRAMLedger(10)
for i := 0; i < 10; i++ {
rl.Append(blockledger.CreateNextBlock(rl, []*cb.Envelope{
makeNormalTx(genesisconfig.TestChainID, i),
makeConfigTx(genesisconfig.TestChainID, i),
}))
}
rl.Append(blockledger.CreateNextBlock(rl, []*cb.Envelope{makeNormalTx(genesisconfig.TestChainID, 11)}))
assert.Panics(t, func() { getConfigTx(rl) }, "Should have panicked because there was no config tx")
block := blockledger.CreateNextBlock(rl, []*cb.Envelope{makeNormalTx(genesisconfig.TestChainID, 12)})
block.Metadata.Metadata[cb.BlockMetadataIndex_LAST_CONFIG] = []byte("bad metadata")
assert.Panics(t, func() { getConfigTx(rl) }, "Should have panicked because of bad last config metadata")
} | explode_data.jsonl/71396 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 278
} | [
2830,
3393,
1949,
2648,
51,
9770,
9373,
1155,
353,
8840,
836,
8,
341,
197,
2381,
1669,
1532,
49,
31102,
291,
1389,
7,
16,
15,
340,
2023,
600,
1669,
220,
15,
26,
600,
366,
220,
16,
15,
26,
600,
1027,
341,
197,
197,
2381,
8982,
18... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCT_XmlCellPrMarshalUnmarshal(t *testing.T) {
v := sml.NewCT_XmlCellPr()
buf, _ := xml.Marshal(v)
v2 := sml.NewCT_XmlCellPr()
xml.Unmarshal(buf, v2)
} | explode_data.jsonl/27940 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 82
} | [
2830,
3393,
1162,
6859,
1014,
3599,
3533,
55438,
1806,
27121,
1155,
353,
8840,
836,
8,
341,
5195,
1669,
274,
1014,
7121,
1162,
6859,
1014,
3599,
3533,
741,
26398,
11,
716,
1669,
8396,
37271,
3747,
340,
5195,
17,
1669,
274,
1014,
7121,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestUnparallelHashAggClose(t *testing.T) {
store, clean := testkit.CreateMockStore(t)
defer clean()
tk := testkit.NewTestKit(t, store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t values(1,1),(2,2)")
// Goroutine should not leak when error happen.
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/unparallelHashAggError", `return(true)`))
defer func() {
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/unparallelHashAggError"))
}()
ctx := context.Background()
rss, err := tk.Session().Execute(ctx, "select sum(distinct a) from (select cast(t.a as signed) as a, b from t) t group by b;")
require.NoError(t, err)
rs := rss[0]
req := rs.NewChunk(nil)
err = rs.Next(ctx, req)
require.EqualError(t, err, "HashAggExec.unparallelExec error")
} | explode_data.jsonl/38139 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 346
} | [
2830,
3393,
1806,
46103,
6370,
9042,
70,
7925,
1155,
353,
8840,
836,
8,
341,
57279,
11,
4240,
1669,
1273,
8226,
7251,
11571,
6093,
1155,
340,
16867,
4240,
741,
3244,
74,
1669,
1273,
8226,
7121,
2271,
7695,
1155,
11,
3553,
340,
3244,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNewControllerService(t *testing.T) {
var (
cloudObj cloud.Cloud
testErr = errors.New("test error")
testRegion = "test-region"
getNewCloudFunc = func(expectedRegion string) func(region string) (cloud.Cloud, error) {
return func(region string) (cloud.Cloud, error) {
if region != expectedRegion {
t.Fatalf("expected region %q but got %q", expectedRegion, region)
}
return cloudObj, nil
}
}
)
testCases := []struct {
name string
region string
newCloudFunc func(string) (cloud.Cloud, error)
newMetadataFuncErrors bool
expectPanic bool
}{
{
name: "AWS_REGION variable set, newCloud does not error",
region: "foo",
newCloudFunc: getNewCloudFunc("foo"),
},
{
name: "AWS_REGION variable set, newCloud errors",
region: "foo",
newCloudFunc: func(region string) (cloud.Cloud, error) {
return nil, testErr
},
expectPanic: true,
},
{
name: "AWS_REGION variable not set, newMetadata does not error",
newCloudFunc: getNewCloudFunc(testRegion),
},
{
name: "AWS_REGION variable not set, newMetadata errors",
newCloudFunc: getNewCloudFunc(testRegion),
newMetadataFuncErrors: true,
expectPanic: true,
},
}
driverOptions := &DriverOptions{
endpoint: "test",
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
oldNewCloudFunc := NewCloudFunc
defer func() { NewCloudFunc = oldNewCloudFunc }()
NewCloudFunc = tc.newCloudFunc
if tc.region == "" {
mockCtl := gomock.NewController(t)
defer mockCtl.Finish()
mockMetadataService := mocks.NewMockMetadataService(mockCtl)
oldNewMetadataFunc := NewMetadataFunc
defer func() { NewMetadataFunc = oldNewMetadataFunc }()
NewMetadataFunc = func() (cloud.MetadataService, error) {
if tc.newMetadataFuncErrors {
return nil, testErr
}
return mockMetadataService, nil
}
if !tc.newMetadataFuncErrors {
mockMetadataService.EXPECT().GetRegion().Return(testRegion)
}
} else {
os.Setenv("AWS_REGION", tc.region)
defer os.Unsetenv("AWS_REGION")
}
if tc.expectPanic {
defer func() {
if r := recover(); r == nil {
t.Errorf("The code did not panic")
}
}()
}
controllerService := newControllerService(driverOptions)
if controllerService.cloud != cloudObj {
t.Fatalf("expected cloud attribute to be equal to instantiated cloud object")
}
if !reflect.DeepEqual(controllerService.driverOptions, driverOptions) {
t.Fatalf("expected driverOptions attribute to be equal to input")
}
})
}
} | explode_data.jsonl/61516 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1172
} | [
2830,
3393,
3564,
2051,
1860,
1155,
353,
8840,
836,
8,
1476,
2405,
2399,
197,
197,
12361,
5261,
256,
9437,
94492,
198,
197,
18185,
7747,
262,
284,
5975,
7121,
445,
1944,
1465,
1138,
197,
18185,
14091,
284,
330,
1944,
60679,
1837,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestChasingCadence_ListAll(t *testing.T) {
key := "test api key"
var mockListResponse [1]invoiced.ChasingCadence
mockResponse := new(invoiced.ChasingCadence)
mockResponse.Id = int64(123)
mockResponse.Name = "standard"
mockResponse.CreatedAt = time.Now().UnixNano()
mockListResponse[0] = *mockResponse
server, err := invdmockserver.New(200, mockListResponse, "json", true)
if err != nil {
t.Fatal(err)
}
defer server.Close()
client := Client{invoiced.NewMockApi(key, server)}
filter := invoiced.NewFilter()
sorter := invoiced.NewSort()
result, err := client.ListAll(filter, sorter)
if err != nil {
t.Fatal("Error Creating entity", err)
}
if !reflect.DeepEqual(result[0], mockResponse) {
t.Fatal("Error messages do not match up")
}
} | explode_data.jsonl/71150 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 291
} | [
2830,
3393,
1143,
4422,
93338,
763,
27104,
2403,
1155,
353,
8840,
836,
8,
341,
23634,
1669,
330,
1944,
6330,
1376,
1837,
2405,
7860,
852,
2582,
508,
16,
60,
258,
3334,
7572,
6353,
4422,
93338,
763,
271,
77333,
2582,
1669,
501,
5900,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestSetup verifies that setup loads the idemix MSP fixture from testdata
// without error and that the resulting MSP reports IDEMIX as its type.
func TestSetup(t *testing.T) {
	msp, err := setup("testdata/idemix/MSP1OU1", "MSP1OU1")
	assert.NoError(t, err)
	assert.Equal(t, msp2.IDEMIX, msp.GetType())
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 75
} | [
2830,
3393,
21821,
1155,
353,
8840,
836,
8,
341,
47691,
79,
11,
1848,
1669,
6505,
445,
92425,
38146,
336,
941,
10270,
4592,
16,
11922,
16,
497,
330,
44,
4592,
16,
11922,
16,
1138,
6948,
35699,
1155,
11,
1848,
692,
6948,
12808,
1155,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestServiceCreateMaxMinScale creates a service through the fake CLI with
// min/max scale and concurrency flags, then verifies each flag is translated
// into the expected autoscaling annotation or spec field on the revision
// template.
func TestServiceCreateMaxMinScale(t *testing.T) {
	action, created, _, err := fakeServiceCreate([]string{
		"service", "create", "foo", "--image", "gcr.io/foo/bar:baz",
		"--scale-min", "1", "--scale-max", "5",
		"--concurrency-target", "10", "--concurrency-limit", "100",
		"--concurrency-utilization", "50",
		"--no-wait"}, false)
	if err != nil {
		t.Fatal(err)
	} else if !action.Matches("create", "services") {
		t.Fatalf("Bad action %v", action)
	}
	template := &created.Spec.Template
	actualAnnos := template.Annotations
	// Flattened key/value pairs: expectedAnnos[i] is the annotation key,
	// expectedAnnos[i+1] the value it must carry.
	expectedAnnos := []string{
		"autoscaling.knative.dev/minScale", "1",
		"autoscaling.knative.dev/maxScale", "5",
		"autoscaling.knative.dev/target", "10",
		"autoscaling.knative.dev/targetUtilizationPercentage", "50",
	}
	// Step by 2 over the flattened pairs.
	for i := 0; i < len(expectedAnnos); i += 2 {
		anno := expectedAnnos[i]
		if actualAnnos[anno] != expectedAnnos[i+1] {
			t.Fatalf("Unexpected annotation value for %s : %s (actual) != %s (expected)",
				anno, actualAnnos[anno], expectedAnnos[i+1])
		}
	}
	// --concurrency-limit maps to the spec field rather than an annotation.
	if *template.Spec.ContainerConcurrency != int64(100) {
		t.Fatalf("container concurrency not set to given value 100")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 444
} | [
2830,
3393,
1860,
4021,
5974,
6217,
6947,
1155,
353,
8840,
836,
8,
341,
38933,
11,
3465,
11,
8358,
1848,
1669,
12418,
1860,
4021,
10556,
917,
515,
197,
197,
1,
7936,
497,
330,
3182,
497,
330,
7975,
497,
14482,
1805,
497,
330,
70,
50... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestAutoSaveBelongsToAssociation verifies the association_autoupdate /
// association_autocreate struct tags on a belongs-to relation: with both
// disabled the associated Company is neither created nor updated on save,
// while an already-populated foreign key is still persisted; the tag
// defaults can be overridden per-call via DB.Set.
func TestAutoSaveBelongsToAssociation(t *testing.T) {
	type Company struct {
		orm.Model
		Name string
	}
	type User struct {
		orm.Model
		Name      string
		CompanyID uint
		Company   Company `orm:"association_autoupdate:false;association_autocreate:false;"`
	}
	// Start from a clean slate for the fixture company name.
	DB.Where("name = ?", "auto_save_association").Delete(&Company{})
	DB.AutoMigrate(&Company{}, &User{})
	DB.Save(&User{Name: "jinzhu", Company: Company{Name: "auto_save_association"}})
	if !DB.Where("name = ?", "auto_save_association").First(&Company{}).RecordNotFound() {
		t.Errorf("Company auto_save_association should not have been saved when autosave is false")
	}
	// if foreign key is set, this should be saved even if association isn't
	company := Company{Name: "auto_save_association"}
	DB.Save(&company)
	company.Name = "auto_save_association_new_name"
	user := User{Name: "jinzhu", Company: company}
	DB.Save(&user)
	if !DB.Where("name = ?", "auto_save_association_new_name").First(&Company{}).RecordNotFound() {
		t.Errorf("Company should not have been updated")
	}
	if DB.Where("id = ? AND company_id = ?", user.ID, company.ID).First(&User{}).RecordNotFound() {
		t.Errorf("User's foreign key should have been saved")
	}
	// Per-call override: autocreate enabled for this Save only.
	user2 := User{Name: "jinzhu_2", Company: Company{Name: "auto_save_association_2"}}
	DB.Set("orm:association_autocreate", true).Save(&user2)
	if DB.Where("name = ?", "auto_save_association_2").First(&Company{}).RecordNotFound() {
		t.Errorf("Company auto_save_association_2 should been created when autocreate is true")
	}
	// Per-call override: autoupdate enabled for this Save only.
	user2.Company.Name = "auto_save_association_2_newname"
	DB.Set("orm:association_autoupdate", true).Save(&user2)
	if DB.Where("name = ?", "auto_save_association_2_newname").First(&Company{}).RecordNotFound() {
		t.Errorf("Company should been updated")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 634
} | [
2830,
3393,
13253,
8784,
21666,
34225,
63461,
1155,
353,
8840,
836,
8,
341,
13158,
8188,
2036,
341,
197,
197,
493,
5659,
198,
197,
21297,
914,
198,
197,
630,
13158,
2657,
2036,
341,
197,
197,
493,
5659,
198,
197,
21297,
414,
914,
198,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestRDNSequenceString checks the String rendering of an RDNSequence,
// including a multi-valued RDN (OU + CN joined with '+'); per the expected
// output, RDNs are emitted in reverse of their sequence order.
func TestRDNSequenceString(t *testing.T) {
	// Test some extra cases that get lost in pkix.Name conversions such as
	// multi-valued attributes.
	var (
		oidCountry            = []int{2, 5, 4, 6}
		oidOrganization       = []int{2, 5, 4, 10}
		oidOrganizationalUnit = []int{2, 5, 4, 11}
		oidCommonName         = []int{2, 5, 4, 3}
	)
	tests := []struct {
		seq  pkix.RDNSequence
		want string
	}{
		{
			seq: pkix.RDNSequence{
				pkix.RelativeDistinguishedNameSET{
					pkix.AttributeTypeAndValue{Type: oidCountry, Value: "US"},
				},
				pkix.RelativeDistinguishedNameSET{
					pkix.AttributeTypeAndValue{Type: oidOrganization, Value: "Widget Inc."},
				},
				// Multi-valued RDN: two attributes in one SET.
				pkix.RelativeDistinguishedNameSET{
					pkix.AttributeTypeAndValue{Type: oidOrganizationalUnit, Value: "Sales"},
					pkix.AttributeTypeAndValue{Type: oidCommonName, Value: "J. Smith"},
				},
			},
			want: "OU=Sales+CN=J. Smith,O=Widget Inc.,C=US",
		},
	}
	for i, test := range tests {
		if got := test.seq.String(); got != test.want {
			t.Errorf("#%d: String() = \n%s\n, want \n%s", i, got, test.want)
		}
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 483
} | [
2830,
3393,
49,
31264,
14076,
703,
1155,
353,
8840,
836,
8,
341,
197,
322,
3393,
1045,
4960,
5048,
429,
633,
5558,
304,
22458,
941,
2967,
48722,
1741,
438,
198,
197,
322,
7299,
45154,
3260,
8201,
382,
2405,
2399,
197,
197,
588,
16408,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestValidateLocation(t *testing.T) {
tests := []struct {
name string
location string
propertiesnil bool
cs *ContainerService
expectedErr error
}{
{
name: "AzureStack location is empty",
location: "",
propertiesnil: false,
cs: &ContainerService{
Properties: &Properties{
CustomCloudProfile: &CustomCloudProfile{
PortalURL: "https://portal.testlocation.cotoso.com",
},
},
},
expectedErr: errors.New("missing ContainerService Location"),
},
{
name: "AzureStack UseInstanceMetadata is true",
location: "local",
propertiesnil: false,
cs: &ContainerService{
Location: "local",
Properties: &Properties{
CustomCloudProfile: &CustomCloudProfile{
PortalURL: "https://portal.local.cotoso.com",
},
OrchestratorProfile: &OrchestratorProfile{
OrchestratorType: Kubernetes,
OrchestratorVersion: "1.11.10",
KubernetesConfig: &KubernetesConfig{
UseInstanceMetadata: to.BoolPtr(trueVal),
},
},
},
},
expectedErr: errors.New("useInstanceMetadata shouldn't be set to true as feature not yet supported on Azure Stack"),
},
{
name: "AzureStack EtcdDiskSizeGB is 1024",
location: "local",
propertiesnil: false,
cs: &ContainerService{
Location: "local",
Properties: &Properties{
CustomCloudProfile: &CustomCloudProfile{
PortalURL: "https://portal.local.cotoso.com",
},
OrchestratorProfile: &OrchestratorProfile{
OrchestratorType: Kubernetes,
OrchestratorVersion: "1.11.10",
KubernetesConfig: &KubernetesConfig{
EtcdDiskSizeGB: "1024",
},
},
},
},
expectedErr: errors.Errorf("EtcdDiskSizeGB max size supported on Azure Stack is %d", MaxAzureStackManagedDiskSize),
},
{
name: "AzureStack EtcdDiskSizeGB is 1024",
location: "local",
propertiesnil: false,
cs: &ContainerService{
Location: "local",
Properties: &Properties{
CustomCloudProfile: &CustomCloudProfile{
PortalURL: "https://portal.local.cotoso.com",
},
OrchestratorProfile: &OrchestratorProfile{
OrchestratorType: Kubernetes,
OrchestratorVersion: "1.11.10",
KubernetesConfig: &KubernetesConfig{
EtcdDiskSizeGB: "1024GB",
},
},
},
},
expectedErr: errors.New("could not convert EtcdDiskSizeGB to int"),
},
{
name: "AzureStack AcceleratedNetworking is true",
location: "local",
propertiesnil: false,
cs: &ContainerService{
Location: "local",
Properties: &Properties{
CustomCloudProfile: &CustomCloudProfile{
PortalURL: "https://portal.local.cotoso.com",
},
OrchestratorProfile: &OrchestratorProfile{
OrchestratorType: Kubernetes,
OrchestratorVersion: "1.11.10",
},
AgentPoolProfiles: []*AgentPoolProfile{
{
Name: "testpool",
Count: 1,
VMSize: "Standard_D2_v2",
AcceleratedNetworkingEnabled: to.BoolPtr(trueVal),
},
},
},
},
expectedErr: errors.New("AcceleratedNetworkingEnabled or AcceleratedNetworkingEnabledWindows shouldn't be set to true as feature is not yet supported on Azure Stack"),
},
{
name: "AzureStack AcceleratedNetworking is true",
location: "local",
propertiesnil: false,
cs: &ContainerService{
Location: "local",
Properties: &Properties{
CustomCloudProfile: &CustomCloudProfile{
PortalURL: "https://portal.local.cotoso.com",
},
OrchestratorProfile: &OrchestratorProfile{
OrchestratorType: Kubernetes,
OrchestratorVersion: "1.11.10",
},
AgentPoolProfiles: []*AgentPoolProfile{
{
Name: "testpool",
Count: 1,
VMSize: "Standard_D2_v2",
AcceleratedNetworkingEnabledWindows: to.BoolPtr(trueVal),
},
},
},
},
expectedErr: errors.New("AcceleratedNetworkingEnabled or AcceleratedNetworkingEnabledWindows shouldn't be set to true as feature is not yet supported on Azure Stack"),
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
cs := getK8sDefaultContainerService(true)
cs.Location = test.cs.Location
if test.cs.Properties != nil {
if test.cs.Properties.CustomCloudProfile != nil {
cs.Properties.CustomCloudProfile = test.cs.Properties.CustomCloudProfile
}
if test.cs.Properties.OrchestratorProfile != nil {
cs.Properties.OrchestratorProfile = test.cs.Properties.OrchestratorProfile
}
if test.cs.Properties.AgentPoolProfiles != nil {
cs.Properties.AgentPoolProfiles = test.cs.Properties.AgentPoolProfiles
}
}
if test.propertiesnil {
cs.Properties = nil
}
gotErr := cs.Validate(false)
if !helpers.EqualError(gotErr, test.expectedErr) {
t.Logf("scenario %q", test.name)
t.Errorf("expected error: %v, got: %v", test.expectedErr, gotErr)
}
})
}
} | explode_data.jsonl/17894 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2386
} | [
2830,
3393,
17926,
4707,
1155,
353,
8840,
836,
8,
1476,
78216,
1669,
3056,
1235,
341,
197,
11609,
688,
914,
198,
197,
53761,
414,
914,
198,
197,
86928,
8385,
1807,
198,
197,
71899,
310,
353,
4502,
1860,
198,
197,
42400,
7747,
256,
146... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
// TestKeepComments parses input containing comments with the KeepComments
// option and verifies where they land in the AST: the leading comment is
// attached to the statement, the trailing one to File.Last.
func TestKeepComments(t *testing.T) {
	in := "# foo\ncmd\n# bar"
	want := &File{StmtList: StmtList{
		Stmts: []*Stmt{{
			// Comment text excludes the '#' but keeps the leading space.
			Comments: []Comment{{Text: " foo"}},
			Cmd:      litCall("cmd"),
		}},
		Last: []Comment{{Text: " bar"}},
	}}
	singleParse(NewParser(KeepComments), in, want)(t)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 126
} | [
2830,
3393,
19434,
17373,
1155,
353,
8840,
836,
8,
341,
17430,
1669,
5869,
15229,
59,
1016,
2277,
1699,
2,
3619,
698,
50780,
1669,
609,
1703,
90,
31063,
852,
25,
97023,
852,
515,
197,
197,
31063,
82,
25,
29838,
31063,
90,
515,
298,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestWithClientRequestName verifies that the WithClientRequestName option
// overrides the span name recorded for an outgoing HTTP request: exactly one
// span is captured and it carries the name returned by the callback.
func TestWithClientRequestName(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusTeapot)
	}))
	defer server.Close()
	// The naming callback ignores the request and returns a fixed name.
	option := apmhttp.WithClientRequestName(func(_ *http.Request) string {
		return "http://test"
	})
	_, spans, _ := apmtest.WithTransaction(func(ctx context.Context) {
		mustGET(ctx, server.URL, option)
	})
	require.Len(t, spans, 1)
	span := spans[0]
	assert.Equal(t, "http://test", span.Name)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 187
} | [
2830,
3393,
2354,
2959,
1900,
675,
1155,
353,
8840,
836,
8,
341,
41057,
1669,
54320,
70334,
7121,
5475,
19886,
89164,
18552,
3622,
1758,
37508,
11,
716,
353,
1254,
9659,
8,
341,
197,
6692,
69794,
19886,
10538,
6639,
89901,
340,
197,
441... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// Test_invalid_any checks the behaviour of an invalid Any, produced by an
// out-of-range path lookup on an empty JSON array: its type is InvalidValue
// and every To* conversion yields the corresponding zero value.
func Test_invalid_any(t *testing.T) {
	should := require.New(t)
	any := jsoniter.Get([]byte("[]"))
	should.Equal(jsoniter.InvalidValue, any.Get(0.3).ValueType())
	// is nil correct ? (an invalid Any carries no underlying value)
	should.Equal(nil, any.Get(0.3).GetInterface())
	any = any.Get(0.3)
	should.Equal(false, any.ToBool())
	should.Equal(int(0), any.ToInt())
	should.Equal(int32(0), any.ToInt32())
	should.Equal(int64(0), any.ToInt64())
	should.Equal(uint(0), any.ToUint())
	should.Equal(uint32(0), any.ToUint32())
	should.Equal(uint64(0), any.ToUint64())
	should.Equal(float32(0), any.ToFloat32())
	should.Equal(float64(0), any.ToFloat64())
	should.Equal("", any.ToString())
	// Chained lookups on an invalid Any stay invalid.
	should.Equal(jsoniter.InvalidValue, any.Get(0.1).Get(1).ValueType())
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 303
} | [
2830,
3393,
31433,
37248,
1155,
353,
8840,
836,
8,
341,
197,
5445,
1669,
1373,
7121,
1155,
340,
197,
3767,
1669,
2951,
2015,
2234,
10556,
3782,
445,
1294,
5455,
197,
5445,
12808,
9304,
2015,
48144,
1130,
11,
894,
2234,
7,
15,
13,
18,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSanity(t *testing.T) {
const endpoint = "csi-sanity.sock"
if err := os.Remove(endpoint); err != nil && !os.IsNotExist(err) {
t.Fatal(err)
}
listener, err := net.Listen("unix", endpoint)
if err != nil {
t.Fatal(err)
}
defer os.Remove(endpoint)
logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
logger = log.With(logger, "ts", log.DefaultTimestampUTC)
volumeService := volumes.NewIdempotentService(
log.With(logger, "component", "idempotent-volume-service"),
&sanityVolumeService{},
)
volumeMountService := &sanityMountService{}
controllerService := NewControllerService(
log.With(logger, "component", "driver-controller-service"),
volumeService,
"testloc",
)
identityService := NewIdentityService(
log.With(logger, "component", "driver-identity-service"),
)
nodeService := NewNodeService(
log.With(logger, "component", "driver-node-service"),
&hcloud.Server{
ID: 123456,
Datacenter: &hcloud.Datacenter{
Location: &hcloud.Location{
Name: "testloc",
},
},
},
volumeService,
volumeMountService,
)
grpcServer := grpc.NewServer()
proto.RegisterControllerServer(grpcServer, controllerService)
proto.RegisterIdentityServer(grpcServer, identityService)
proto.RegisterNodeServer(grpcServer, nodeService)
go func() {
if err := grpcServer.Serve(listener); err != nil {
t.Fatal(err)
}
}()
stagingDir, err := ioutil.TempDir("", "hcloud-csi-sanity-staging")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(stagingDir)
targetDir, err := ioutil.TempDir("", "hcloud-csi-sanity-target")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(targetDir)
sanity.Test(t, &sanity.Config{
StagingPath: stagingDir,
TargetPath: targetDir,
Address: endpoint,
})
} | explode_data.jsonl/37047 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 701
} | [
2830,
3393,
23729,
487,
1155,
353,
8840,
836,
8,
341,
4777,
14887,
284,
330,
63229,
1331,
38270,
68171,
1837,
743,
1848,
1669,
2643,
13270,
54869,
1215,
1848,
961,
2092,
1009,
753,
436,
4506,
45535,
3964,
8,
341,
197,
3244,
26133,
3964,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// Test_Number_Required runs the shared required-number check for each
// numeric validator type: for every type the zero value must be flagged as
// missing (true) and a non-zero value accepted (false).
func Test_Number_Required(t *testing.T) {
	testNumberRequired(t, "Number", []testNumber{
		{"1", Number(0), true},
		{"2", Number(1), false},
	})
	testNumberRequired(t, "NumF32", []testNumber{
		{"1", NumF32(0), true},
		{"2", NumF32(1), false},
	})
	testNumberRequired(t, "NumF64", []testNumber{
		{"1", NumF64(0), true},
		{"2", NumF64(1), false},
	})
	testNumberRequired(t, "NumI32", []testNumber{
		{"1", NumI32(0), true},
		{"2", NumI32(1), false},
	})
	testNumberRequired(t, "NumI64", []testNumber{
		{"1", NumI64(0), true},
		{"2", NumI64(1), false},
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 250
} | [
2830,
3393,
51799,
62,
8164,
1155,
353,
8840,
836,
8,
341,
18185,
2833,
8164,
1155,
11,
330,
2833,
497,
3056,
1944,
2833,
515,
197,
197,
4913,
16,
497,
5624,
7,
15,
701,
830,
1583,
197,
197,
4913,
17,
497,
5624,
7,
16,
701,
895,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestConvertEmptyCheckpointFromPb verifies that converting an empty
// protobuf checkpoint yields the empty in-memory checkpoint.
func TestConvertEmptyCheckpointFromPb(t *testing.T) {
	chkPb := emptyCheckpointPb()
	expected := emptyCheckpoint()
	require.Equal(t, expected, checkpointFromPb(chkPb))
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 62
} | [
2830,
3393,
12012,
3522,
92688,
3830,
47,
65,
1155,
353,
8840,
836,
8,
341,
23049,
74,
47,
65,
1669,
4287,
92688,
47,
65,
741,
42400,
1669,
4287,
92688,
741,
17957,
12808,
1155,
11,
3601,
11,
29295,
3830,
47,
65,
7520,
74,
47,
65,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestListen lexes and parses a bare "Listen" statement and verifies that
// the resulting program node stringifies back to `Listen`.
func TestListen(t *testing.T) {
	source := lexer.FromBytes([]byte("Listen\n"))
	tokens := lexer.Scan(source)
	program := Listen(tokens.Advance(), tokens)
	assert.Equal(t, `Listen`, program.String())
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 74
} | [
2830,
3393,
38714,
1155,
353,
8840,
836,
8,
341,
47418,
1669,
53259,
11439,
7078,
10556,
3782,
445,
38714,
1699,
5455,
3244,
9713,
1669,
53259,
54874,
12437,
340,
197,
14906,
1669,
32149,
34052,
17865,
85,
681,
1507,
11211,
340,
6948,
128... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestResourceQuotaDelete verifies that ResourceQuotas(ns).Delete issues a
// DELETE request against the expected resource path and surfaces no error
// for a 200 response.
func TestResourceQuotaDelete(t *testing.T) {
	ns := api.NamespaceDefault
	c := &testClient{
		// getResourceQuotasResoureName: (sic) helper name as defined elsewhere.
		Request:  testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, "foo"), Query: buildQueryValues(nil)},
		Response: Response{StatusCode: 200},
	}
	err := c.Setup(t).ResourceQuotas(ns).Delete("foo")
	c.Validate(t, nil, err)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 132
} | [
2830,
3393,
4783,
2183,
6089,
6435,
1155,
353,
8840,
836,
8,
341,
84041,
1669,
6330,
46011,
3675,
198,
1444,
1669,
609,
1944,
2959,
515,
197,
73806,
25,
220,
1273,
1900,
90,
3523,
25,
330,
14424,
497,
7933,
25,
1273,
2068,
13275,
2076... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestTransportReturnsPeekError verifies that an error hit while reading
// the response (after the request has been written) is propagated verbatim
// from RoundTrip.
func TestTransportReturnsPeekError(t *testing.T) {
	errValue := errors.New("specific error value")
	// Closed once the first write (the request) happens, releasing read.
	wrote := make(chan struct{})
	var wroteOnce sync.Once
	tr := &Transport{
		Dial: func(network, addr string) (net.Conn, error) {
			c := funcConn{
				// Block until the request was written, then fail the read.
				read: func([]byte) (int, error) {
					<-wrote
					return 0, errValue
				},
				write: func(p []byte) (int, error) {
					wroteOnce.Do(func() { close(wrote) })
					return len(p), nil
				},
			}
			return c, nil
		},
	}
	_, err := tr.RoundTrip(httptest.NewRequest("GET", "http://fake.tld/", nil))
	// The exact error value (not a wrapped copy) must come back.
	if err != errValue {
		t.Errorf("error = %#v; want %v", err, errValue)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 272
} | [
2830,
3393,
27560,
16446,
10197,
1225,
1454,
1155,
353,
8840,
836,
8,
341,
9859,
1130,
1669,
5975,
7121,
445,
51240,
1465,
897,
5130,
6692,
5529,
1669,
1281,
35190,
2036,
37790,
2405,
6139,
12522,
12811,
77946,
271,
25583,
1669,
609,
2756... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestMethodType_Call registers the Foo service, looks up its "Sum" method
// and invokes it through svc.call; per the assertions, Args{1, 2} must
// produce the int reply 3.
func TestMethodType_Call(t *testing.T) {
	svc := newService(Foo{})
	mType := svc.method["Sum"]
	// Build argument and reply values via the method's reflection helpers.
	args := mType.newArgv()
	reply := mType.newReply()
	args.Set(reflect.ValueOf(Args{1, 2}))
	err := svc.call(mType, args, reply)
	_assert(t, err == nil, "call failed")
	_assert(t, *reply.Interface().(*int) == 3, "call failed")
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 132
} | [
2830,
3393,
3523,
929,
76028,
1155,
353,
8840,
836,
8,
341,
1903,
7362,
1669,
501,
1860,
7832,
2624,
37790,
2109,
929,
1669,
46154,
12908,
1183,
9190,
7026,
31215,
1669,
296,
929,
4618,
2735,
85,
741,
86149,
1669,
296,
929,
4618,
20841,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestCreateObjectWithoutContentType verifies that Create succeeds when no
// explicit content type is supplied in CreateOpts.
func TestCreateObjectWithoutContentType(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()
	content := "The sky was the color of television, tuned to a dead channel."
	HandleCreateTypelessObjectSuccessfully(t, content)
	res := Create(fake.ServiceClient(), "testContainer", "testObject", strings.NewReader(content), &CreateOpts{})
	th.AssertNoErr(t, res.Err)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 117
} | [
2830,
3393,
4021,
1190,
26040,
29504,
1155,
353,
8840,
836,
8,
341,
70479,
39820,
9230,
741,
16867,
270,
94849,
37496,
9230,
2822,
27751,
1669,
330,
785,
12884,
572,
279,
1894,
315,
12425,
11,
32419,
311,
264,
5593,
5496,
2217,
197,
699... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestClientFund exercises Client.Fund against a mocked Horizon friendbot
// endpoint: funding is rejected off-testnet, succeeds on testnet (fields of
// the returned TransactionSuccess are checked against the fixture), and a
// 400 response surfaces as a horizon error with a decodable result string.
func TestClientFund(t *testing.T) {
	hmock := httptest.NewClient()
	client := &Client{
		HorizonURL: "https://localhost/",
		HTTP:       hmock,
	}
	testAccount := "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"
	// not testnet
	hmock.On(
		"GET",
		"https://localhost/friendbot?addr=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU",
	).ReturnString(200, txSuccess)
	_, err := client.Fund(testAccount)
	// error case: not testnet
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "can't fund account from friendbot on production network")
	}
	// happy path
	hmock.On(
		"GET",
		"https://localhost/friendbot?addr=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU",
	).ReturnString(200, txSuccess)
	client.isTestNet = true
	resp, err := client.Fund(testAccount)
	if assert.NoError(t, err) {
		assert.IsType(t, resp, hProtocol.TransactionSuccess{})
		assert.Equal(t, resp.Links.Transaction.Href, "https://horizon-testnet.stellar.org/transactions/bcc7a97264dca0a51a63f7ea971b5e7458e334489673078bb2a34eb0cce910ca")
		assert.Equal(t, resp.Hash, "bcc7a97264dca0a51a63f7ea971b5e7458e334489673078bb2a34eb0cce910ca")
		assert.Equal(t, resp.Ledger, int32(354811))
		// Env/Result/Meta are base64-encoded XDR blobs from the fixture.
		assert.Equal(t, resp.Env, `AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAZAAABD0AAuV/AAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAyTBGxOgfSApppsTnb/YRr6gOR8WT0LZNrhLh4y3FCgoAAAAXSHboAAAAAAAAAAABhlbgnAAAAEAivKe977CQCxMOKTuj+cWTFqc2OOJU8qGr9afrgu2zDmQaX5Q0cNshc3PiBwe0qw/+D/qJk5QqM5dYeSUGeDQP`)
		assert.Equal(t, resp.Result, "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=")
		assert.Equal(t, resp.Meta, `AAAAAQAAAAIAAAADAAVp+wAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwACBP/TuycHAAABD0AAuV+AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAVp+wAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwACBP/TuycHAAABD0AAuV/AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMABWn7AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAIE/9O7JwcAAAEPQAC5X8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABWn7AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAIE+gGdbQcAAAEPQAC5X8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAABWn7AAAAAAAAAADJMEbE6B9ICmmmxOdv9hGvqA5HxZPQtk2uEuHjLcUKCgAAABdIdugAAAVp+wAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==`)
	}
	// failure response
	hmock.On(
		"GET",
		"https://localhost/friendbot?addr=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU",
	).ReturnString(400, transactionFailure)
	_, err = client.Fund(testAccount)
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "horizon error")
		horizonError, ok := errors.Cause(err).(*Error)
		assert.Equal(t, ok, true)
		assert.Equal(t, horizonError.Problem.Title, "Transaction Failed")
		resultString, err := horizonError.ResultString()
		assert.Nil(t, err)
		assert.Equal(t, resultString, "AAAAAAAAAAD////4AAAAAA==")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1317
} | [
2830,
3393,
2959,
58340,
1155,
353,
8840,
836,
8,
341,
9598,
16712,
1669,
54320,
70334,
7121,
2959,
741,
25291,
1669,
609,
2959,
515,
197,
13292,
269,
16973,
3144,
25,
330,
2428,
1110,
8301,
35075,
197,
197,
9230,
25,
981,
305,
16712,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestChannelArbitratorLocalForceClose drives a ChannelArbitrator through a
// local force close: StateDefault -> StateBroadcastCommit ->
// StateCommitmentBroadcasted -> StateContractClosed -> StateFullyResolved.
// It checks the state observed at the moment the close tx is published and
// that the channel is marked resolved once the local unilateral closure is
// confirmed.
func TestChannelArbitratorLocalForceClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}
	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb
	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()
	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)
	// We create a channel we can use to pause the ChannelArbitrator at the
	// point where it broadcasts the close tx, and check its state.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx, string) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return nil
	}
	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)
	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}
	// It should transition to StateBroadcastCommit.
	chanArbCtx.AssertStateTransitions(StateBroadcastCommit)
	// When it is broadcasting the force close, its state should be
	// StateBroadcastCommit.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(stateTimeout):
		t.Fatalf("did not get state update")
	}
	// After broadcasting, transition should be to
	// StateCommitmentBroadcasted.
	chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)
	select {
	case <-respChan:
	case <-time.After(defaultTimeout):
		t.Fatalf("no response received")
	}
	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(defaultTimeout):
		t.Fatalf("no response received")
	}
	// After broadcasting the close tx, it should be in state
	// StateCommitmentBroadcasted.
	chanArbCtx.AssertState(StateCommitmentBroadcasted)
	// Now notify about the local force close getting confirmed.
	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
		SpendDetail: &chainntnfs.SpendDetail{},
		LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
			CloseTx:         &wire.MsgTx{},
			HtlcResolutions: &lnwallet.HtlcResolutions{},
		},
		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
	}
	// It should transition StateContractClosed -> StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)
	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(defaultTimeout):
		t.Fatalf("contract was not resolved")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1037
} | [
2830,
3393,
9629,
6953,
4489,
81,
850,
7319,
18573,
7925,
1155,
353,
8840,
836,
8,
341,
6725,
1669,
609,
16712,
6953,
4489,
81,
850,
2201,
515,
197,
24291,
25,
257,
3234,
3675,
345,
197,
8638,
23256,
25,
1281,
35190,
58795,
81,
850,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func Test_Validate_CloudConfiguration(t *testing.T) {
grid := []struct {
Description string
Input kops.CloudConfiguration
ExpectedErrors []string
}{
{
Description: "neither",
Input: kops.CloudConfiguration{},
},
{
Description: "all false",
Input: kops.CloudConfiguration{
ManageStorageClasses: fi.Bool(false),
},
},
{
Description: "all true",
Input: kops.CloudConfiguration{
ManageStorageClasses: fi.Bool(true),
},
},
{
Description: "os false",
Input: kops.CloudConfiguration{
Openstack: &kops.OpenstackConfiguration{
BlockStorage: &kops.OpenstackBlockStorageConfig{
CreateStorageClass: fi.Bool(false),
},
}},
},
{
Description: "os true",
Input: kops.CloudConfiguration{
Openstack: &kops.OpenstackConfiguration{
BlockStorage: &kops.OpenstackBlockStorageConfig{
CreateStorageClass: fi.Bool(true),
},
}},
},
{
Description: "all false, os false",
Input: kops.CloudConfiguration{
ManageStorageClasses: fi.Bool(false),
Openstack: &kops.OpenstackConfiguration{
BlockStorage: &kops.OpenstackBlockStorageConfig{
CreateStorageClass: fi.Bool(false),
},
}},
},
{
Description: "all false, os true",
Input: kops.CloudConfiguration{
ManageStorageClasses: fi.Bool(false),
Openstack: &kops.OpenstackConfiguration{
BlockStorage: &kops.OpenstackBlockStorageConfig{
CreateStorageClass: fi.Bool(true),
},
}},
ExpectedErrors: []string{"Forbidden::cloudConfig.manageStorageClasses"},
},
{
Description: "all true, os false",
Input: kops.CloudConfiguration{
ManageStorageClasses: fi.Bool(true),
Openstack: &kops.OpenstackConfiguration{
BlockStorage: &kops.OpenstackBlockStorageConfig{
CreateStorageClass: fi.Bool(false),
},
}},
ExpectedErrors: []string{"Forbidden::cloudConfig.manageStorageClasses"},
},
{
Description: "all true, os true",
Input: kops.CloudConfiguration{
ManageStorageClasses: fi.Bool(true),
Openstack: &kops.OpenstackConfiguration{
BlockStorage: &kops.OpenstackBlockStorageConfig{
CreateStorageClass: fi.Bool(true),
},
}},
},
}
for _, g := range grid {
fldPath := field.NewPath("cloudConfig")
t.Run(g.Description, func(t *testing.T) {
errs := validateCloudConfiguration(&g.Input, fldPath)
testErrors(t, g.Input, errs, g.ExpectedErrors)
})
}
} | explode_data.jsonl/61624 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1018
} | [
2830,
3393,
62,
17926,
920,
52178,
7688,
1155,
353,
8840,
836,
8,
341,
49018,
1669,
3056,
1235,
341,
197,
47414,
262,
914,
198,
197,
66588,
688,
595,
3721,
94492,
7688,
198,
197,
197,
18896,
13877,
3056,
917,
198,
197,
59403,
197,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSecureBoot(t *testing.T) {
var result SecureBoot
err := json.NewDecoder(strings.NewReader(secureBootBody)).Decode(&result)
if err != nil {
t.Errorf("Error decoding JSON: %s", err)
}
if result.ID != "SecureBoot-1" {
t.Errorf("Received invalid ID: %s", result.ID)
}
if result.Name != "SecureBootOne" {
t.Errorf("Received invalid name: %s", result.Name)
}
if result.SecureBootCurrentBoot != EnabledSecureBootCurrentBootType {
t.Errorf("Invalid SecureBootCurrentBoot: %s", result.SecureBootCurrentBoot)
}
if !result.SecureBootEnable {
t.Error("SecureBootEnable should be true")
}
if result.SecureBootMode != UserModeSecureBootModeType {
t.Errorf("Invalid SecureBootMode: %s", result.SecureBootMode)
}
if result.resetKeysTarget != "/redfish/v1/SecureBoot/Actions/SecureBoot.ResetKeys" {
t.Errorf("Invalid ResetKeys target: %s", result.resetKeysTarget)
}
} | explode_data.jsonl/36220 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 311
} | [
2830,
3393,
49813,
17919,
1155,
353,
8840,
836,
8,
341,
2405,
1102,
34502,
17919,
198,
9859,
1669,
2951,
7121,
20732,
51442,
68587,
7,
25132,
17919,
5444,
4579,
32564,
2099,
1382,
692,
743,
1848,
961,
2092,
341,
197,
3244,
13080,
445,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestRun(t *testing.T) {
for _, dockerfile := range allDockerfiles {
t.Run("test_"+dockerfile, func(t *testing.T) {
dockerfile := dockerfile
t.Parallel()
if _, ok := imageBuilder.DockerfilesToIgnore[dockerfile]; ok {
t.SkipNow()
}
if _, ok := imageBuilder.TestCacheDockerfiles[dockerfile]; ok {
t.SkipNow()
}
buildImage(t, dockerfile, imageBuilder)
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
})
}
err := logBenchmarks("benchmark")
if err != nil {
t.Logf("Failed to create benchmark file: %v", err)
}
} | explode_data.jsonl/667 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 324
} | [
2830,
3393,
6727,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
26588,
1192,
1669,
2088,
678,
35,
13659,
7198,
341,
197,
3244,
16708,
445,
1944,
33415,
28648,
1192,
11,
2915,
1155,
353,
8840,
836,
8,
341,
298,
2698,
13659,
1192,
1669,
265... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestService_Login(t *testing.T) {
correctEmail := "demo@demo.com"
correctPassword := "demoPassword123!"
correctSalt := "$2a$10$T/26fFbPqC9GY/zsQgGuGO1djroBCIXbL1kRXQpDw.OlKPniDTQt2---"
type args struct {
req *request.Login
}
tests := []struct {
name string
args args
wantErr bool
}{
{
"get success with correct email and password",
args{&request.Login{Email: correctEmail, Password: correctPassword}},
false,
},
{
"unauthorized error with wrong email",
args{&request.Login{Email: "random@email.com", Password: correctPassword}},
true,
},
{
"get error with empty fields",
args{&request.Login{Email: "", Password: ""}},
true,
},
{
"get error with wrong password",
args{&request.Login{Email: correctEmail, Password: "wrongpass"}},
true,
},
}
fk := &mocks.FakeDBClient{}
fk.GetUserStub = func(email string) (*models.User, error) {
if email == "" {
return &models.User{Email: email, Name: "", Hash: "", Verified: false}, errors.New("email can not be empty")
}
if email != correctEmail {
return &models.User{Email: email, Name: "", Hash: "", Verified: false}, errors.New("incorrect email")
}
return &models.User{Email: correctEmail, Name: "", Hash: correctSalt, Verified: true}, nil
}
s := &Service{db: fk}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := s.Login(nil, tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("Service.Login() error = %v, wantErr %v", err, tt.wantErr)
return
}
})
}
} | explode_data.jsonl/22355 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 643
} | [
2830,
3393,
1860,
79232,
1155,
353,
8840,
836,
8,
341,
1444,
27034,
4781,
1669,
330,
25762,
31,
25762,
905,
698,
1444,
27034,
4876,
1669,
330,
25762,
4876,
16,
17,
18,
24734,
1444,
27034,
47318,
1669,
5201,
17,
64,
3,
16,
15,
3,
51,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.