text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func Test_keepUsername(t *testing.T) {
db, _, dispose := GetInMemoryTestDB(t, GetInMemoryTestDBOptsNoInit)
defer dispose()
log := zap.NewNop()
require.Equal(t, "", keepDisplayName(db.db, nil))
// table schema on 2020_10_13
require.NoError(t, db.db.Exec("CREATE TABLE accounts (public_key text, display_name text, link text, replicate_new_groups_automatically numeric DEFAULT true,PRIMARY KEY (public_key))").Error)
require.Equal(t, "", keepDisplayName(db.db, log))
require.NoError(t, db.db.Exec(`INSERT INTO accounts (public_key, display_name, link, replicate_new_groups_automatically) VALUES ("pk_1", "display_name_1", "http://display_name_1/", true)`).Error)
require.Equal(t, "display_name_1", keepDisplayName(db.db, log))
require.NoError(t, db.db.Exec(`INSERT INTO accounts (public_key, display_name, link, replicate_new_groups_automatically) VALUES ("pk_2", "display_name_2", "http://display_name_2/", true)`).Error)
require.Equal(t, "display_name_1", keepDisplayName(db.db, log))
} | explode_data.jsonl/3226 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 368
} | [
2830,
3393,
50293,
11115,
1155,
353,
8840,
836,
8,
341,
20939,
11,
8358,
27390,
1669,
2126,
641,
10642,
2271,
3506,
1155,
11,
2126,
641,
10642,
2271,
3506,
43451,
2753,
3803,
340,
16867,
27390,
2822,
6725,
1669,
32978,
7121,
45,
453,
28... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStartAnchor(t *testing.T) {
testInputs(t, `
|a
b|
|c|
`,
[]TestInput{
{URL: "a", Matched: true},
{URL: "za", Matched: false},
{URL: "az", Matched: true},
{URL: "b", Matched: true},
{URL: "zb", Matched: true},
{URL: "bz", Matched: false},
{URL: "c", Matched: true},
{URL: "zc", Matched: false},
{URL: "cz", Matched: false},
})
} | explode_data.jsonl/81615 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 185
} | [
2830,
3393,
3479,
14677,
1155,
353,
8840,
836,
8,
341,
18185,
31946,
1155,
11,
22074,
91,
64,
198,
65,
7360,
91,
66,
7360,
12892,
197,
197,
1294,
2271,
2505,
515,
298,
197,
90,
3144,
25,
330,
64,
497,
14152,
291,
25,
830,
1583,
29... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_Hoverfly_AddDiff_AppendsEntry(t *testing.T) {
RegisterTestingT(t)
unit := NewHoverflyWithConfiguration(&Configuration{})
Expect(unit.responsesDiff).To(HaveLen(0))
key := v2.SimpleRequestDefinitionView{
Host: "test.com",
}
unit.AddDiff(key, v2.DiffReport{Timestamp: "now", DiffEntries: []v2.DiffReportEntry{{Actual: "1"}}})
unit.AddDiff(key, v2.DiffReport{Timestamp: "now", DiffEntries: []v2.DiffReportEntry{{Actual: "2"}}})
Expect(unit.responsesDiff).To(HaveLen(1))
diffReports := unit.responsesDiff[key]
Expect(diffReports).To(HaveLen(2))
Expect(diffReports[0].DiffEntries[0].Actual).To(Equal("1"))
Expect(diffReports[1].DiffEntries[0].Actual).To(Equal("2"))
} | explode_data.jsonl/45405 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 268
} | [
2830,
3393,
2039,
1975,
21642,
21346,
21751,
36117,
1412,
5874,
1155,
353,
8840,
836,
8,
341,
79096,
16451,
51,
1155,
692,
81189,
1669,
1532,
34379,
21642,
2354,
7688,
2099,
7688,
37790,
35911,
24144,
97961,
21751,
568,
1249,
77057,
11271,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReconcileWithWhenExpressionsWithTaskResults(t *testing.T) {
names.TestingSeed()
ps := []*v1beta1.Pipeline{{
ObjectMeta: baseObjectMeta("test-pipeline", "foo"),
Spec: v1beta1.PipelineSpec{
Tasks: []v1beta1.PipelineTask{
{
Name: "a-task",
TaskRef: &v1beta1.TaskRef{Name: "a-task"},
},
{
Name: "b-task",
TaskRef: &v1beta1.TaskRef{Name: "b-task"},
WhenExpressions: []v1beta1.WhenExpression{
{
Input: "$(tasks.a-task.results.aResult)",
Operator: selection.In,
Values: []string{"aResultValue"},
},
{
Input: "aResultValue",
Operator: selection.In,
Values: []string{"$(tasks.a-task.results.aResult)"},
},
},
},
{
Name: "c-task",
TaskRef: &v1beta1.TaskRef{Name: "c-task"},
WhenExpressions: []v1beta1.WhenExpression{{
Input: "$(tasks.a-task.results.aResult)",
Operator: selection.In,
Values: []string{"missing"},
}},
},
{
Name: "d-task",
TaskRef: &v1beta1.TaskRef{Name: "d-task"},
RunAfter: []string{"c-task"},
},
},
},
}}
prs := []*v1beta1.PipelineRun{{
ObjectMeta: baseObjectMeta("test-pipeline-run-different-service-accs", "foo"),
Spec: v1beta1.PipelineRunSpec{
PipelineRef: &v1beta1.PipelineRef{Name: "test-pipeline"},
ServiceAccountName: "test-sa-0",
},
}}
ts := []*v1beta1.Task{
{ObjectMeta: baseObjectMeta("a-task", "foo")},
{ObjectMeta: baseObjectMeta("b-task", "foo")},
{ObjectMeta: baseObjectMeta("c-task", "foo")},
{ObjectMeta: baseObjectMeta("d-task", "foo")},
}
trs := []*v1beta1.TaskRun{{
ObjectMeta: taskRunObjectMeta("test-pipeline-run-different-service-accs-a-task-xxyyy", "foo",
"test-pipeline-run-different-service-accs", "test-pipeline", "a-task",
true),
Spec: v1beta1.TaskRunSpec{
TaskRef: &v1beta1.TaskRef{Name: "hello-world"},
ServiceAccountName: "test-sa",
Resources: &v1beta1.TaskRunResources{},
Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute},
},
Status: v1beta1.TaskRunStatus{
Status: duckv1beta1.Status{
Conditions: duckv1beta1.Conditions{
apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
},
},
},
TaskRunStatusFields: v1beta1.TaskRunStatusFields{
TaskRunResults: []v1beta1.TaskRunResult{{
Name: "aResult",
Value: "aResultValue",
}},
},
},
}}
d := test.Data{
PipelineRuns: prs,
Pipelines: ps,
Tasks: ts,
TaskRuns: trs,
}
prt := newPipelineRunTest(d, t)
defer prt.Cancel()
wantEvents := []string{
"Normal Started",
"Normal Running Tasks Completed: 1 \\(Failed: 0, Cancelled 0\\), Incomplete: 1, Skipped: 2",
}
pipelineRun, clients := prt.reconcileRun("foo", "test-pipeline-run-different-service-accs", wantEvents, false)
expectedTaskRunName := "test-pipeline-run-different-service-accs-b-task-9l9zj"
expectedTaskRun := &v1beta1.TaskRun{
ObjectMeta: taskRunObjectMeta(expectedTaskRunName, "foo", "test-pipeline-run-different-service-accs", "test-pipeline", "b-task", false),
Spec: v1beta1.TaskRunSpec{
TaskRef: &v1beta1.TaskRef{Name: "b-task"},
ServiceAccountName: "test-sa-0",
Resources: &v1beta1.TaskRunResources{},
Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute},
},
}
// Check that the expected TaskRun was created
actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{
LabelSelector: "tekton.dev/pipelineTask=b-task,tekton.dev/pipelineRun=test-pipeline-run-different-service-accs",
Limit: 1,
})
if err != nil {
t.Fatalf("Failure to list TaskRun's %s", err)
}
if len(actual.Items) != 1 {
t.Fatalf("Expected 1 TaskRuns got %d", len(actual.Items))
}
actualTaskRun := actual.Items[0]
if d := cmp.Diff(&actualTaskRun, expectedTaskRun, ignoreResourceVersion); d != "" {
t.Errorf("expected to see TaskRun %v created. Diff %s", expectedTaskRunName, diff.PrintWantGot(d))
}
actualWhenExpressionsInTaskRun := pipelineRun.Status.PipelineRunStatusFields.TaskRuns[expectedTaskRunName].WhenExpressions
expectedWhenExpressionsInTaskRun := []v1beta1.WhenExpression{{
Input: "aResultValue",
Operator: "in",
Values: []string{"aResultValue"},
}, {
Input: "aResultValue",
Operator: "in",
Values: []string{"aResultValue"},
}}
if d := cmp.Diff(expectedWhenExpressionsInTaskRun, actualWhenExpressionsInTaskRun); d != "" {
t.Errorf("expected to see When Expressions %v created. Diff %s", expectedTaskRunName, diff.PrintWantGot(d))
}
actualSkippedTasks := pipelineRun.Status.SkippedTasks
expectedSkippedTasks := []v1beta1.SkippedTask{{
Name: "c-task",
WhenExpressions: v1beta1.WhenExpressions{{
Input: "aResultValue",
Operator: "in",
Values: []string{"missing"},
}},
}, {
Name: "d-task",
}}
if d := cmp.Diff(actualSkippedTasks, expectedSkippedTasks); d != "" {
t.Errorf("expected to find Skipped Tasks %v. Diff %s", expectedSkippedTasks, diff.PrintWantGot(d))
}
skippedTasks := []string{"c-task", "d-task"}
for _, skippedTask := range skippedTasks {
labelSelector := fmt.Sprintf("tekton.dev/pipelineTask=%s,tekton.dev/pipelineRun=test-pipeline-run-different-service-accs", skippedTask)
actualSkippedTask, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{
LabelSelector: labelSelector,
Limit: 1,
})
if err != nil {
t.Fatalf("Failure to list TaskRun's %s", err)
}
if len(actualSkippedTask.Items) != 0 {
t.Fatalf("Expected 0 TaskRuns got %d", len(actualSkippedTask.Items))
}
}
} | explode_data.jsonl/68288 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2545
} | [
2830,
3393,
693,
40446,
457,
2354,
4498,
40315,
2354,
6262,
9801,
1155,
353,
8840,
836,
8,
341,
93940,
8787,
287,
41471,
741,
35009,
1669,
29838,
85,
16,
19127,
16,
1069,
8790,
90,
515,
197,
23816,
12175,
25,
2331,
1190,
12175,
445,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func Test_parseOutput3(t *testing.T) {
case3 := "DISK OK - free space: /root 3326 MB (56%); | /=2643MB;5948;5958;0;5968\n/ 15272 MB (77%);\n/boot 68 MB (69%); | /boot=68MB;88;93;0;98\n/home=69357MB;253404;253409;0;253414"
expectedServiceOutput := "DISK OK - free space: /root 3326 MB (56%); "
expectedLongServiceOutput := "/ 15272 MB (77%);\n/boot 68 MB (69%); "
expectedServicePerfData := map[string]float64{
"/": 2643.0,
"/boot": 68.0,
"/home": 69357.0,
}
serviceOutput, longServiceOutput, servicePerfData := parseOutput(case3)
assert.Equal(t, expectedServiceOutput, serviceOutput)
assert.Equal(t, expectedLongServiceOutput, longServiceOutput)
assert.Equal(t, expectedServicePerfData, servicePerfData)
} | explode_data.jsonl/8926 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 286
} | [
2830,
3393,
21039,
5097,
18,
1155,
353,
8840,
836,
8,
341,
2722,
18,
1669,
330,
21202,
42,
10402,
481,
1910,
3550,
25,
608,
2888,
220,
18,
18,
17,
21,
13339,
320,
20,
21,
4,
1215,
760,
16455,
17,
21,
19,
18,
8412,
26,
20,
24,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUpdateReleaseFailure(t *testing.T) {
c := helm.NewContext()
rs := rsFixture()
rel := releaseStub()
rs.env.Releases.Create(rel)
rs.env.KubeClient = newUpdateFailingKubeClient()
rs.Log = t.Logf
req := &services.UpdateReleaseRequest{
Name: rel.Name,
DisableHooks: true,
Chart: &chart.Chart{
Metadata: &chart.Metadata{Name: "hello"},
Templates: []*chart.Template{
{Name: "templates/something", Data: []byte("hello: world")},
},
},
}
res, err := rs.UpdateRelease(c, req)
if err == nil {
t.Error("Expected failed update")
}
if updatedStatus := res.Release.Info.Status.Code; updatedStatus != release.Status_FAILED {
t.Errorf("Expected FAILED release. Got %d", updatedStatus)
}
compareStoredAndReturnedRelease(t, *rs, *res)
expectedDescription := "Upgrade \"angry-panda\" failed: Failed update in kube client"
if got := res.Release.Info.Description; got != expectedDescription {
t.Errorf("Expected description %q, got %q", expectedDescription, got)
}
oldRelease, err := rs.env.Releases.Get(rel.Name, rel.Version)
if err != nil {
t.Errorf("Expected to be able to get previous release")
}
if oldStatus := oldRelease.Info.Status.Code; oldStatus != release.Status_DEPLOYED {
t.Errorf("Expected Deployed status on previous Release version. Got %v", oldStatus)
}
} | explode_data.jsonl/82240 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 477
} | [
2830,
3393,
4289,
16077,
17507,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
33765,
7121,
1972,
741,
41231,
1669,
10036,
18930,
741,
197,
3748,
1669,
4879,
33838,
741,
41231,
9265,
2817,
28299,
7251,
49235,
340,
41231,
9265,
11352,
3760,
295... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestSortByLeafIdentityHash(t *testing.T) {
l := make([]*trillian.LogLeaf, 30)
for i := range l {
hash := sha256.Sum256([]byte{byte(i)})
leaf := trillian.LogLeaf{
LeafIdentityHash: hash[:],
LeafValue: []byte(fmt.Sprintf("Value %d", i)),
ExtraData: []byte(fmt.Sprintf("Extra %d", i)),
LeafIndex: int64(i),
}
l[i] = &leaf
}
sort.Sort(byLeafIdentityHash(l))
for i := range l {
if i == 0 {
continue
}
if bytes.Compare(l[i-1].LeafIdentityHash, l[i].LeafIdentityHash) != -1 {
t.Errorf("sorted leaves not in order, [%d] = %x, [%d] = %x", i-1, l[i-1].LeafIdentityHash, i, l[i].LeafIdentityHash)
}
}
} | explode_data.jsonl/30706 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 327
} | [
2830,
3393,
10231,
1359,
31461,
18558,
6370,
1155,
353,
8840,
836,
8,
341,
8810,
1669,
1281,
85288,
376,
64721,
5247,
31461,
11,
220,
18,
15,
340,
2023,
600,
1669,
2088,
326,
341,
197,
50333,
1669,
15870,
17,
20,
21,
41676,
17,
20,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestStressDuplex(t *testing.T) {
// Limit runtime in case of deadlocks
lim := test.TimeOut(time.Second * 20)
defer lim.Stop()
// Check for leaking routines
report := test.CheckRoutines(t)
defer report()
// Run the test
stressDuplex(t)
} | explode_data.jsonl/40926 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 91
} | [
2830,
3393,
623,
673,
85713,
2571,
1155,
353,
8840,
836,
8,
341,
197,
322,
28008,
15592,
304,
1142,
315,
5593,
34936,
198,
197,
4659,
1669,
1273,
16299,
2662,
9730,
32435,
353,
220,
17,
15,
340,
16867,
4568,
30213,
2822,
197,
322,
424... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInterpolationWalker_replace(t *testing.T) {
cases := []struct {
Input interface{}
Output interface{}
Value string
}{
{
Input: map[string]interface{}{
"foo": "##{var.foo}",
},
Output: map[string]interface{}{
"foo": "bar",
},
Value: "bar",
},
{
Input: map[string]interface{}{
"foo": "hi, #{var.foo}",
},
Output: map[string]interface{}{
"foo": "bar",
},
Value: "bar",
},
{
Input: map[string]interface{}{
"foo": map[string]interface{}{
"#{var.foo}": "bar",
},
},
Output: map[string]interface{}{
"foo": map[string]interface{}{
"bar": "bar",
},
},
Value: "bar",
},
/*
{
Input: map[string]interface{}{
"foo": []interface{}{
"#{var.foo}",
"bing",
},
},
Output: map[string]interface{}{
"foo": []interface{}{
"bar",
"baz",
"bing",
},
},
Value: NewStringList([]string{"bar", "baz"}).String(),
},
{
Input: map[string]interface{}{
"foo": []interface{}{
"#{var.foo}",
"bing",
},
},
Output: map[string]interface{}{},
Value: NewStringList([]string{UnknownVariableValue, "baz"}).String(),
},
*/
}
for i, tc := range cases {
fn := func(data *WalkData) error {
data.Replace = true
data.ReplaceValue = tc.Value
return nil
}
if err := Walk(tc.Input, fn); err != nil {
t.Fatalf("err: %s", err)
}
if !reflect.DeepEqual(tc.Input, tc.Output) {
t.Fatalf("%d: bad:\n\nexpected:%#v\ngot:%#v", i, tc.Output, tc.Input)
}
}
} | explode_data.jsonl/58422 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 792
} | [
2830,
3393,
3306,
44686,
84892,
10633,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
66588,
220,
3749,
16094,
197,
80487,
3749,
16094,
197,
47399,
220,
914,
198,
197,
59403,
197,
197,
515,
298,
66588,
25,
2415,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGitCommandDiff(t *testing.T) {
type scenario struct {
testName string
command func(string, ...string) *exec.Cmd
file *models.File
plain bool
cached bool
ignoreWhitespace bool
}
scenarios := []scenario{
{
"Default case",
func(cmd string, args ...string) *exec.Cmd {
assert.EqualValues(t, "git", cmd)
assert.EqualValues(t, []string{"diff", "--submodule", "--no-ext-diff", "--color=always", "--", "test.txt"}, args)
return secureexec.Command("echo")
},
&models.File{
Name: "test.txt",
HasStagedChanges: false,
Tracked: true,
},
false,
false,
false,
},
{
"cached",
func(cmd string, args ...string) *exec.Cmd {
assert.EqualValues(t, "git", cmd)
assert.EqualValues(t, []string{"diff", "--submodule", "--no-ext-diff", "--color=always", "--cached", "--", "test.txt"}, args)
return secureexec.Command("echo")
},
&models.File{
Name: "test.txt",
HasStagedChanges: false,
Tracked: true,
},
false,
true,
false,
},
{
"plain",
func(cmd string, args ...string) *exec.Cmd {
assert.EqualValues(t, "git", cmd)
assert.EqualValues(t, []string{"diff", "--submodule", "--no-ext-diff", "--color=never", "--", "test.txt"}, args)
return secureexec.Command("echo")
},
&models.File{
Name: "test.txt",
HasStagedChanges: false,
Tracked: true,
},
true,
false,
false,
},
{
"File not tracked and file has no staged changes",
func(cmd string, args ...string) *exec.Cmd {
assert.EqualValues(t, "git", cmd)
assert.EqualValues(t, []string{"diff", "--submodule", "--no-ext-diff", "--color=always", "--no-index", "--", "/dev/null", "test.txt"}, args)
return secureexec.Command("echo")
},
&models.File{
Name: "test.txt",
HasStagedChanges: false,
Tracked: false,
},
false,
false,
false,
},
{
"Default case (ignore whitespace)",
func(cmd string, args ...string) *exec.Cmd {
assert.EqualValues(t, "git", cmd)
assert.EqualValues(t, []string{"diff", "--submodule", "--no-ext-diff", "--color=always", "--ignore-all-space", "--", "test.txt"}, args)
return secureexec.Command("echo")
},
&models.File{
Name: "test.txt",
HasStagedChanges: false,
Tracked: true,
},
false,
false,
true,
},
}
for _, s := range scenarios {
t.Run(s.testName, func(t *testing.T) {
gitCmd := NewDummyGitCommand()
gitCmd.OSCommand.Command = s.command
gitCmd.WorktreeFileDiff(s.file, s.plain, s.cached, s.ignoreWhitespace)
})
}
} | explode_data.jsonl/6319 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1289
} | [
2830,
3393,
46562,
4062,
21751,
1155,
353,
8840,
836,
8,
341,
13158,
15048,
2036,
341,
197,
18185,
675,
260,
914,
198,
197,
45566,
688,
2915,
3609,
11,
2503,
917,
8,
353,
11748,
64512,
198,
197,
17661,
1797,
353,
6507,
8576,
198,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIteratorSeekGE(t *testing.T) {
const n = 1000
d := &testStorage{}
l := NewSkiplist(d, 0)
it := l.NewIter()
require.False(t, it.Valid())
it.First()
require.False(t, it.Valid())
// 1000, 1010, 1020, ..., 1990.
for i := n - 1; i >= 0; i-- {
require.Nil(t, l.Add(d.add(fmt.Sprintf("%05d", i*10+1000))))
}
require.True(t, it.SeekGE(makeKey("")))
require.True(t, it.Valid())
require.EqualValues(t, "01000", it.Key().UserKey)
require.True(t, it.SeekGE(makeKey("01000")))
require.True(t, it.Valid())
require.EqualValues(t, "01000", it.Key().UserKey)
require.True(t, it.SeekGE(makeKey("01005")))
require.True(t, it.Valid())
require.EqualValues(t, "01010", it.Key().UserKey)
require.True(t, it.SeekGE(makeKey("01010")))
require.True(t, it.Valid())
require.EqualValues(t, "01010", it.Key().UserKey)
require.False(t, it.SeekGE(makeKey("99999")))
require.False(t, it.Valid())
// Test seek for empty key.
require.Nil(t, l.Add(d.add("")))
require.True(t, it.SeekGE([]byte{}))
require.True(t, it.Valid())
require.True(t, it.SeekGE(makeKey("")))
require.True(t, it.Valid())
} | explode_data.jsonl/26103 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 489
} | [
2830,
3393,
11951,
39350,
10777,
1155,
353,
8840,
836,
8,
341,
4777,
308,
284,
220,
16,
15,
15,
15,
198,
2698,
1669,
609,
1944,
5793,
16094,
8810,
1669,
1532,
50,
6642,
39934,
1500,
11,
220,
15,
340,
23374,
1669,
326,
7121,
8537,
28... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestEntry_OnPaste(t *testing.T) {
clipboard := test.NewClipboard()
shortcut := &fyne.ShortcutPaste{Clipboard: clipboard}
tests := []struct {
name string
entry *widget.Entry
clipboardContent string
wantText string
wantRow, wantCol int
}{
{
name: "singleline: empty content",
entry: widget.NewEntry(),
clipboardContent: "",
wantText: "",
wantRow: 0,
wantCol: 0,
},
{
name: "singleline: simple text",
entry: widget.NewEntry(),
clipboardContent: "clipboard content",
wantText: "clipboard content",
wantRow: 0,
wantCol: 17,
},
{
name: "singleline: UTF8 text",
entry: widget.NewEntry(),
clipboardContent: "Hié™שרה",
wantText: "Hié™שרה",
wantRow: 0,
wantCol: 7,
},
{
name: "singleline: with new line",
entry: widget.NewEntry(),
clipboardContent: "clipboard\ncontent",
wantText: "clipboard content",
wantRow: 0,
wantCol: 17,
},
{
name: "singleline: with tab",
entry: widget.NewEntry(),
clipboardContent: "clipboard\tcontent",
wantText: "clipboard\tcontent",
wantRow: 0,
wantCol: 17,
},
{
name: "password: with new line",
entry: widget.NewPasswordEntry(),
clipboardContent: "3SB=y+)z\nkHGK(hx6 -e_\"1TZu q^bF3^$u H[:e\"1O.",
wantText: `3SB=y+)z kHGK(hx6 -e_"1TZu q^bF3^$u H[:e"1O.`,
wantRow: 0,
wantCol: 44,
},
{
name: "multiline: with new line",
entry: widget.NewMultiLineEntry(),
clipboardContent: "clipboard\ncontent",
wantText: "clipboard\ncontent",
wantRow: 1,
wantCol: 7,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
clipboard.SetContent(tt.clipboardContent)
tt.entry.TypedShortcut(shortcut)
assert.Equal(t, tt.wantText, tt.entry.Text)
assert.Equal(t, tt.wantRow, tt.entry.CursorRow)
assert.Equal(t, tt.wantCol, tt.entry.CursorColumn)
})
}
} | explode_data.jsonl/12347 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1230
} | [
2830,
3393,
5874,
35482,
73970,
1155,
353,
8840,
836,
8,
341,
197,
70848,
1669,
1273,
7121,
65141,
741,
197,
46357,
1669,
609,
30595,
811,
55958,
10242,
73970,
90,
65141,
25,
46034,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
1797,
9... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNewValue(t *testing.T) {
testcases := []struct {
inType querypb.Type
inVal string
outVal Value
outErr string
}{{
inType: Null,
inVal: "",
outVal: NULL,
}, {
inType: Int8,
inVal: "1",
outVal: TestValue(Int8, "1"),
}, {
inType: Int16,
inVal: "1",
outVal: TestValue(Int16, "1"),
}, {
inType: Int24,
inVal: "1",
outVal: TestValue(Int24, "1"),
}, {
inType: Int32,
inVal: "1",
outVal: TestValue(Int32, "1"),
}, {
inType: Int64,
inVal: "1",
outVal: TestValue(Int64, "1"),
}, {
inType: Uint8,
inVal: "1",
outVal: TestValue(Uint8, "1"),
}, {
inType: Uint16,
inVal: "1",
outVal: TestValue(Uint16, "1"),
}, {
inType: Uint24,
inVal: "1",
outVal: TestValue(Uint24, "1"),
}, {
inType: Uint32,
inVal: "1",
outVal: TestValue(Uint32, "1"),
}, {
inType: Uint64,
inVal: "1",
outVal: TestValue(Uint64, "1"),
}, {
inType: Float32,
inVal: "1.00",
outVal: TestValue(Float32, "1.00"),
}, {
inType: Float64,
inVal: "1.00",
outVal: TestValue(Float64, "1.00"),
}, {
inType: Decimal,
inVal: "1.00",
outVal: TestValue(Decimal, "1.00"),
}, {
inType: Timestamp,
inVal: "2012-02-24 23:19:43",
outVal: TestValue(Timestamp, "2012-02-24 23:19:43"),
}, {
inType: Date,
inVal: "2012-02-24",
outVal: TestValue(Date, "2012-02-24"),
}, {
inType: Time,
inVal: "23:19:43",
outVal: TestValue(Time, "23:19:43"),
}, {
inType: Datetime,
inVal: "2012-02-24 23:19:43",
outVal: TestValue(Datetime, "2012-02-24 23:19:43"),
}, {
inType: Year,
inVal: "1",
outVal: TestValue(Year, "1"),
}, {
inType: Text,
inVal: "a",
outVal: TestValue(Text, "a"),
}, {
inType: Blob,
inVal: "a",
outVal: TestValue(Blob, "a"),
}, {
inType: VarChar,
inVal: "a",
outVal: TestValue(VarChar, "a"),
}, {
inType: Binary,
inVal: "a",
outVal: TestValue(Binary, "a"),
}, {
inType: Char,
inVal: "a",
outVal: TestValue(Char, "a"),
}, {
inType: Bit,
inVal: "1",
outVal: TestValue(Bit, "1"),
}, {
inType: Enum,
inVal: "a",
outVal: TestValue(Enum, "a"),
}, {
inType: Set,
inVal: "a",
outVal: TestValue(Set, "a"),
}, {
inType: VarBinary,
inVal: "a",
outVal: TestValue(VarBinary, "a"),
}, {
inType: Int64,
inVal: InvalidNeg,
outErr: "out of range",
}, {
inType: Int64,
inVal: InvalidPos,
outErr: "out of range",
}, {
inType: Uint64,
inVal: "-1",
outErr: "invalid syntax",
}, {
inType: Uint64,
inVal: InvalidPos,
outErr: "out of range",
}, {
inType: Float64,
inVal: "a",
outErr: "invalid syntax",
}, {
inType: Expression,
inVal: "a",
outErr: "invalid type specified for MakeValue: EXPRESSION",
}}
for _, tcase := range testcases {
v, err := NewValue(tcase.inType, []byte(tcase.inVal))
if tcase.outErr != "" {
if err == nil || !strings.Contains(err.Error(), tcase.outErr) {
t.Errorf("ValueFromBytes(%v, %v) error: %v, must contain %v", tcase.inType, tcase.inVal, err, tcase.outErr)
}
continue
}
if err != nil {
t.Errorf("ValueFromBytes(%v, %v) error: %v", tcase.inType, tcase.inVal, err)
continue
}
if !reflect.DeepEqual(v, tcase.outVal) {
t.Errorf("ValueFromBytes(%v, %v) = %v, want %v", tcase.inType, tcase.inVal, v, tcase.outVal)
}
}
} | explode_data.jsonl/30806 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1660
} | [
2830,
3393,
3564,
1130,
1155,
353,
8840,
836,
8,
341,
18185,
23910,
1669,
3056,
1235,
341,
197,
17430,
929,
3239,
16650,
10184,
198,
197,
17430,
2208,
220,
914,
198,
197,
13967,
2208,
5162,
198,
197,
13967,
7747,
914,
198,
197,
15170,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestTranscodingWMV(t *testing.T) {
var inputPath = "/tmp/ffmpeg/wmv"
var outputPath = "/tmp/ffmpeg/out/wmv.mp4"
trans := new(transcoder.Transcoder)
err := trans.Initialize(inputPath, outputPath)
assert.Nil(t, err)
done := trans.Run(false)
err = <-done
assert.Nil(t, err)
} | explode_data.jsonl/57666 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 119
} | [
2830,
3393,
3167,
48367,
15210,
53,
1155,
353,
8840,
836,
8,
1476,
2405,
1946,
1820,
284,
3521,
5173,
14,
72422,
6324,
26002,
698,
2405,
95017,
284,
3521,
5173,
14,
72422,
48316,
6324,
26002,
16870,
19,
1837,
72453,
1669,
501,
33089,
40... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInterface(t *testing.T) {
type AA struct {
ID int `orm:"id key auto"`
Name string `orm:"name"`
Desc *string `orm:"desc"`
}
type BB struct {
ID int `orm:"id key auto"`
Name string `orm:"name"`
AA *AA `orm:"aa"`
}
bb := &BB{AA: &AA{}}
bbVal, bbErr := GetObject(bb)
if bbErr != nil {
t.Errorf("GetObject failed, err:%s", bbErr.Error())
return
}
_, objErr := GetEntityModel(bbVal)
if objErr != nil {
t.Errorf("GetEntityModel failed, err:%s", objErr.Error())
return
}
} | explode_data.jsonl/3460 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 244
} | [
2830,
3393,
5051,
1155,
353,
8840,
836,
8,
341,
13158,
28234,
2036,
341,
197,
29580,
256,
526,
257,
1565,
493,
2974,
307,
1376,
3233,
8805,
197,
21297,
914,
220,
1565,
493,
2974,
606,
8805,
197,
10957,
3300,
353,
917,
1565,
493,
2974,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestControllerRegisterInsertFilterChain(t *testing.T) {
InsertFilterChain("/*", func(next FilterFunc) FilterFunc {
return func(ctx *context.Context) {
ctx.Output.Header("filter", "filter-chain")
next(ctx)
}
})
ns := NewNamespace("/chain")
ns.Get("/*", func(ctx *context.Context) {
_ = ctx.Output.Body([]byte("hello"))
})
r, _ := http.NewRequest("GET", "/chain/user", nil)
w := httptest.NewRecorder()
BhojpurApp.Handlers.Init()
BhojpurApp.Handlers.ServeHTTP(w, r)
assert.Equal(t, "filter-chain", w.Header().Get("filter"))
} | explode_data.jsonl/14966 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 215
} | [
2830,
3393,
2051,
8690,
13780,
5632,
18837,
1155,
353,
8840,
836,
8,
341,
197,
13780,
5632,
18837,
445,
1057,
497,
2915,
16913,
12339,
9626,
8,
12339,
9626,
341,
197,
853,
2915,
7502,
353,
2147,
9328,
8,
341,
298,
20985,
34246,
15753,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTaskPrintWithNoTruncOption(t *testing.T) {
quiet := false
trunc := false
noResolve := true
apiClient := &fakeClient{}
cli := test.NewFakeCli(apiClient)
tasks := []swarm.Task{
*Task(TaskID("id-foo-yov6omdek8fg3k5stosyp2m50")),
}
err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .ID }}")
assert.NilError(t, err)
golden.Assert(t, cli.OutBuffer().String(), "task-print-with-no-trunc-option.golden")
} | explode_data.jsonl/43885 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 191
} | [
2830,
3393,
6262,
8994,
2354,
2753,
1282,
1347,
5341,
1155,
353,
8840,
836,
8,
341,
197,
43650,
1669,
895,
198,
25583,
1347,
1669,
895,
198,
72104,
56808,
1669,
830,
198,
54299,
2959,
1669,
609,
30570,
2959,
16094,
86448,
1669,
1273,
71... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLoginWithSQLServerAuth(t *testing.T) {
conn, err := NewConnector("sqlserver://test:secret@localhost:1433?Workstation ID=localhost&log=128")
if err != nil {
t.Errorf("Unable to parse dummy DSN: %v", err)
}
tl := testLogger{t: t}
defer tl.StopLogging()
SetLogger(&tl)
mock := NewMockTransportDialer(
[]string{
" 12 01 00 2f 00 00 01 00 00 00 1a 00 06 01 00 20\n" +
"00 01 02 00 21 00 01 03 00 22 00 04 04 00 26 00\n" +
"01 ff 00 00 00 00 00 00 00 00 00 00 00 00 00\n",
" 10 01 00 b2 00 00 01 00 aa 00 00 00 04 00 00 74\n" +
"00 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00\n" +
"A0 02 00 00 00 00 00 00 00 00 00 00 5e 00 09 00\n" +
"70 00 04 00 78 00 06 00 84 00 0a 00 98 00 09 00\n" +
"00 00 00 00 aa 00 00 00 aa 00 00 00 aa 00 00 00\n" +
"00 00 00 00 00 00 aa 00 00 00 aa 00 00 00 aa 00\n" +
"00 00 00 00 00 00 6c 00 6f 00 63 00 61 00 6c 00\n" +
"68 00 6f 00 73 00 74 00 74 00 65 00 73 00 74 00\n" +
"92 a5 f3 a5 93 a5 82 a5 f3 a5 e2 a5 67 00 6f 00\n" +
"2d 00 6d 00 73 00 73 00 71 00 6c 00 64 00 62 00\n" +
"6c 00 6f 00 63 00 61 00 6c 00 68 00 6f 00 73 00\n" +
"74 00\n",
},
[]string{
" 04 01 00 20 00 00 01 00 00 00 10 00 06 01 00 16\n" +
"00 01 06 00 17 00 01 FF 0C 00 07 D0 00 00 02 01\n",
" 04 01 00 4A 00 00 01 00 AD 32 00 01 74 00 00 04\n" +
"14 4d 00 69 00 63 00 72 00 6f 00 73 00 6f 00 66\n" +
"00 74 00 20 00 53 00 51 00 4c 00 20 00 53 00 65\n" +
"00 72 00 76 00 65 00 72 00 0c 00 07 d0 fd 00 00\n" +
"00 00 00 00 00 00 00 00 00 00\n",
},
)
conn.Dialer = mock
_, err = connect(context.Background(), conn, driverInstanceNoProcess.logger, conn.params)
if err != nil {
t.Error(err)
}
err = <-mock.result
if err != nil {
t.Error(err)
}
} | explode_data.jsonl/28157 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 861
} | [
2830,
3393,
6231,
2354,
6688,
5475,
5087,
1155,
353,
8840,
836,
8,
341,
32917,
11,
1848,
1669,
1532,
35954,
445,
3544,
4030,
1110,
1944,
25,
20474,
31,
8301,
25,
16,
19,
18,
18,
30,
6776,
20155,
3034,
84551,
5,
839,
28,
16,
17,
23... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestMaterializerSyntaxError(t *testing.T) {
ms := &vtctldatapb.MaterializeSettings{
Workflow: "workflow",
SourceKeyspace: "sourceks",
TargetKeyspace: "targetks",
TableSettings: []*vtctldatapb.TableMaterializeSettings{{
TargetTable: "t1",
SourceExpression: "bad query",
CreateDdl: "t1ddl",
}},
}
env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"})
defer env.close()
env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{})
err := env.wr.Materialize(context.Background(), ms)
require.EqualError(t, err, "syntax error at position 4 near 'bad'")
} | explode_data.jsonl/61879 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 251
} | [
2830,
3393,
13415,
3135,
33890,
1454,
1155,
353,
8840,
836,
8,
341,
47691,
1669,
609,
9708,
302,
507,
266,
391,
65,
44253,
551,
6086,
515,
197,
197,
62768,
25,
981,
330,
56249,
756,
197,
197,
3608,
8850,
1306,
25,
330,
2427,
2787,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEventSystemExtrinsicSuccess_Decode(t *testing.T) {
decoded := EventSystemExtrinsicSuccess{}
err := DecodeFromBytes(exampleEventFinEnc, &decoded)
assert.NoError(t, err)
assert.Equal(t, exampleEventFin, decoded)
decoded = EventSystemExtrinsicSuccess{}
err = DecodeFromBytes(exampleEventAppEnc, &decoded)
assert.NoError(t, err)
assert.Equal(t, exampleEventApp, decoded)
} | explode_data.jsonl/68327 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 140
} | [
2830,
3393,
1556,
2320,
840,
45002,
7188,
78668,
534,
1155,
353,
8840,
836,
8,
341,
197,
62913,
1669,
3665,
2320,
840,
45002,
7188,
16094,
9859,
1669,
50194,
3830,
7078,
66203,
1556,
9134,
7408,
11,
609,
62913,
340,
6948,
35699,
1155,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLock_DeleteKey(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
// This uncovered some issues around special-case handling of low index
// numbers where it would work with a low number but fail for higher
// ones, so we loop this a bit to sweep the index up out of that
// territory.
for i := 0; i < 10; i++ {
func() {
lock, err := c.LockKey("test/lock")
if err != nil {
t.Fatalf("err: %v", err)
}
// Should work
leaderCh, err := lock.Lock(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if leaderCh == nil {
t.Fatalf("not leader")
}
defer lock.Unlock()
go func() {
// Nuke the key, simulate an operator intervention
kv := c.KV()
kv.Delete("test/lock", nil)
}()
// Should loose leadership
select {
case <-leaderCh:
case <-time.After(10 * time.Second):
t.Fatalf("should not be leader")
}
}()
}
} | explode_data.jsonl/27624 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 382
} | [
2830,
3393,
11989,
57418,
1592,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
1444,
11,
274,
1669,
1281,
2959,
1155,
340,
16867,
274,
30213,
2822,
197,
322,
1096,
42422,
1045,
4714,
2163,
3281,
38485,
11589,
315,
3347,
1922,
198... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMessageSendBlockGasLimit(t *testing.T) {
t.Parallel()
d := th.NewDaemon(
t,
// default address required
th.DefaultAddress(fixtures.TestAddresses[0]),
th.WithMiner(fixtures.TestMiners[0]),
th.KeyFile(fixtures.KeyFilePaths()[0]),
).Start()
defer d.ShutdownSuccess()
doubleTheBlockGasLimit := strconv.Itoa(int(types.BlockGasLimit) * 2)
halfTheBlockGasLimit := strconv.Itoa(int(types.BlockGasLimit) / 2)
result := struct{ Messages []interface{} }{}
t.Run("when the gas limit is above the block limit, the message fails", func(t *testing.T) {
d.RunFail("block gas limit",
"message", "send",
"--gas-price", "0", "--gas-limit", doubleTheBlockGasLimit,
"--value=10", fixtures.TestAddresses[1],
)
})
t.Run("when the gas limit is below the block limit, the message succeeds", func(t *testing.T) {
d.RunSuccess(
"message", "send",
"--gas-price", "0", "--gas-limit", halfTheBlockGasLimit,
"--value=10", fixtures.TestAddresses[1],
)
blockCid := d.RunSuccess("mining", "once").ReadStdoutTrimNewlines()
blockInfo := d.RunSuccess("show", "block", blockCid, "--enc", "json").ReadStdoutTrimNewlines()
require.NoError(t, json.Unmarshal([]byte(blockInfo), &result))
assert.NotEmpty(t, result.Messages, "msg under the block gas limit passes validation and is run in the block")
})
} | explode_data.jsonl/74072 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 496
} | [
2830,
3393,
2052,
11505,
4713,
58728,
16527,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
2698,
1669,
270,
7121,
89177,
1006,
197,
3244,
345,
197,
197,
322,
1638,
2621,
2567,
198,
197,
70479,
13275,
4286,
955,
941,
18513,
8787... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPreparePRContext(t *testing.T) {
var out bytes.Buffer
logger := zerolog.New(&out)
ctx := logger.WithContext(context.Background())
_, logger = PreparePRContext(ctx, 42, &github.Repository{
Name: github.String("test"),
Owner: &github.User{
Login: github.String("mhaypenny"),
},
}, 128)
logger.Info().Msg("")
var entry struct {
ID int64 `json:"github_installation_id"`
Owner string `json:"github_repository_owner"`
Name string `json:"github_repository_name"`
Number int `json:"github_pr_num"`
}
if err := json.Unmarshal(out.Bytes(), &entry); err != nil {
t.Fatalf("invalid log entry: %s: %v", out.String(), err)
}
assertField(t, "installation ID", int64(42), entry.ID)
assertField(t, "repository owner", "mhaypenny", entry.Owner)
assertField(t, "repository name", "test", entry.Name)
assertField(t, "pull request number", 128, entry.Number)
} | explode_data.jsonl/70538 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 345
} | [
2830,
3393,
50590,
6480,
1972,
1155,
353,
8840,
836,
8,
341,
2405,
700,
5820,
22622,
271,
17060,
1669,
76178,
1609,
7121,
2099,
411,
340,
20985,
1669,
5925,
26124,
1972,
5378,
19047,
12367,
197,
6878,
5925,
284,
31166,
6480,
1972,
7502,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestClient_GetSubject(t *testing.T) {
for _, client := range makeTestClients() {
table, err := client.GetFirstTable()
if err != nil {
t.Error(err)
return
}
for i, subject := range table.Subjects {
if i > 5 {
break
}
if _, err := client.GetSubject(subject.TimetableCode); err != nil {
t.Error(err)
return
}
}
}
} | explode_data.jsonl/61318 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 158
} | [
2830,
3393,
2959,
13614,
13019,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
2943,
1669,
2088,
1281,
2271,
47174,
368,
341,
197,
26481,
11,
1848,
1669,
2943,
2234,
5338,
2556,
741,
197,
743,
1848,
961,
2092,
341,
298,
3244,
6141,
3964,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestComputeBatchSpecState(t *testing.T) {
uploadedSpec := &BatchSpec{CreatedFromRaw: false}
createdFromRawSpec := &BatchSpec{CreatedFromRaw: true}
tests := []struct {
stats BatchSpecStats
spec *BatchSpec
want BatchSpecState
}{
{
stats: BatchSpecStats{ResolutionDone: false},
spec: uploadedSpec,
want: BatchSpecStateCompleted,
},
{
stats: BatchSpecStats{ResolutionDone: false},
spec: createdFromRawSpec,
want: BatchSpecStatePending,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5},
spec: createdFromRawSpec,
want: BatchSpecStatePending,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 3},
spec: createdFromRawSpec,
want: BatchSpecStateQueued,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 2, Processing: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 1, Processing: 1, Completed: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 1, Processing: 0, Completed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 0, Processing: 0, Completed: 3},
spec: createdFromRawSpec,
want: BatchSpecStateCompleted,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 1, Processing: 1, Failed: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 1, Processing: 0, Failed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 0, Processing: 0, Failed: 3},
spec: createdFromRawSpec,
want: BatchSpecStateFailed,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 0, Completed: 1, Failed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateFailed,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 3},
spec: createdFromRawSpec,
want: BatchSpecStateCanceling,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 2, Completed: 1},
spec: createdFromRawSpec,
want: BatchSpecStateCanceling,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 2, Failed: 1},
spec: createdFromRawSpec,
want: BatchSpecStateCanceling,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 1, Queued: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 1, Processing: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 3},
spec: createdFromRawSpec,
want: BatchSpecStateCanceled,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Failed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateCanceled,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Completed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateCanceled,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Canceling: 2},
spec: createdFromRawSpec,
want: BatchSpecStateCanceling,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Canceling: 1, Queued: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Processing: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Canceling: 1, Processing: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Queued: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 0, Executions: 0},
spec: createdFromRawSpec,
want: BatchSpecStateCompleted,
},
}
for idx, tt := range tests {
have := ComputeBatchSpecState(tt.spec, tt.stats)
if have != tt.want {
t.Errorf("test %d/%d: unexpected batch spec state. want=%s, have=%s", idx+1, len(tests), tt.want, have)
}
}
} | explode_data.jsonl/4033 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1981
} | [
2830,
3393,
46254,
21074,
8327,
1397,
1155,
353,
8840,
836,
8,
341,
197,
56883,
8327,
1669,
609,
21074,
8327,
90,
11694,
3830,
20015,
25,
895,
532,
197,
7120,
3830,
20015,
8327,
1669,
609,
21074,
8327,
90,
11694,
3830,
20015,
25,
830,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestClient(t *testing.T) {
networkConfig, err := ReadFile(configFile)
require.NoError(t, err, "failed to read config file %s", configFile)
entityMatcherOverride, err := ReadFile(matcherFile)
require.NoError(t, err, "failed to read entity matcher file %s", matcherFile)
fbClient, err := NewFabricClient(ConnectorSpec{
Name: connectorName,
NetworkConfig: networkConfig,
EntityMatchers: entityMatcherOverride,
OrgName: org,
UserName: user,
ChannelID: channelID,
})
require.NoError(t, err, "failed to create fabric client %s", connectorName)
fmt.Printf("created fabric client %+v\n", fbClient)
// query original
result, _, err := fbClient.QueryChaincode(ccID, "query", [][]byte{[]byte("a")}, nil)
require.NoError(t, err, "failed to query %s", ccID)
fmt.Printf("Query result: %s\n", string(result))
origValue := result
// update
result, _, err = fbClient.ExecuteChaincode(ccID, "invoke", [][]byte{[]byte("a"), []byte("b"), []byte("10")}, nil)
require.NoError(t, err, "failed to invoke %s", ccID)
fmt.Printf("Invoke result: %s\n", string(result))
// query after update
result, _, err = fbClient.QueryChaincode(ccID, "query", [][]byte{[]byte("a")}, nil)
require.NoError(t, err, "failed to query %s", ccID)
fmt.Printf("Query result: %s\n", string(result))
assert.NotEqual(t, origValue, result, "original %s should different from %s", string(origValue), string(result))
} | explode_data.jsonl/75483 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 531
} | [
2830,
3393,
2959,
1155,
353,
8840,
836,
8,
341,
9038,
2349,
2648,
11,
1848,
1669,
4457,
1703,
8754,
1703,
340,
17957,
35699,
1155,
11,
1848,
11,
330,
16091,
311,
1349,
2193,
1034,
1018,
82,
497,
92770,
692,
52987,
37554,
2177,
11,
184... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_getHelmDependencyRepos(t *testing.T) {
repo1 := "https://charts.bitnami.com/bitnami"
repo2 := "https://eventstore.github.io/EventStore.Charts"
repos, err := getHelmDependencyRepos("../../util/helm/testdata/dependency")
assert.NoError(t, err)
assert.Equal(t, len(repos), 2)
assert.Equal(t, repos[0].Repo, repo1)
assert.Equal(t, repos[1].Repo, repo2)
} | explode_data.jsonl/5704 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 160
} | [
2830,
3393,
3062,
39,
23162,
36387,
693,
966,
1155,
353,
8840,
836,
8,
341,
17200,
5368,
16,
1669,
330,
2428,
1110,
36584,
30099,
77,
10606,
905,
86644,
77,
10606,
698,
17200,
5368,
17,
1669,
330,
2428,
1110,
3087,
4314,
11021,
4245,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestExternalLoadBalancer(t *testing.T) {
runTestAWS(t, "externallb.example.com", "externallb", "v1alpha2", false, 1, true)
runTestCloudformation(t, "externallb.example.com", "externallb", "v1alpha2", false, nil)
} | explode_data.jsonl/25380 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 88
} | [
2830,
3393,
25913,
5879,
93825,
1155,
353,
8840,
836,
8,
341,
56742,
2271,
36136,
1155,
11,
330,
4301,
541,
65,
7724,
905,
497,
330,
4301,
541,
65,
497,
330,
85,
16,
7141,
17,
497,
895,
11,
220,
16,
11,
830,
340,
56742,
2271,
1605... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestOKExSpot_BatchPlaceOrders(t *testing.T) {
t.Log(okex.OKExSpot.BatchPlaceOrders([]goex.Order{
goex.Order{
Cid: okex.UUID(),
Currency: goex.XRP_USD,
Amount: 10,
Price: 0.32,
Side: goex.BUY,
Type: "limit",
OrderType: goex.ORDER_FEATURE_ORDINARY,
},
{
Cid: okex.UUID(),
Currency: goex.EOS_USD,
Amount: 1,
Price: 5.2,
Side: goex.BUY,
OrderType: goex.ORDER_FEATURE_ORDINARY,
},
goex.Order{
Cid: okex.UUID(),
Currency: goex.XRP_USD,
Amount: 10,
Price: 0.33,
Side: goex.BUY,
Type: "limit",
OrderType: goex.ORDER_FEATURE_ORDINARY,
}}))
} | explode_data.jsonl/3918 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 401
} | [
2830,
3393,
3925,
840,
47049,
1668,
754,
17371,
24898,
1155,
353,
8840,
836,
8,
341,
3244,
5247,
60207,
327,
15480,
840,
47049,
45791,
17371,
24898,
10556,
3346,
327,
19664,
515,
197,
30680,
327,
19664,
515,
298,
6258,
307,
25,
981,
539... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSendNoSinkNoRoute(t *testing.T) {
r := NewRouter(nil)
if err := r.Send([]byte("hello"), nil); err == nil {
t.Fatalf("error expected")
}
a, b, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
defer a.Close()
defer b.Close()
if err := r.Send([]byte("foo bar baz"), a); err == nil {
t.Fatalf("error expected")
}
} | explode_data.jsonl/35717 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 148
} | [
2830,
3393,
11505,
2753,
45094,
2753,
4899,
1155,
353,
8840,
836,
8,
341,
7000,
1669,
1532,
9523,
27907,
340,
743,
1848,
1669,
435,
20176,
10556,
3782,
445,
14990,
3975,
2092,
1215,
1848,
621,
2092,
341,
197,
3244,
30762,
445,
841,
3601... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestAPIVersion(t *testing.T) {
av, err := Load(os.DirFS(runtime.GOROOT()))
if err != nil {
t.Fatal(err)
}
for _, tc := range []struct {
kind string
pkg string
name string
receiver string
want string
}{
// Things that were added post-1.0 should appear
{"func", "archive/tar", "FileInfoHeader", "", "1.1"},
{"type", "bufio", "Scanner", "", "1.1"},
{"method", "bufio", "WriteTo", "*Reader", "1.1"},
{"func", "bytes", "LastIndexByte", "", "1.5"},
{"type", "crypto", "Decrypter", "", "1.5"},
{"method", "crypto/rsa", "Decrypt", "*PrivateKey", "1.5"},
{"method", "debug/dwarf", "GoString", "Class", "1.5"},
{"func", "os", "IsTimeout", "", "1.10"},
{"type", "strings", "Builder", "", "1.10"},
{"method", "strings", "WriteString", "*Builder", "1.10"},
// Should get the earliest Go version when an identifier
// was initially added, rather than a later version when
// it may have been updated. See issue 44081.
{"func", "os", "Chmod", "", ""}, // Go 1 era function, updated in Go 1.16.
{"method", "os", "Readdir", "*File", ""}, // Go 1 era method, updated in Go 1.16.
{"method", "os", "ReadDir", "*File", "1.16"}, // New to Go 1.16.
// Things from package syscall should never appear
{"func", "syscall", "FchFlags", "", ""},
{"type", "syscall", "Inet4Pktinfo", "", ""},
// Things added in Go 1 should never appear
{"func", "archive/tar", "NewReader", "", ""},
{"type", "archive/tar", "Header", "", ""},
{"method", "archive/tar", "Next", "*Reader", ""},
} {
if tc.want != "" && !hasTag("go"+tc.want) {
continue
}
if got := av.Func(tc.pkg, tc.kind, tc.receiver, tc.name); got != tc.want {
t.Errorf(`sinceFunc(%q, %q, %q, %q) = %q; want %q`, tc.pkg, tc.kind, tc.receiver, tc.name, got, tc.want)
}
}
} | explode_data.jsonl/42992 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 758
} | [
2830,
3393,
7082,
5637,
1155,
353,
8840,
836,
8,
341,
197,
402,
11,
1848,
1669,
8893,
9638,
83757,
8485,
89467,
1224,
868,
53837,
12145,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
532,
2023,
8358,
17130,
1669,
2088,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func Test_Block(t *testing.T) {
goldenBlock := getResponse(block).(*Block)
type want struct {
wantErr bool
containsErr string
wantBlock *Block
}
cases := []struct {
name string
inputHanler http.Handler
want
}{
{
"failed to unmarshal",
gtGoldenHTTPMock(newBlockMock().handler([]byte(`not_block_data`), blankHandler)),
want{
true,
"could not get block '50': invalid character",
&Block{},
},
},
{
"is successful",
gtGoldenHTTPMock(newBlockMock().handler(readResponse(block), blankHandler)),
want{
false,
"",
goldenBlock,
},
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
server := httptest.NewServer(tt.inputHanler)
defer server.Close()
gt, err := New(server.URL)
assert.Nil(t, err)
block, err := gt.Block(50)
checkErr(t, tt.wantErr, tt.containsErr, err)
assert.Equal(t, tt.want.wantBlock, block)
})
}
} | explode_data.jsonl/48369 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 430
} | [
2830,
3393,
51779,
1155,
353,
8840,
836,
8,
341,
3174,
813,
268,
4713,
1669,
633,
2582,
18682,
568,
4071,
4713,
340,
13158,
1366,
2036,
341,
197,
50780,
7747,
257,
1807,
198,
197,
197,
13372,
7747,
914,
198,
197,
50780,
4713,
256,
353... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTestAllSubSets(t *testing.T) {
t.Log(testf.TestAllSubSets(httpLayer.Fallback_sni, testMap))
t.Log(testf.TestAllSubSets(map2Mask, testMap2))
} | explode_data.jsonl/61064 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 67
} | [
2830,
3393,
2271,
2403,
3136,
30175,
1155,
353,
8840,
836,
8,
1476,
3244,
5247,
8623,
69,
8787,
2403,
3136,
30175,
19886,
9188,
991,
3420,
643,
7751,
11,
1273,
2227,
1171,
3244,
5247,
8623,
69,
8787,
2403,
3136,
30175,
9147,
17,
12686,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestDelivering_CanTransitionTo(t *testing.T) {
st := &delivering{}
require.Equal(t, stateNameDelivering, st.Name())
require.True(t, st.CanTransitionTo(&confirming{}))
require.True(t, st.CanTransitionTo(&abandoning{}))
require.True(t, st.CanTransitionTo(&done{}))
require.False(t, st.CanTransitionTo(&noOp{}))
require.False(t, st.CanTransitionTo(&start{}))
require.False(t, st.CanTransitionTo(&delivering{}))
require.False(t, st.CanTransitionTo(&arranging{}))
require.False(t, st.CanTransitionTo(&deciding{}))
require.False(t, st.CanTransitionTo(&waiting{}))
require.False(t, st.CanTransitionTo(&requesting{}))
} | explode_data.jsonl/66245 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 245
} | [
2830,
3393,
16532,
83940,
920,
276,
21768,
1249,
1155,
353,
8840,
836,
8,
341,
18388,
1669,
609,
74728,
287,
16094,
17957,
12808,
1155,
11,
1584,
675,
16532,
83940,
11,
357,
2967,
12367,
17957,
32443,
1155,
11,
357,
53280,
21768,
1249,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCreateSubaccount(t *testing.T) {
t.Parallel()
_, err := f.CreateSubaccount(context.Background(), "")
if !errors.Is(err, errSubaccountNameMustBeSpecified) {
t.Errorf("expected %v, but received: %s", errSubaccountNameMustBeSpecified, err)
}
if !areTestAPIKeysSet() || !canManipulateRealOrders {
t.Skip("skipping test, either api keys or canManipulateRealOrders isn't set")
}
_, err = f.CreateSubaccount(context.Background(), "subzero")
if err != nil {
t.Fatal(err)
}
if err = f.DeleteSubaccount(context.Background(), "subzero"); err != nil {
t.Error(err)
}
} | explode_data.jsonl/15246 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 216
} | [
2830,
3393,
4021,
3136,
4608,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
197,
6878,
1848,
1669,
282,
7251,
3136,
4608,
5378,
19047,
1507,
14676,
743,
753,
7650,
4506,
3964,
11,
1848,
3136,
4608,
675,
31776,
3430,
8327,
1870,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestStoreSendUpdateTime(t *testing.T) {
defer leaktest.AfterTest(t)
store, _, stopper := createTestStore(t)
defer stopper.Stop()
args := getArgs([]byte("a"))
reqTS := store.ctx.Clock.Now()
reqTS.WallTime += (100 * time.Millisecond).Nanoseconds()
_, err := client.SendWrappedWith(store.testSender(), nil, roachpb.Header{Timestamp: reqTS}, &args)
if err != nil {
t.Fatal(err)
}
ts := store.ctx.Clock.Timestamp()
if ts.WallTime != reqTS.WallTime || ts.Logical <= reqTS.Logical {
t.Errorf("expected store clock to advance to %s; got %s", reqTS, ts)
}
} | explode_data.jsonl/44469 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 223
} | [
2830,
3393,
6093,
11505,
64299,
1155,
353,
8840,
836,
8,
341,
16867,
23352,
1944,
36892,
2271,
1155,
340,
57279,
11,
8358,
2936,
712,
1669,
1855,
2271,
6093,
1155,
340,
16867,
2936,
712,
30213,
741,
31215,
1669,
633,
4117,
10556,
3782,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestService_GetStringifiedManifestsFromCache(t *testing.T) {
stringifiedManifest := "{\"url\":\"http://chart-viewer.com\",\"manifests\":[{\"name\":\"deployment.yaml\",\"content\":\"kind: Deployment\"}]}"
repository := new(repoMock.Repository)
helm := new(helmMock.Helm)
repository.On("Get", "manifests-stable-app-deploy-v0.0.1-hash").Return(stringifiedManifest)
svc := service.NewService(helm, repository)
manifest := svc.GetStringifiedManifests("stable", "app-deploy", "v0.0.1", "hash")
expectedManifests := "---\nkind: Deployment\n"
assert.Equal(t, expectedManifests, manifest)
} | explode_data.jsonl/15366 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 222
} | [
2830,
3393,
1860,
13614,
703,
1870,
38495,
82,
3830,
8233,
1155,
353,
8840,
836,
8,
341,
11357,
1870,
38495,
1669,
54734,
1085,
23488,
1254,
1110,
15941,
22503,
261,
905,
34333,
42315,
82,
59,
8899,
64238,
606,
23488,
82213,
33406,
34333,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCatchUpClient_UpdateMembers(t *testing.T) {
lg, err := logger.New(&logger.Config{
Level: "info",
OutputPath: []string{"stdout"},
ErrOutputPath: []string{"stderr"},
Encoding: "console",
})
require.NoError(t, err)
h := comm.NewCatchUpClient(lg, nil)
require.NotNil(t, h)
peer1 := &types.PeerConfig{
NodeId: "node1",
RaftId: 1,
PeerHost: "127.0.0.1",
PeerPort: 9001,
}
peer2 := &types.PeerConfig{
NodeId: "node2",
RaftId: 2,
PeerHost: "127.0.0.1",
PeerPort: 9002,
}
err = h.UpdateMembers([]*types.PeerConfig{peer1, peer2})
require.NoError(t, err)
peer2.PeerHost = "not a legal address"
err = h.UpdateMembers([]*types.PeerConfig{peer1, peer2})
require.EqualError(t, err, "failed to convert PeerConfig [node_id:\"node2\" raft_id:2 peer_host:\"not a legal address\" peer_port:9002 ] to url: parse \"http://not a legal address:9002\": invalid character \" \" in host name")
} | explode_data.jsonl/70492 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 410
} | [
2830,
3393,
57760,
2324,
2959,
47393,
24371,
1155,
353,
8840,
836,
8,
341,
8810,
70,
11,
1848,
1669,
5925,
7121,
2099,
9786,
10753,
515,
197,
197,
4449,
25,
260,
330,
2733,
756,
197,
80487,
1820,
25,
262,
3056,
917,
4913,
36358,
7115,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLimiter_Allow(t *testing.T) {
now := time.Now()
tests := []struct {
name string
limiter *Limiter
now time.Time
expectedLimiter *Limiter
expectedAllowed bool
expectedSleep time.Duration
}{
{
name: "has one token",
limiter: &Limiter{
rate: 1,
burst: 1,
tokens: 1,
last: now,
},
now: now,
expectedLimiter: &Limiter{
rate: 1,
burst: 1,
tokens: 0,
last: now,
},
expectedAllowed: true,
expectedSleep: 0,
},
{
name: "no token and enough time passed",
limiter: &Limiter{
rate: 1,
burst: 1,
tokens: 0,
last: now,
},
now: now.Add(time.Second),
expectedLimiter: &Limiter{
rate: 1,
burst: 1,
tokens: 0,
last: now.Add(time.Second),
},
expectedAllowed: true,
expectedSleep: 0,
},
{
name: "no token and some time passed, rate = 1",
limiter: &Limiter{
rate: 1,
burst: 1,
tokens: 0,
last: now,
},
now: now.Add(time.Millisecond * 200),
expectedLimiter: &Limiter{
rate: 1,
burst: 1,
tokens: -0.8,
last: now.Add(time.Millisecond * 200),
},
expectedAllowed: false,
expectedSleep: time.Millisecond * 800,
},
{
name: "no token and some time passed, rate = 2",
limiter: &Limiter{
rate: 2,
burst: 1,
tokens: 0,
last: now,
},
now: now.Add(time.Millisecond * 200),
expectedLimiter: &Limiter{
rate: 2,
burst: 1,
tokens: -0.6,
last: now.Add(time.Millisecond * 200),
},
expectedAllowed: false,
expectedSleep: time.Millisecond * 300,
},
{
name: "rate reach burst when enough time passed",
limiter: &Limiter{
rate: 100,
burst: 1000,
tokens: 0,
last: now,
},
now: now.Add(time.Second * 10),
expectedLimiter: &Limiter{
rate: 100,
burst: 1000,
tokens: 999,
last: now.Add(time.Second * 10),
},
expectedAllowed: true,
expectedSleep: 0,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
b, sleep := test.limiter.Allow(test.now)
errorassert.Equal(t, test.expectedLimiter, test.limiter)
errorassert.Equal(t, test.expectedAllowed, b)
errorassert.Equal(t, test.expectedSleep, sleep)
})
}
} | explode_data.jsonl/38234 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1150
} | [
2830,
3393,
43,
17700,
53629,
363,
1155,
353,
8840,
836,
8,
341,
80922,
1669,
882,
13244,
2822,
78216,
1669,
3056,
1235,
341,
197,
11609,
262,
914,
198,
197,
197,
4659,
2015,
353,
43,
17700,
198,
197,
80922,
257,
882,
16299,
271,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWithHelper(t *testing.T) {
withHelper := &HelperReporter{TestReporter: NewErrorReporter(t)}
ctrlWithHelper := gomock.NewController(withHelper)
ctrlWithHelper.T.Helper()
if withHelper.helper == 0 {
t.Fatal("expected Helper to be invoked")
}
} | explode_data.jsonl/17300 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 90
} | [
2830,
3393,
2354,
5511,
1155,
353,
8840,
836,
8,
341,
46948,
5511,
1669,
609,
5511,
52766,
90,
2271,
52766,
25,
1532,
1454,
52766,
1155,
10569,
84381,
2354,
5511,
1669,
342,
316,
1176,
7121,
2051,
16980,
5511,
692,
84381,
2354,
5511,
83... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_NamespacesTopic_WhenConvertedToHub_RoundTripsWithoutLoss(t *testing.T) {
t.Parallel()
parameters := gopter.DefaultTestParameters()
parameters.MaxSize = 10
properties := gopter.NewProperties(parameters)
properties.Property(
"Round trip from NamespacesTopic to hub returns original",
prop.ForAll(RunResourceConversionTestForNamespacesTopic, NamespacesTopicGenerator()))
properties.TestingRun(t, gopter.NewFormatedReporter(false, 240, os.Stdout))
} | explode_data.jsonl/36546 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 145
} | [
2830,
3393,
1604,
971,
27338,
26406,
62,
4498,
61941,
1249,
19316,
2568,
795,
21884,
1690,
26040,
39838,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
67543,
1669,
728,
73137,
13275,
2271,
9706,
741,
67543,
14535,
1695,
284,
220,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInvalidAgentStarts(t *testing.T) {
report := test.CheckRoutines(t)
defer report()
a, err := NewAgent(&AgentConfig{})
assert.NoError(t, err)
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
defer cancel()
if _, err = a.Dial(ctx, "", "bar"); err != nil && err != ErrRemoteUfragEmpty {
t.Fatal(err)
}
if _, err = a.Dial(ctx, "foo", ""); err != nil && err != ErrRemotePwdEmpty {
t.Fatal(err)
}
if _, err = a.Dial(ctx, "foo", "bar"); err != nil && err != ErrCanceledByCaller {
t.Fatal(err)
}
if _, err = a.Dial(context.TODO(), "foo", "bar"); err != nil && err != ErrMultipleStart {
t.Fatal(err)
}
assert.NoError(t, a.Close())
} | explode_data.jsonl/50567 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 286
} | [
2830,
3393,
7928,
16810,
3479,
82,
1155,
353,
8840,
836,
8,
341,
69931,
1669,
1273,
10600,
49,
28628,
1155,
340,
16867,
1895,
2822,
11323,
11,
1848,
1669,
1532,
16810,
2099,
16810,
2648,
37790,
6948,
35699,
1155,
11,
1848,
692,
20985,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func TestApnianConfig(t *testing.T) {
apnianConfigurer := ApnianConfigurer{"apnian.example", "files/test"}
t.Run("New", func(t *testing.T) {
sut, err := New("apnian.example")
assert.Nil(t, err)
assert.IsType(t, &Apnian{}, sut)
})
t.Run("getApnian", func(t *testing.T) {
sut, err := New("apnian.example")
assert.Nil(t, err)
assert.IsType(t, &Apnian{}, sut)
assert.NotEmpty(t, sut.P8KeyName)
assert.NotEmpty(t, sut.Topic)
assert.NotEmpty(t, sut.APNSKeyID)
assert.NotEmpty(t, sut.TeamID)
})
t.Run("getApnian GOROOT/config path", func(t *testing.T) {
ac := ApnianConfigurer{"apnian.example.pathtest", "files/test"}
sut, err := ac.getApnian()
assert.Nil(t, err)
assert.IsType(t, &Apnian{}, sut)
assert.NotEmpty(t, sut.P8KeyName)
assert.NotEmpty(t, sut.Topic)
assert.NotEmpty(t, sut.APNSKeyID)
assert.NotEmpty(t, sut.TeamID)
})
t.Run("getApnian bad config name returns error", func(t *testing.T) {
ac := ApnianConfigurer{"apnian.example.nope", "."}
sut, err := ac.getApnian()
assert.Nil(t, sut)
assert.Error(t, err)
})
t.Run("getApnian bad config file returns error", func(t *testing.T) {
ac := ApnianConfigurer{"apnian.badexample", "../files/test"}
sut, err := ac.getApnian()
assert.Nil(t, sut)
assert.Error(t, err)
})
t.Run("AuthKeyPath()", func(t *testing.T) {
sut, err := apnianConfigurer.getApnian()
keyPath := sut.AuthKeyPath()
info, err := os.Stat(keyPath)
assert.Nil(t, err)
assert.Equal(t, info.Name(), sut.P8KeyName)
})
t.Run("AuthKey()", func(t *testing.T) {
sut, err := apnianConfigurer.getApnian()
authKey, err := sut.AuthKey()
assert.Nil(t, err)
assert.IsType(t, &ecdsa.PrivateKey{}, authKey)
assert.NotNil(t, authKey)
})
t.Run("AuthKey() bad key", func(t *testing.T) {
ac := ApnianConfigurer{"apnian.badkey", "../files/test"}
sut, err := ac.getApnian()
_, err2 := sut.AuthKey()
assert.Nil(t, err)
assert.NotNil(t, err2)
})
t.Run("Token()", func(t *testing.T) {
sut, err := apnianConfigurer.getApnian()
toke, err2 := sut.Token()
assert.Nil(t, err)
assert.Nil(t, err2)
assert.IsType(t, &token.Token{}, toke)
assert.Equal(t, sut.APNSKeyID, toke.KeyID)
assert.Equal(t, sut.TeamID, toke.TeamID)
})
t.Run("Token() bad key", func(t *testing.T) {
ac := ApnianConfigurer{"apnian.badkey", "../files/test"}
sut, err := ac.getApnian()
_, err2 := sut.Token()
assert.Nil(t, err)
assert.NotNil(t, err2)
})
t.Run("Notification()", func(t *testing.T) {
deviceID := "123456"
sut, err := apnianConfigurer.getApnian()
payload := testAPS()
notification := sut.Notification(deviceID, payload)
assert.Nil(t, err)
assert.Equal(t, payload.ToJsonBytes(), notification.Payload)
})
t.Run("loadClient() not yet called", func(t *testing.T) {
sut, err := New("apnian.example")
assert.Nil(t, err)
assert.Nil(t, sut.Client)
})
t.Run("loadClient() called", func(t *testing.T) {
sut, err := apnianConfigurer.getApnian()
err2 := sut.loadClient()
assert.Nil(t, err)
assert.Nil(t, err2)
assert.NotNil(t, sut.Client)
assert.IsType(t, &apns2.Client{}, sut.Client)
})
t.Run("Push() with invalid provider token", func(t *testing.T) {
sut, err := apnianConfigurer.getApnian()
aps := GenerateAPS(alert, sound, linkUrl)
res, err2 := sut.Push("123456", aps)
assert.Nil(t, err)
assert.Nil(t, err2)
assert.Equal(t, res.StatusCode, 403)
assert.Equal(t, res.Reason, "InvalidProviderToken")
})
} | explode_data.jsonl/21000 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1565
} | [
2830,
3393,
10611,
77,
1103,
2648,
1155,
353,
8840,
836,
8,
341,
69898,
77,
1103,
62824,
1669,
5232,
77,
1103,
62824,
4913,
391,
77,
1103,
7724,
497,
330,
7198,
12697,
63159,
3244,
16708,
445,
3564,
497,
2915,
1155,
353,
8840,
836,
8,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetTickFrequencyCount(t *testing.T) {
freq := GetTickFrequency()
if freq == 0 {
t.Error("GetTickFrequency expected non zero.")
}
count := GetTickCount()
if count == 0 {
t.Error("GetTickCount expected non zero.")
}
} | explode_data.jsonl/81762 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 90
} | [
2830,
3393,
1949,
22213,
38614,
2507,
1155,
353,
8840,
836,
8,
341,
1166,
2958,
1669,
2126,
22213,
38614,
741,
743,
20895,
621,
220,
15,
341,
197,
3244,
6141,
445,
1949,
22213,
38614,
3601,
2477,
7168,
13053,
197,
630,
18032,
1669,
2126... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 3 |
func TestTerragruntRemoteStateCodegenGeneratesBackendBlock(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "remote-state", "base")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
// If the state file was written as foo.tfstate, that means it wrote out the local backend config.
assert.True(t, fileIsInFolder(t, "foo.tfstate", generateTestCase))
} | explode_data.jsonl/10167 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 201
} | [
2830,
3393,
51402,
68305,
3850,
24703,
1397,
2078,
4370,
5531,
973,
29699,
4713,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
3174,
13220,
16458,
1669,
26054,
22363,
50320,
42635,
41486,
10020,
11085,
7944,
11,
330,
18147,
20733,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRanger(t *testing.T) {
tests := []struct {
name string
input string
expected []text.Range
}{
{
name: "empty object",
input: `{}`,
expected: nil,
},
{
name: "flat object",
input: `{
"title": "This is a title.",
"description": "This is a description."
}`,
expected: []text.Range{
{16, 32},
{55, 77},
},
},
{
name: "nested object",
input: `{
"nested": {
"title": "This is a title.",
"description": "This is a description."
}
}`,
expected: []text.Range{
{33, 49},
{73, 95},
},
},
{
name: "more nested object",
input: `{
"nested": {
"title": "This is a title.",
"description": "This is a description.",
"nested2": {
"nested3": "Hello."
}
}
}`,
expected: []text.Range{
{33, 49},
{73, 95},
{134, 140},
},
},
{
name: "flat array",
input: `["Hello", "Bob"]`,
expected: []text.Range{
{2, 7},
{11, 14},
},
},
{
name: "nested object with array field",
input: `{
"nested": {
"field": ["A", "BB", "CCC"]
}
}`,
expected: []text.Range{
{34, 35},
{39, 41},
{45, 48},
},
},
{
name: "object with umlauts",
input: `{
"nested": {
"key1": "Hällo.",
"key2": "Müst.",
"key3": "Göödbye."
}
}`,
expected: []text.Range{
{32, 38},
{55, 60},
{77, 85},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ranger := json.Ranger()
rangeChan, errChan := ranger.Ranges(context.Background(), strings.NewReader(test.input))
var ranges []text.Range
for rang := range rangeChan {
ranges = append(ranges, rang)
}
assert.Empty(t, errChan)
assert.Equal(t, test.expected, ranges)
})
}
} | explode_data.jsonl/58815 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 942
} | [
2830,
3393,
49,
4003,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
198,
197,
22427,
262,
914,
198,
197,
42400,
3056,
1318,
24783,
198,
197,
59403,
197,
197,
515,
298,
11609,
25,
257,
330,
3194,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestReturnLeaseForJobInQueueIsNoop(t *testing.T) {
withRepository(func(r *RedisJobRepository) {
job := addTestJob(t, r, "queue1")
returned, e := r.ReturnLease("cluster2", job.Id)
assert.Nil(t, e)
assert.Nil(t, returned)
})
} | explode_data.jsonl/32045 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 105
} | [
2830,
3393,
5598,
2304,
519,
2461,
12245,
641,
7554,
3872,
2753,
453,
1155,
353,
8840,
836,
8,
341,
46948,
4624,
18552,
2601,
353,
48137,
12245,
4624,
8,
341,
197,
68577,
1669,
912,
2271,
12245,
1155,
11,
435,
11,
330,
4584,
16,
5130,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWindowRepoAndDuration1(t *testing.T) {
_, test, cleanup := setup(t, 30*time.Second, 20, 10)
defer cleanup()
// Commits are 5 seconds apart, so the last 6 commits are within 30
// seconds. In this case the repo will win out and the last 10 commits
// (index 10-19) will be in range.
test(0, false)
test(9, false)
test(10, true)
test(11, true)
test(19, true)
} | explode_data.jsonl/74000 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 136
} | [
2830,
3393,
4267,
25243,
3036,
12945,
16,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1273,
11,
21290,
1669,
6505,
1155,
11,
220,
18,
15,
77053,
32435,
11,
220,
17,
15,
11,
220,
16,
15,
340,
16867,
21290,
2822,
197,
322,
6804,
1199,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCommands(t *testing.T) {
for _, cmdName := range availableCmdNames() {
ui := &testterm.FakeUI{}
config := &configuration.Configuration{}
configRepo := testconfig.FakeConfigRepository{}
manifestRepo := &testmanifest.FakeManifestRepository{}
repoLocator := api.NewRepositoryLocator(config, configRepo, map[string]net.Gateway{
"auth": net.NewUAAGateway(),
"cloud-controller": net.NewCloudControllerGateway(),
"uaa": net.NewUAAGateway(),
})
cmdFactory := commands.NewFactory(ui, config, configRepo, manifestRepo, repoLocator)
cmdRunner := &FakeRunner{cmdFactory: cmdFactory, t: t}
app, _ := NewApp(cmdRunner)
app.Run([]string{"", cmdName})
assert.Equal(t, cmdRunner.cmdName, cmdName)
}
} | explode_data.jsonl/27044 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 293
} | [
2830,
3393,
30479,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
5439,
675,
1669,
2088,
2500,
15613,
7980,
368,
341,
197,
37278,
1669,
609,
1944,
4991,
991,
726,
2275,
16094,
197,
25873,
1669,
609,
21138,
17334,
16094,
197,
25873,
25243,
16... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestJobsController_Create_HappyPath_FluxMonitorSpec(t *testing.T) {
rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t)
defer assertMocksCalled()
app, cleanup := cltest.NewApplicationWithKey(t,
eth.NewClientWith(rpcClient, gethClient),
)
defer cleanup()
require.NoError(t, app.Start())
gethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(cltest.EmptyMockSubscription(), nil)
client := app.NewHTTPClient()
tomlBytes := cltest.MustReadFile(t, "testdata/flux-monitor-spec.toml")
body, _ := json.Marshal(models.CreateJobSpecRequest{
TOML: string(tomlBytes),
})
response, cleanup := client.Post("/v2/jobs", bytes.NewReader(body))
defer cleanup()
require.Equal(t, http.StatusOK, response.StatusCode)
jb := job.SpecDB{}
require.NoError(t, app.Store.DB.Preload("FluxMonitorSpec").First(&jb).Error)
jobSpec := job.SpecDB{}
err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &jobSpec)
assert.NoError(t, err)
t.Log()
assert.Equal(t, "example flux monitor spec", jb.Name.ValueOrZero())
assert.NotNil(t, jobSpec.PipelineSpec.DotDagSource)
assert.Equal(t, models.EIP55Address("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42"), jb.FluxMonitorSpec.ContractAddress)
assert.Equal(t, time.Second, jb.FluxMonitorSpec.IdleTimerPeriod)
assert.Equal(t, false, jb.FluxMonitorSpec.IdleTimerDisabled)
assert.Equal(t, int32(2), jb.FluxMonitorSpec.Precision)
assert.Equal(t, float32(0.5), jb.FluxMonitorSpec.Threshold)
} | explode_data.jsonl/1624 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 592
} | [
2830,
3393,
40667,
2051,
34325,
2039,
11144,
1820,
1400,
62859,
30098,
8327,
1155,
353,
8840,
836,
8,
341,
7000,
3992,
2959,
11,
633,
71,
2959,
11,
8358,
2060,
72577,
20960,
1669,
1185,
1944,
7121,
65390,
11571,
16056,
39076,
90206,
1155,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGcpPluginSpec_IsValid(t *testing.T) {
type testCase struct {
input *GcpPluginSpec
expected bool
}
cases := []testCase{
{
// Neither IAP or BasicAuth is set
input: &GcpPluginSpec{
Auth: &Auth{},
},
expected: false,
},
{
// Both IAP and BasicAuth set
input: &GcpPluginSpec{
Auth: &Auth{
BasicAuth: &BasicAuth{
Username: "jlewi",
Password: &kfdeftypes.SecretRef{
Name: "somesecret",
},
},
IAP: &IAP{
OAuthClientId: "jlewi",
OAuthClientSecret: &kfdeftypes.SecretRef{
Name: "somesecret",
},
},
},
},
expected: false,
},
// Validate basic auth.
{
input: &GcpPluginSpec{
Auth: &Auth{
BasicAuth: &BasicAuth{
Username: "jlewi",
Password: &kfdeftypes.SecretRef{
Name: "somesecret",
},
},
},
},
expected: true,
},
{
input: &GcpPluginSpec{
Auth: &Auth{
BasicAuth: &BasicAuth{
Username: "jlewi",
},
},
},
expected: false,
},
{
input: &GcpPluginSpec{
Auth: &Auth{
BasicAuth: &BasicAuth{
Password: &kfdeftypes.SecretRef{
Name: "somesecret",
},
},
},
},
expected: false,
},
// End Validate basic auth.
// End Validate IAP.
{
input: &GcpPluginSpec{
Auth: &Auth{
IAP: &IAP{
OAuthClientId: "jlewi",
OAuthClientSecret: &kfdeftypes.SecretRef{
Name: "somesecret",
},
},
},
},
expected: true,
},
{
input: &GcpPluginSpec{
Auth: &Auth{
IAP: &IAP{
OAuthClientId: "jlewi",
},
},
},
expected: false,
},
{
input: &GcpPluginSpec{
Auth: &Auth{
IAP: &IAP{
OAuthClientSecret: &kfdeftypes.SecretRef{
Name: "somesecret",
},
},
},
},
expected: false,
},
{
input: &GcpPluginSpec{
Hostname: "this-kfApp-name-is-very-long.endpoints.my-gcp-project-for-kubeflow.cloud.goog",
},
expected: false,
},
}
for _, c := range cases {
isValid, _ := c.input.IsValid()
// Test they are equal
if isValid != c.expected {
pSpec := kfutils.PrettyPrint(c.input)
t.Errorf("Spec %v;\n IsValid Got:%v %v", pSpec, isValid, c.expected)
}
}
} | explode_data.jsonl/24314 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1202
} | [
2830,
3393,
38,
4672,
11546,
8327,
31879,
4088,
1155,
353,
8840,
836,
8,
1476,
13158,
54452,
2036,
341,
197,
22427,
262,
353,
38,
4672,
11546,
8327,
198,
197,
42400,
1807,
198,
197,
630,
1444,
2264,
1669,
3056,
66194,
515,
197,
197,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestCounter_Increment(t *testing.T) {
c := getCounter("inc")
if c.Count() != 0 {
t.Error("Count should start at 0, got ", c.Count())
}
c.Increment()
if c.Count() != 1 {
t.Error("Count should be 1, got ", c.Count())
}
c.Increment()
if c.Count() != 2 {
t.Error("Count should be 2, got ", c.Count())
}
} | explode_data.jsonl/35710 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 133
} | [
2830,
3393,
14099,
25972,
13477,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
633,
14099,
445,
2840,
1138,
743,
272,
6134,
368,
961,
220,
15,
341,
197,
3244,
6141,
445,
2507,
1265,
1191,
518,
220,
15,
11,
2684,
3670,
272,
6134,
2398,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestTypeSystem_ObjectInterfacesMustBeArray_AcceptsAnObjectTypeWithArrayInterfaces(t *testing.T) {
anotherInterfaceType := graphql.NewInterface(graphql.InterfaceConfig{
Name: "AnotherInterface",
ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object {
return nil
},
Fields: graphql.Fields{
"f": &graphql.Field{
Type: graphql.String,
},
},
})
_, err := schemaWithFieldType(graphql.NewObject(graphql.ObjectConfig{
Name: "SomeObject",
Interfaces: (graphql.InterfacesThunk)(func() []*graphql.Interface {
return []*graphql.Interface{anotherInterfaceType}
}),
Fields: graphql.Fields{
"f": &graphql.Field{
Type: graphql.String,
},
},
}))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
} | explode_data.jsonl/79150 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 296
} | [
2830,
3393,
929,
2320,
27839,
41066,
31776,
3430,
1857,
1566,
66,
57771,
2082,
49530,
2354,
1857,
41066,
1155,
353,
8840,
836,
8,
341,
197,
41963,
5051,
929,
1669,
48865,
7121,
5051,
24312,
1470,
41065,
2648,
515,
197,
21297,
25,
330,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTransportClosesBodyOnError(t *testing.T) {
setParallel(t)
defer afterTest(t)
readBody := make(chan error, 1)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
_, err := ioutil.ReadAll(r.Body)
readBody <- err
}))
defer ts.Close()
c := ts.Client()
fakeErr := errors.New("fake error")
didClose := make(chan bool, 1)
req, _ := NewRequest("POST", ts.URL, struct {
io.Reader
io.Closer
}{
io.MultiReader(io.LimitReader(neverEnding('x'), 1<<20), errorReader{fakeErr}),
closerFunc(func() error {
select {
case didClose <- true:
default:
}
return nil
}),
})
res, err := c.Do(req)
if res != nil {
defer res.Body.Close()
}
if err == nil || !strings.Contains(err.Error(), fakeErr.Error()) {
t.Fatalf("Do error = %v; want something containing %q", err, fakeErr.Error())
}
select {
case err := <-readBody:
if err == nil {
t.Errorf("Unexpected success reading request body from handler; want 'unexpected EOF reading trailer'")
}
case <-time.After(5 * time.Second):
t.Error("timeout waiting for server handler to complete")
}
select {
case <-didClose:
default:
t.Errorf("didn't see Body.Close")
}
} | explode_data.jsonl/14130 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 467
} | [
2830,
3393,
27560,
34,
49341,
5444,
74945,
1155,
353,
8840,
836,
8,
341,
8196,
16547,
1155,
340,
16867,
1283,
2271,
1155,
340,
37043,
5444,
1669,
1281,
35190,
1465,
11,
220,
16,
340,
57441,
1669,
54320,
70334,
7121,
5475,
7,
3050,
9626,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_SalesReports_Filter_IsInValidReportType(t *testing.T) {
filter := &SalesReportsFilter{}
date, _ := time.Parse("2006-01-02", "2020-05-05")
filter.Daily().SubTypeSummary().Version10().SetReportDate(date)
err := filter.IsValid()
assert.Error(t, err)
assert.Equal(t, "SalesReportsFilter@IsValid: ReportType is required", err.Error())
} | explode_data.jsonl/60790 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 121
} | [
2830,
3393,
1098,
3831,
23748,
68935,
31879,
641,
4088,
10361,
929,
1155,
353,
8840,
836,
8,
341,
50108,
1669,
609,
35418,
23748,
5632,
16094,
44086,
11,
716,
1669,
882,
8937,
445,
17,
15,
15,
21,
12,
15,
16,
12,
15,
17,
497,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMinZero(t *testing.T) {
{
a := Bitmap{0xffffffffffffffff, 0xffffffffffffffff, 0xf0ffffffffffff0f}
v, ok := a.MinZero()
assert.True(t, ok)
assert.Equal(t, 64+64+4, int(v))
assert.False(t, a.Contains(v))
}
{
a := Bitmap{0xffffffffffffffff, 0xffffffffffffffff}
v, ok := a.MinZero()
assert.False(t, ok)
assert.Equal(t, 0, int(v))
}
} | explode_data.jsonl/39893 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 177
} | [
2830,
3393,
6217,
17999,
1155,
353,
8840,
836,
8,
341,
197,
515,
197,
11323,
1669,
17533,
90,
15,
41798,
53697,
11,
220,
15,
41798,
53697,
11,
220,
15,
5848,
15,
53697,
7238,
15,
69,
532,
197,
5195,
11,
5394,
1669,
264,
17070,
17999... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDeviceTokenReturnsErrorIfCannotDeserializeDeviceToken(t *testing.T) {
gibberishJSON := strings.Replace(MockDeviceTokenResponse, "expires_in", ";:\"gibberish", -1)
sender := mocks.NewSender()
body := mocks.NewBody(gibberishJSON)
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK"))
_, err := WaitForUserCompletion(sender, deviceCode())
if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) {
t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
} | explode_data.jsonl/27504 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 227
} | [
2830,
3393,
6985,
3323,
16446,
1454,
2679,
17444,
64465,
6985,
3323,
1155,
353,
8840,
836,
8,
341,
3174,
579,
652,
812,
5370,
1669,
9069,
20858,
66436,
6985,
3323,
2582,
11,
330,
48203,
1243,
497,
18963,
75035,
70,
579,
652,
812,
497,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestCreateAffinityGroup(t *testing.T) {
apiName := "createAffinityGroup"
server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
responses, err := ParseAsyncResponse(apiName, "AffinityGroupService", *request)
if err != nil {
t.Errorf("Failed to parse response, due to: %v", err)
}
fmt.Fprintln(writer, responses)
}))
defer server.Close()
client := NewAsyncClient(server.URL, "APIKEY", "SECRETKEY", false)
p := client.AffinityGroup.NewCreateAffinityGroupParams("testAffinityGroup", "host affinity")
ag, err := client.AffinityGroup.CreateAffinityGroup(p)
if err != nil {
t.Errorf("Failed to disassociate IP addres due to: %v", err.Error())
return
}
if ag.Name != "testAffinityGroup" {
t.Errorf("Failed to create affinity group of name: testAffinityGroup")
}
} | explode_data.jsonl/74174 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 293
} | [
2830,
3393,
4021,
25841,
13489,
2808,
1155,
353,
8840,
836,
8,
341,
54299,
675,
1669,
330,
3182,
25841,
13489,
2808,
698,
41057,
1669,
54320,
70334,
7121,
5475,
19886,
89164,
18552,
38356,
1758,
37508,
11,
1681,
353,
1254,
9659,
8,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestInterpreterZip(t *testing.T) {
s, err := parseFile("src/parse/asp/test_data/interpreter/zip.build")
require.NoError(t, err)
expected := pyList{
pyList{pyInt(1), pyInt(4), pyInt(7)},
pyList{pyInt(2), pyInt(5), pyInt(8)},
pyList{pyInt(3), pyInt(6), pyInt(9)},
}
assert.EqualValues(t, expected, s.Lookup("x"))
} | explode_data.jsonl/81070 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 149
} | [
2830,
3393,
58426,
31047,
1155,
353,
8840,
836,
8,
341,
1903,
11,
1848,
1669,
4715,
1703,
445,
3548,
14,
6400,
14,
13367,
12697,
1769,
14,
90554,
14,
9964,
13239,
1138,
17957,
35699,
1155,
11,
1848,
340,
42400,
1669,
4510,
852,
515,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestCreateProductReview exercises DB.CreateProductReview against a real
// Postgres instance set up through sqltest migrations. It seeds one product,
// then runs table-driven cases covering success, each validation failure,
// duplicate review IDs, a missing product, and canceled/expired contexts.
// On success the review is read back via GetProductReview and compared,
// ignoring the server-assigned timestamps.
func TestCreateProductReview(t *testing.T) {
	t.Parallel()
	migration := sqltest.New(t, sqltest.Options{
		Force: *force,
		Path:  "../../migrations",
	})
	pool := migration.Setup(context.Background(), "")
	db := &DB{
		Postgres: pool,
	}
	// Seed the single product that the success cases reference by ID.
	createProducts(t, db, []inventory.CreateProductParams{
		{
			ID:          "product",
			Name:        "Original name",
			Description: "This is the original description",
			Price:       250,
		},
	})
	type args struct {
		ctx    context.Context
		params inventory.CreateProductReviewDBParams
	}
	tests := []struct {
		name    string
		args    args
		want    *inventory.ProductReview
		wantErr string
	}{
		{
			name: "success",
			args: args{
				ctx: context.Background(),
				params: inventory.CreateProductReviewDBParams{
					ID: "review1",
					CreateProductReviewParams: inventory.CreateProductReviewParams{
						ProductID:   "product",
						ReviewerID:  "reviewer",
						Score:       5,
						Title:       "title",
						Description: "review",
					},
				},
			},
			// CreatedAt/ModifiedAt here are placeholders; they are excluded
			// from the comparison below.
			want: &inventory.ProductReview{
				ID:          "review1",
				ProductID:   "product",
				ReviewerID:  "reviewer",
				Score:       5,
				Title:       "title",
				Description: "review",
				CreatedAt:   time.Now(),
				ModifiedAt:  time.Now(),
			},
		},
		{
			name: "invalid_id",
			args: args{
				ctx: context.Background(),
				params: inventory.CreateProductReviewDBParams{
					ID: "",
					CreateProductReviewParams: inventory.CreateProductReviewParams{
						ProductID:   "product",
						ReviewerID:  "reviewer",
						Score:       5,
						Title:       "title",
						Description: "review",
					},
				},
			},
			wantErr: "invalid product review ID",
		},
		{
			name: "invalid_title",
			args: args{
				ctx: context.Background(),
				params: inventory.CreateProductReviewDBParams{
					ID: "xyz",
					CreateProductReviewParams: inventory.CreateProductReviewParams{
						ProductID:   "product",
						ReviewerID:  "reviewer",
						Score:       5,
						Title:       "",
						Description: "review",
					},
				},
			},
			wantErr: "invalid title",
		},
		{
			// Score 15 is out of range; presumably valid scores are bounded
			// (e.g. 1-5) — see the inventory package validation.
			name: "invalid_score",
			args: args{
				ctx: context.Background(),
				params: inventory.CreateProductReviewDBParams{
					ID: "xyz",
					CreateProductReviewParams: inventory.CreateProductReviewParams{
						ProductID:   "product",
						ReviewerID:  "reviewer",
						Score:       15,
						Title:       "abc",
						Description: "review",
					},
				},
			},
			wantErr: "invalid score",
		},
		{
			// Reuses the ID inserted by the "success" case above, so this
			// case depends on running after it (map/slice order here is fixed).
			name: "review1_already_exists",
			args: args{
				ctx: context.Background(),
				params: inventory.CreateProductReviewDBParams{
					ID: "review1",
					CreateProductReviewParams: inventory.CreateProductReviewParams{
						ProductID:   "product",
						ReviewerID:  "reviewer",
						Score:       5,
						Title:       "title",
						Description: "review",
					},
				},
			},
			wantErr: "product review already exists",
		},
		{
			name: "product_id_not_found",
			args: args{
				ctx: context.Background(),
				params: inventory.CreateProductReviewDBParams{
					ID: "review_has_no_product_on_database",
					CreateProductReviewParams: inventory.CreateProductReviewParams{
						ProductID:   "product_not_found",
						ReviewerID:  "reviewer123",
						Score:       3,
						Title:       "review title",
						Description: "review description",
					},
				},
			},
			wantErr: "cannot find product to create review",
		},
		{
			name: "canceled_ctx",
			args: args{
				ctx: canceledContext(),
			},
			wantErr: "context canceled",
		},
		{
			name: "deadline_exceeded_ctx",
			args: args{
				ctx: deadlineExceededContext(),
			},
			wantErr: "context deadline exceeded",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := db.CreateProductReview(tt.args.ctx, tt.args.params)
			if err == nil && tt.wantErr != "" || err != nil && tt.wantErr != err.Error() {
				t.Errorf("DB.CreateProductReview() error = %v, wantErr %v", err, tt.wantErr)
			}
			if err != nil {
				return
			}
			// Reusing GetProduct to check if the product was created successfully.
			got, err := db.GetProductReview(tt.args.ctx, tt.args.params.ID)
			if err != nil {
				t.Errorf("DB.GetProduct() error = %v", err)
			}
			// Ignore CreatedAt and ModifiedAt before comparing structs: they
			// are assigned by the database, not by the test.
			if !cmp.Equal(tt.want, got, cmpopts.IgnoreFields(inventory.ProductReview{}, "CreatedAt", "ModifiedAt")) {
				t.Errorf("value returned by DB.GetProduct() doesn't match: %v", cmp.Diff(tt.want, got))
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2040
} | [
2830,
3393,
4021,
4816,
19432,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
2109,
5033,
1669,
5704,
1944,
7121,
1155,
11,
5704,
1944,
22179,
515,
197,
197,
18573,
25,
353,
8833,
345,
197,
69640,
25,
220,
10208,
76,
17824,
756... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestInvariants(t *testing.T) {
app := simapp.Setup(t, false)
app.Commit()
app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: app.LastBlockHeight() + 1}})
require.Equal(t, app.CrisisKeeper.InvCheckPeriod(), uint(5))
// SimApp has 11 registered invariants
orgInvRoutes := app.CrisisKeeper.Routes()
app.CrisisKeeper.RegisterRoute("testModule", "testRoute", func(sdk.Context) (string, bool) { return "", false })
require.Equal(t, len(app.CrisisKeeper.Routes()), len(orgInvRoutes)+1)
} | explode_data.jsonl/2236 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 194
} | [
2830,
3393,
641,
54611,
1155,
353,
8840,
836,
8,
341,
28236,
1669,
1643,
676,
39820,
1155,
11,
895,
340,
28236,
53036,
741,
28236,
28467,
4713,
56085,
5855,
9659,
11135,
4713,
90,
4047,
25,
17333,
15110,
15753,
90,
3640,
25,
906,
24682,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestPrometheus registers the Prometheus metrics backend, records a fixed
// set of request/duration/retry observations, then gathers the default
// registry and checks each metric family's value and label set.
func TestPrometheus(t *testing.T) {
	prometheusRegistry := RegisterPrometheus(&types.Prometheus{})
	if !prometheusRegistry.IsEnabled() {
		t.Errorf("PrometheusRegistry should return true for IsEnabled()")
	}
	// Two requests, two duration observations, one retry — the expected
	// counter/sample values below follow directly from these calls.
	prometheusRegistry.ReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
	prometheusRegistry.ReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
	prometheusRegistry.ReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
	prometheusRegistry.ReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
	prometheusRegistry.RetriesCounter().With("service", "test").Add(1)
	metricsFamilies, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		t.Fatalf("could not gather metrics families: %s", err)
	}
	// Each case names a metric family, the exact label set it must carry,
	// and an assertion over its gathered value.
	tests := []struct {
		name   string
		labels map[string]string
		assert func(*dto.MetricFamily)
	}{
		{
			name: reqsTotalName,
			labels: map[string]string{
				"code":    "200",
				"method":  http.MethodGet,
				"service": "test",
			},
			assert: func(family *dto.MetricFamily) {
				cv := family.Metric[0].Counter.GetValue()
				expectedCv := float64(2)
				if cv != expectedCv {
					t.Errorf("gathered metrics do not contain correct value for total requests, got %f expected %f", cv, expectedCv)
				}
			},
		},
		{
			name: reqDurationName,
			labels: map[string]string{
				"service": "test",
				"code":    "200",
			},
			assert: func(family *dto.MetricFamily) {
				sc := family.Metric[0].Histogram.GetSampleCount()
				expectedSc := uint64(2)
				if sc != expectedSc {
					t.Errorf("gathered metrics do not contain correct sample count for request duration, got %d expected %d", sc, expectedSc)
				}
			},
		},
		{
			name: retriesTotalName,
			labels: map[string]string{
				"service": "test",
			},
			assert: func(family *dto.MetricFamily) {
				cv := family.Metric[0].Counter.GetValue()
				expectedCv := float64(1)
				if cv != expectedCv {
					t.Errorf("gathered metrics do not contain correct value for total retries, got %f expected %f", cv, expectedCv)
				}
			},
		},
	}
	for _, test := range tests {
		family := findMetricFamily(test.name, metricsFamilies)
		if family == nil {
			t.Errorf("gathered metrics do not contain %q", test.name)
			continue
		}
		// Every label on the gathered metric must appear in the expected
		// set with the expected value; extras are reported as errors.
		for _, label := range family.Metric[0].Label {
			val, ok := test.labels[*label.Name]
			if !ok {
				t.Errorf("%q metric contains unexpected label %q", test.name, *label.Name)
			} else if val != *label.Value {
				t.Errorf("label %q in metric %q has wrong value %q, expected %q", *label.Name, test.name, *label.Value, val)
			}
		}
		test.assert(family)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1086
} | [
2830,
3393,
35186,
39705,
1155,
353,
8840,
836,
8,
341,
3223,
441,
39705,
15603,
1669,
8451,
35186,
39705,
2099,
9242,
1069,
441,
39705,
6257,
692,
743,
753,
24468,
39705,
15603,
53073,
368,
341,
197,
3244,
13080,
445,
35186,
39705,
15603... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestEphemeralStorageResource verifies that the scheduler cache accounts
// for a pod's ephemeral-storage request: after AddPod the node info must
// reflect the requested 500 units, and after RemovePod the node entry must
// be dropped entirely.
func TestEphemeralStorageResource(t *testing.T) {
	// Enable volumesOnNodeForBalancing to do balanced resource allocation
	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
	nodeName := "node"
	podE := makePodWithEphemeralStorage(nodeName, "500")
	tests := []struct {
		pod       *v1.Pod
		wNodeInfo *NodeInfo
	}{
		{
			pod: podE,
			// Expected node state after adding podE. CPU/memory fall back to
			// the scheduler's default non-zero requests since the pod only
			// requests ephemeral storage.
			wNodeInfo: &NodeInfo{
				requestedResource: &Resource{
					EphemeralStorage: 500,
				},
				nonzeroRequest: &Resource{
					MilliCPU: priorityutil.DefaultMilliCPURequest,
					Memory:   priorityutil.DefaultMemoryRequest,
				},
				TransientInfo:       newTransientSchedulerInfo(),
				allocatableResource: &Resource{},
				pods:                []*v1.Pod{podE},
				usedPorts:           schedutil.HostPortInfo{},
				imageStates:         make(map[string]*ImageStateSummary),
			},
		},
	}
	for i, tt := range tests {
		cache := newSchedulerCache(time.Second, time.Second, nil)
		if err := cache.AddPod(tt.pod); err != nil {
			t.Fatalf("AddPod failed: %v", err)
		}
		n := cache.nodes[nodeName]
		// Compare everything except the cache generation counter.
		deepEqualWithoutGeneration(t, i, n, tt.wNodeInfo)
		if err := cache.RemovePod(tt.pod); err != nil {
			t.Fatalf("RemovePod failed: %v", err)
		}
		// Removing the only pod should delete the node entry from the cache.
		n = cache.nodes[nodeName]
		if n != nil {
			t.Errorf("#%d: expecting pod deleted and nil node info, get=%s", i, n)
		}
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 577
} | [
2830,
3393,
36,
59941,
3253,
5793,
4783,
1155,
353,
8840,
836,
8,
341,
197,
322,
18567,
26282,
1925,
1955,
2461,
37889,
8974,
311,
653,
23831,
5101,
23757,
198,
79138,
12753,
13275,
13859,
42318,
4202,
28197,
17305,
4430,
82,
11265,
497,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestExecutorDriverStatusUpdateAcknowledgement starts an executor driver
// against a mock slave HTTP server, then delivers a StatusUpdateAcknowledgement
// message to the driver and lets it process for one second. The test mainly
// checks that the driver starts, connects, and accepts the ACK without error.
func TestExecutorDriverStatusUpdateAcknowledgement(t *testing.T) {
	setTestEnv(t)
	ch := make(chan bool, 2)
	// Mock Slave process to respond to registration event.
	server := testutil.NewMockSlaveHttpServer(t, func(rsp http.ResponseWriter, req *http.Request) {
		reqPath, err := url.QueryUnescape(req.URL.String())
		assert.NoError(t, err)
		log.Infoln("RCVD request", reqPath)
		rsp.WriteHeader(http.StatusAccepted)
	})
	defer server.Close()
	exec := newTestExecutor(t)
	exec.ch = ch
	exec.t = t
	// Start the driver and confirm it reports a running status.
	driver := newIntegrationTestDriver(t, exec)
	stat, err := driver.Start()
	assert.NoError(t, err)
	assert.Equal(t, mesos.Status_DRIVER_RUNNING, stat)
	driver.setConnected(true)
	defer driver.Stop()
	// Send an ACK from the mock server to the driver's own PID.
	pbMsg := &mesos.StatusUpdateAcknowledgementMessage{
		SlaveId:     util.NewSlaveID(slaveID),
		FrameworkId: util.NewFrameworkID(frameworkID),
		TaskId:      util.NewTaskID("test-task-001"),
		Uuid:        []byte(uuid.NewRandom().String()),
	}
	c := testutil.NewMockMesosClient(t, server.PID)
	c.SendMessage(driver.self, pbMsg)
	// Give the driver time to process the message before the deferred Stop.
	<-time.After(time.Second * 1)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 427
} | [
2830,
3393,
25255,
11349,
2522,
4289,
90236,
29564,
1155,
353,
8840,
836,
8,
341,
8196,
2271,
14359,
1155,
340,
23049,
1669,
1281,
35190,
1807,
11,
220,
17,
340,
197,
322,
14563,
59368,
1882,
311,
5889,
311,
12227,
1538,
624,
41057,
166... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestHandshakeClientECDHERSAChaCha20(t *testing.T) {
config := testConfig.Clone()
config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305}
test := &clientTest{
name: "ECDHE-RSA-CHACHA20-POLY1305",
args: []string{"-cipher", "ECDHE-RSA-CHACHA20-POLY1305"},
config: config,
}
runClientTestTLS12(t, test)
} | explode_data.jsonl/27703 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 154
} | [
2830,
3393,
2314,
29661,
2959,
36,
6484,
3012,
7778,
95971,
95971,
17,
15,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
1273,
2648,
64463,
741,
25873,
727,
10558,
62898,
288,
284,
3056,
2496,
16,
21,
90,
45439,
2089,
6484,
1799,
76994,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestApp_AfterFunc verifies that the app's After hook runs after the
// subcommand action, and that an error returned by After is propagated as
// the Run error. The opCounts.Total ordering shows the subcommand runs
// first (1) and After second (2) in both the success and failure paths.
func TestApp_AfterFunc(t *testing.T) {
	counts := &opCounts{}
	afterError := fmt.Errorf("fail")
	var err error
	app := &App{
		After: func(c *Context) error {
			counts.Total++
			counts.After = counts.Total
			// The "opt" flag selects whether After succeeds or fails.
			s := c.String("opt")
			if s == "fail" {
				return afterError
			}
			return nil
		},
		Commands: []*Command{
			{
				Name: "sub",
				Action: func(c *Context) error {
					counts.Total++
					counts.SubCommand = counts.Total
					return nil
				},
			},
		},
		Flags: []Flag{
			&StringFlag{Name: "opt"},
		},
	}
	// run with the After() func succeeding
	err = app.Run([]string{"command", "--opt", "succeed", "sub"})
	if err != nil {
		t.Fatalf("Run error: %s", err)
	}
	if counts.After != 2 {
		t.Errorf("After() not executed when expected")
	}
	if counts.SubCommand != 1 {
		t.Errorf("Subcommand not executed when expected")
	}
	// reset
	counts = &opCounts{}
	// run with the After() func failing
	err = app.Run([]string{"command", "--opt", "fail", "sub"})
	// should be the same error produced by the After func
	if err != afterError {
		t.Errorf("Run error expected, but not received")
	}
	if counts.After != 2 {
		t.Errorf("After() not executed when expected")
	}
	if counts.SubCommand != 1 {
		t.Errorf("Subcommand not executed when expected")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 510
} | [
2830,
3393,
2164,
1566,
1046,
9626,
1155,
353,
8840,
836,
8,
341,
18032,
82,
1669,
609,
453,
63731,
16094,
197,
10694,
1454,
1669,
8879,
13080,
445,
18403,
1138,
2405,
1848,
1465,
271,
28236,
1669,
609,
2164,
515,
197,
197,
6025,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestDeletePodWithTombstone checks that the pod controller handles a
// cache.DeletedFinalStateUnknown tombstone (the informer's "missed delete"
// signal) the same way as a direct pod deletion, producing the expected
// controller counters.
func TestDeletePodWithTombstone(t *testing.T) {
	labels := map[string]string{
		"app": "test-pod",
	}
	podObj := createPod("test-pod", "test-namespace", "0", "1.2.3.4", labels, NonHostNetwork, corev1.PodRunning)
	// No exec calls are expected; VerifyCalls asserts this at teardown.
	calls := []testutils.TestCmd{}
	fexec := testutils.GetFakeExecWithScripts(calls)
	defer testutils.VerifyCalls(t, fexec, calls)
	f := newFixture(t, fexec)
	stopCh := make(chan struct{})
	defer close(stopCh)
	f.newPodController(stopCh)
	podKey := getKey(podObj, t)
	// Wrap the pod in a tombstone as the informer would on a missed delete.
	tombstone := cache.DeletedFinalStateUnknown{
		Key: podKey,
		Obj: podObj,
	}
	f.podController.deletePod(tombstone)
	// Expected counter triple; meaning defined by expectedValues in this file.
	testCases := []expectedValues{
		{0, 0, 1},
	}
	checkPodTestResult("TestDeletePodWithTombstone", f, testCases)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 286
} | [
2830,
3393,
6435,
23527,
2354,
51,
2855,
10812,
1155,
353,
8840,
836,
8,
341,
95143,
1669,
2415,
14032,
30953,
515,
197,
197,
1,
676,
788,
330,
1944,
2268,
347,
756,
197,
532,
3223,
347,
5261,
1669,
1855,
23527,
445,
1944,
2268,
347,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestSemiCircleDistribution is a statistical test: for each point
// generator it draws a large sample and checks that points are split
// roughly evenly (within 1%) between left/right and up/down halves.
// Note: 1e8 samples per generator makes this test slow by design.
func TestSemiCircleDistribution(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	count := 100000000
	generators := []struct {
		name      string
		generator PointGenerator
	}{
		{
			name:      "GeneratePointMax",
			generator: GeneratePointMax,
		},
		{
			name:      "GeneratePointRejection",
			generator: GeneratePointRejection,
		},
		{
			name:      "GeneratePointSqrt",
			generator: GeneratePointSqrt,
		},
		{
			name:      "GeneratePointTriangle",
			generator: GeneratePointTriangle,
		},
	}
	for _, g := range generators {
		t.Run(g.name, func(t *testing.T) {
			points := make([]Point, count)
			for i := 0; i < count; i++ {
				points[i] = g.generator()
			}
			// Count how many points land in each half-plane; X==0 or Y==0
			// falls into neither bucket.
			countLeft := 0
			countRight := 0
			countUp := 0
			countDown := 0
			for _, point := range points {
				if point.X < 0 {
					countLeft++
				}
				if point.X > 0 {
					countRight++
				}
				if point.Y < 0 {
					countDown++
				}
				if point.Y > 0 {
					countUp++
				}
			}
			// Both ratios must lie within [0.99, 1.01] for a balanced
			// distribution; with 1e8 samples a correct generator passes
			// with overwhelming probability.
			assert.GreaterOrEqual(t, float64(countLeft)/float64(countRight), .99)
			assert.LessOrEqual(t, float64(countLeft)/float64(countRight), 1.01)
			assert.GreaterOrEqual(t, float64(countUp)/float64(countDown), .99)
			assert.LessOrEqual(t, float64(countUp)/float64(countDown), 1.01)
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 586
} | [
2830,
3393,
50,
21780,
25199,
62377,
1155,
353,
8840,
836,
8,
341,
7000,
437,
5732,
291,
9730,
13244,
1005,
55832,
83819,
2398,
18032,
1669,
220,
16,
15,
15,
15,
15,
15,
15,
15,
15,
271,
3174,
798,
2973,
1669,
3056,
1235,
341,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestAccessToken(t *testing.T) {
inputDetails := config.ArtifactoryDetails{
Url: "http://localhost:8080/artifactory",
DistributionUrl: "http://localhost:8080/distribution",
User: "", Password: "",
ApiKey: "", SshKeyPath: "", AccessToken: "accessToken",
ServerId: "test",
IsDefault: false}
configAndTest(t, &inputDetails)
} | explode_data.jsonl/23160 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 148
} | [
2830,
3393,
37649,
1155,
353,
8840,
836,
8,
341,
22427,
7799,
1669,
2193,
50064,
333,
2919,
7799,
515,
197,
197,
2864,
25,
1797,
330,
1254,
1110,
8301,
25,
23,
15,
23,
15,
55549,
333,
2919,
756,
197,
10957,
13107,
2864,
25,
330,
125... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestListIngressFilterClass verifies that the adapter's ingress-class
// filter selects the right subset of fixture ingresses. An empty or nil
// filter list means "all classes"; an empty-string entry matches ingresses
// with no class annotation (the fixture01/-rg01 pair).
func TestListIngressFilterClass(t *testing.T) {
	for name, test := range map[string]struct {
		ingressClassFilters  []string
		expectedIngressNames []string
	}{
		"emptyIngressClassFilters": {
			ingressClassFilters: nil,
			expectedIngressNames: []string{
				"fixture01",
				"fixture02",
				"fixture03",
				"fixture-rg01",
				"fixture-rg02",
				"fixture-rg03",
			},
		},
		// A non-nil but empty slice must behave the same as nil.
		"emptyIngressClassFilters2": {
			ingressClassFilters: []string{},
			expectedIngressNames: []string{
				"fixture01",
				"fixture02",
				"fixture03",
				"fixture-rg01",
				"fixture-rg02",
				"fixture-rg03",
			},
		},
		"singleIngressClass1": {
			ingressClassFilters: []string{"skipper"},
			expectedIngressNames: []string{
				"fixture02",
				"fixture-rg02",
			},
		},
		"singleIngressClass2": {
			ingressClassFilters: []string{"other"},
			expectedIngressNames: []string{
				"fixture03",
				"fixture-rg03",
			},
		},
		"multipleIngressClass": {
			ingressClassFilters: []string{"skipper", "other"},
			expectedIngressNames: []string{
				"fixture02",
				"fixture03",
				"fixture-rg02",
				"fixture-rg03",
			},
		},
		// The empty string selects ingresses using the default (unset) class.
		"multipleIngressClassWithDefault": {
			ingressClassFilters: []string{"skipper", ""},
			expectedIngressNames: []string{
				"fixture01",
				"fixture02",
				"fixture-rg01",
				"fixture-rg02",
			},
		},
		"multipleIngressClassWithDefault2": {
			ingressClassFilters: []string{"other", ""},
			expectedIngressNames: []string{
				"fixture01",
				"fixture03",
				"fixture-rg01",
				"fixture-rg03",
			},
		},
	} {
		t.Run(name, func(t *testing.T) {
			a, _ := NewAdapter(testConfig, IngressAPIVersionNetworking, test.ingressClassFilters, testIngressDefaultSecurityGroup, testSSLPolicy, aws.LoadBalancerTypeApplication, DefaultClusterLocalDomain, false)
			client := &mockClient{}
			a.kubeClient = client
			ingresses, err := a.ListResources()
			if err != nil {
				t.Error(err)
			}
			ingressNames := make([]string, len(ingresses))
			for i, ing := range ingresses {
				ingressNames[i] = ing.Name
			}
			// Order-insensitive comparison: listing order is not part of the contract.
			assert.ElementsMatch(t, test.expectedIngressNames, ingressNames, "ingress names mismatch")
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 932
} | [
2830,
3393,
852,
641,
2483,
5632,
1957,
1155,
353,
8840,
836,
8,
341,
2023,
829,
11,
1273,
1669,
2088,
2415,
14032,
60,
1235,
341,
197,
197,
287,
673,
1957,
28351,
220,
3056,
917,
198,
197,
42400,
641,
2483,
7980,
3056,
917,
198,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestQuery runs a basic SELECT against the fake "people" DB and checks
// the scanned rows, plus two internal invariants: the connection is
// returned to the free pool once Next() hits EOF, and exactly one
// statement Prepare occurred for the whole query.
func TestQuery(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	// Baseline prepare count so the delta can be asserted at the end.
	prepares0 := numPrepares(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	type row struct {
		age  int
		name string
	}
	got := []row{}
	for rows.Next() {
		var r row
		err = rows.Scan(&r.age, &r.name)
		if err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got = append(got, r)
	}
	// Err must be checked after the Next loop to distinguish EOF from failure.
	err = rows.Err()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	want := []row{
		{age: 1, name: "Alice"},
		{age: 2, name: "Bob"},
		{age: 3, name: "Chris"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
	}
	// And verify that the final rows.Next() call, which hit EOF,
	// also closed the rows connection.
	if n := db.numFreeConns(); n != 1 {
		t.Fatalf("free conns after query hitting EOF = %d; want 1", n)
	}
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 446
} | [
2830,
3393,
2859,
1155,
353,
8840,
836,
8,
341,
20939,
1669,
501,
2271,
3506,
1155,
11,
330,
16069,
1138,
16867,
3265,
3506,
1155,
11,
2927,
340,
40346,
79,
5403,
15,
1669,
1629,
4703,
79,
5403,
1155,
11,
2927,
340,
68438,
11,
1848,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
// TestSlogan compiles a small Go+ snippet and asserts the exact Go code it
// produces. The two raw strings are the Go+ input and the expected Go
// output; their contents must stay byte-exact for the comparison to pass.
func TestSlogan(t *testing.T) {
	gopClTest(t, `
fields := ["engineering", "STEM education", "data science"]
println "The Go+ Language for", fields.join(", ")
`, `package main
import (
	fmt "fmt"
	strings "strings"
)
func main() {
	fields := []string{"engineering", "STEM education", "data science"}
	fmt.Println("The Go+ Language for", strings.Join(fields, ", "))
}
`)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 131
} | [
2830,
3393,
50,
93925,
1155,
353,
8840,
836,
8,
341,
3174,
453,
5066,
2271,
1155,
11,
22074,
9007,
1669,
4383,
97915,
497,
330,
15283,
6731,
497,
330,
691,
8038,
7026,
33655,
330,
785,
5994,
10,
11434,
369,
497,
5043,
5446,
12918,
141... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestHaltMultiError(t *testing.T) {
haltErr := halt(errors.New("halt error"))
nonHaltErr := errors.New("not a halt error")
errs := terrors.MultiError{nonHaltErr}
testutil.Assert(t, !IsHaltError(errs), "should not be a halt error")
errs.Add(haltErr)
testutil.Assert(t, IsHaltError(errs), "if any halt errors are present this should return true")
} | explode_data.jsonl/7701 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 136
} | [
2830,
3393,
39,
3145,
20358,
1454,
1155,
353,
8840,
836,
8,
341,
9598,
3145,
7747,
1669,
26269,
38881,
7121,
445,
39416,
1465,
5455,
197,
6280,
39,
3145,
7747,
1669,
5975,
7121,
445,
1921,
264,
26269,
1465,
5130,
9859,
82,
1669,
259,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReadEmptyBuffer(t *testing.T) {
l := NewReaderSize(new(bytes.Buffer), minReadBufferSize)
line, isPrefix, err := l.ReadLine()
if err != io.EOF {
t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err)
}
} | explode_data.jsonl/2887 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 95
} | [
2830,
3393,
4418,
3522,
4095,
1155,
353,
8840,
836,
8,
341,
8810,
1669,
1532,
5062,
1695,
1755,
23158,
22622,
701,
1308,
4418,
52661,
340,
27109,
11,
374,
14335,
11,
1848,
1669,
326,
18354,
741,
743,
1848,
961,
6399,
86492,
341,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestNoCache registers two cached handlers — one plain, one that calls
// httpcache.NoCache — and runs the shared runTest harness against the
// /nocache route. The counter n tracks real (non-cached) handler hits.
func TestNoCache(t *testing.T) {
	mux := http.NewServeMux()
	var n uint32
	mux.Handle("/", httpcache.CacheFunc(func(res http.ResponseWriter, req *http.Request) {
		atomic.AddUint32(&n, 1)
		res.Write([]byte(expectedBodyStr))
	}, cacheDuration))
	mux.Handle("/nocache", httpcache.CacheFunc(func(res http.ResponseWriter, req *http.Request) {
		httpcache.NoCache(res) // opt this response out of caching — the behavior under test
		atomic.AddUint32(&n, 1)
		res.Write([]byte(expectedBodyStr))
	}, cacheDuration))
	e := httptest.New(t, httptest.Handler(mux))
	if err := runTest(e, &n, expectedBodyStr, "/nocache"); err != nil {
		t.Fatal(err)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 241
} | [
2830,
3393,
2753,
8233,
1155,
353,
8840,
836,
8,
341,
2109,
2200,
1669,
1758,
7121,
60421,
44,
2200,
741,
2405,
308,
2622,
18,
17,
271,
2109,
2200,
31421,
35460,
1758,
9360,
46130,
9626,
18552,
4590,
1758,
37508,
11,
4232,
353,
1254,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestMigrate runs the Migrator over table-driven migration sets against a
// fresh SQLite database: an in-order set, a set whose fifth migration fails
// (referencing a nonexistent table), and an out-of-order set that must be
// sorted before application. Each case asserts the final schema version
// and whether Migrate reported an error.
func TestMigrate(t *testing.T) {
	tests := map[string]struct {
		migrations []testMigration
		// expected
		vers   int
		failed bool
	}{
		"ordered": {
			migrations: []testMigration{
				{
					version: 1,
					sql:     "create table if not exists test1 (pk bigint not null primary key)",
					err:     nil,
				},
				{
					version: 2,
					sql:     "insert into test1 (pk) values (1)",
					err:     nil,
				},
				{
					version: 3,
					sql:     "insert into test1 (pk) values (2)",
					err:     nil,
				},
				{
					version: 4,
					sql:     "insert into test1 (pk) values (3)",
					err:     nil,
				},
			},
			vers:   4,
			failed: false,
		},
		// Migration 5 targets a missing table, so Migrate must fail and the
		// recorded version must stop at the last successful migration (4).
		"errorAt5": {
			migrations: []testMigration{
				{
					version: 1,
					sql:     "create table if not exists test1 (pk bigint not null primary key)",
					err:     nil,
				},
				{
					version: 2,
					sql:     "insert into test1 (pk) values (1)",
					err:     nil,
				},
				{
					version: 3,
					sql:     "insert into test1 (pk) values (2)",
					err:     nil,
				},
				{
					version: 4,
					sql:     "insert into test1 (pk) values (3)",
					err:     nil,
				},
				{
					version: 5,
					sql:     "insert into nonexistant (pk) values (2)",
					err:     nil,
				},
			},
			vers:   4,
			failed: true,
		},
		// Same migrations as "ordered" but declared out of order; the
		// migrator (via prepareMigrations) must still apply 1..4 correctly.
		"unordered": {
			migrations: []testMigration{
				{
					version: 3,
					sql:     "insert into test1 (pk) values (2)",
					err:     nil,
				},
				{
					version: 4,
					sql:     "insert into test1 (pk) values (3)",
					err:     nil,
				},
				{
					version: 2,
					sql:     "insert into test1 (pk) values (1)",
					err:     nil,
				},
				{
					version: 1,
					sql:     "create table if not exists test1 (pk bigint not null primary key)",
					err:     nil,
				},
			},
			vers:   4,
			failed: false,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			db, err := sql.Open("sqlite3", testDB)
			if err != nil {
				t.Fatalf("DB setup failed: %v", err)
			}
			defer db.Close()
			migrator := Migrator{
				db:         db,
				migrations: prepareMigrations(tt.migrations),
			}
			// No version yet: a fresh database must report NilVersion.
			v, err := migrator.Version()
			if err != nil {
				t.Fatalf("Migrator.Version() failed: %v", err)
			}
			if v != NilVersion {
				t.Fatalf("Migrator.Version() should be NilVersion, got %d", v)
			}
			if err = migrator.Migrate(); (err != nil) != tt.failed {
				t.Errorf("got %s, unexpected", err)
			}
			v, err = migrator.Version()
			if err != nil {
				t.Fatalf("Migrator.Version() failed: %v", err)
			}
			if v != tt.vers {
				t.Errorf("got %d, want %d", v, tt.vers)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1400
} | [
2830,
3393,
44,
34479,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
2415,
14032,
60,
1235,
341,
197,
2109,
17824,
3056,
1944,
20168,
198,
197,
197,
322,
3601,
198,
197,
197,
3004,
256,
526,
198,
197,
1166,
5687,
1807,
198,
197,
59403,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestCreateTraceExporter_NilConfig(t *testing.T) {
factory := Factory{}
exporter, err := factory.CreateTraceExporter(context.Background(), component.ExporterCreateParams{}, nil)
require.Nil(t, exporter)
assert.EqualError(t, err, "could not cast configuration to jaeger_cassandra")
} | explode_data.jsonl/3445 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 97
} | [
2830,
3393,
4021,
6550,
88025,
1604,
321,
2648,
1155,
353,
8840,
836,
8,
341,
1166,
2919,
1669,
16937,
16094,
59440,
261,
11,
1848,
1669,
8633,
7251,
6550,
88025,
5378,
19047,
1507,
3692,
81077,
261,
4021,
4870,
22655,
2092,
340,
17957,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAutoLoopEnabled(t *testing.T) {
defer test.Guard(t)()
channels := []lndclient.ChannelInfo{
channel1, channel2,
}
// Create a set of parameters with autoloop enabled. The autoloop budget
// is set to allow exactly 2 swaps at the prices that we set in our
// test quotes.
params := Parameters{
AutoOut: true,
AutoFeeBudget: 40066,
AutoFeeStartDate: testTime,
MaxAutoInFlight: 2,
FailureBackOff: time.Hour,
SweepFeeRateLimit: 20000,
SweepConfTarget: 10,
MaximumPrepay: 20000,
MaximumSwapFeePPM: 1000,
MaximumRoutingFeePPM: 1000,
MaximumPrepayRoutingFeePPM: 1000,
MaximumMinerFee: 20000,
ChannelRules: map[lnwire.ShortChannelID]*ThresholdRule{
chanID1: chanRule,
chanID2: chanRule,
},
}
c := newAutoloopTestCtx(t, params, channels)
c.start()
// Calculate our maximum allowed fees and create quotes that fall within
// our budget.
var (
amt = chan1Rec.Amount
maxSwapFee = ppmToSat(amt, params.MaximumSwapFeePPM)
// Create a quote that is within our limits. We do not set miner
// fee because this value is not actually set by the server.
quote1 = &loop.LoopOutQuote{
SwapFee: maxSwapFee,
PrepayAmount: params.MaximumPrepay - 10,
}
quote2 = &loop.LoopOutQuote{
SwapFee: maxSwapFee,
PrepayAmount: params.MaximumPrepay - 20,
}
quoteRequest = &loop.LoopOutQuoteRequest{
Amount: amt,
SweepConfTarget: params.SweepConfTarget,
}
quotes = []quoteRequestResp{
{
request: quoteRequest,
quote: quote1,
},
{
request: quoteRequest,
quote: quote2,
},
}
maxRouteFee = ppmToSat(amt, params.MaximumRoutingFeePPM)
chan1Swap = &loop.OutRequest{
Amount: amt,
MaxSwapRoutingFee: maxRouteFee,
MaxPrepayRoutingFee: ppmToSat(
quote1.PrepayAmount,
params.MaximumPrepayRoutingFeePPM,
),
MaxSwapFee: quote1.SwapFee,
MaxPrepayAmount: quote1.PrepayAmount,
MaxMinerFee: params.MaximumMinerFee,
SweepConfTarget: params.SweepConfTarget,
OutgoingChanSet: loopdb.ChannelSet{chanID1.ToUint64()},
Label: labels.AutoOutLabel(),
}
chan2Swap = &loop.OutRequest{
Amount: amt,
MaxSwapRoutingFee: maxRouteFee,
MaxPrepayRoutingFee: ppmToSat(
quote2.PrepayAmount,
params.MaximumPrepayRoutingFeePPM,
),
MaxSwapFee: quote2.SwapFee,
MaxPrepayAmount: quote2.PrepayAmount,
MaxMinerFee: params.MaximumMinerFee,
SweepConfTarget: params.SweepConfTarget,
OutgoingChanSet: loopdb.ChannelSet{chanID2.ToUint64()},
Label: labels.AutoOutLabel(),
}
loopOuts = []loopOutRequestResp{
{
request: chan1Swap,
response: &loop.LoopOutSwapInfo{
SwapHash: lntypes.Hash{1},
},
},
{
request: chan2Swap,
response: &loop.LoopOutSwapInfo{
SwapHash: lntypes.Hash{2},
},
},
}
)
// Tick our autolooper with no existing swaps, we expect a loop out
// swap to be dispatched for each channel.
c.autoloop(1, amt+1, nil, quotes, loopOuts)
// Tick again with both of our swaps in progress. We haven't shifted our
// channel balances at all, so swaps should still be suggested, but we
// have 2 swaps in flight so we do not expect any suggestion.
existing := []*loopdb.LoopOut{
existingSwapFromRequest(chan1Swap, testTime, nil),
existingSwapFromRequest(chan2Swap, testTime, nil),
}
c.autoloop(1, amt+1, existing, nil, nil)
// Now, we update our channel 2 swap to have failed due to off chain
// failure and our first swap to have succeeded.
now := c.testClock.Now()
failedOffChain := []*loopdb.LoopEvent{
{
SwapStateData: loopdb.SwapStateData{
State: loopdb.StateFailOffchainPayments,
},
Time: now,
},
}
success := []*loopdb.LoopEvent{
{
SwapStateData: loopdb.SwapStateData{
State: loopdb.StateSuccess,
Cost: loopdb.SwapCost{
Server: quote1.SwapFee,
Onchain: params.MaximumMinerFee,
Offchain: maxRouteFee +
chan1Rec.MaxPrepayRoutingFee,
},
},
Time: now,
},
}
quotes = []quoteRequestResp{
{
request: quoteRequest,
quote: quote1,
},
}
loopOuts = []loopOutRequestResp{
{
request: chan1Swap,
response: &loop.LoopOutSwapInfo{
SwapHash: lntypes.Hash{3},
},
},
}
existing = []*loopdb.LoopOut{
existingSwapFromRequest(chan1Swap, testTime, success),
existingSwapFromRequest(chan2Swap, testTime, failedOffChain),
}
// We tick again, this time we expect another swap on channel 1 (which
// still has balances which reflect that we need to swap), but nothing
// for channel 2, since it has had a failure.
c.autoloop(1, amt+1, existing, quotes, loopOuts)
// Now, we progress our time so that we have sufficiently backed off
// for channel 2, and could perform another swap.
c.testClock.SetTime(now.Add(params.FailureBackOff))
// Our existing swaps (1 successful, one pending) have used our budget
// so we no longer expect any swaps to automatically dispatch.
existing = []*loopdb.LoopOut{
existingSwapFromRequest(chan1Swap, testTime, success),
existingSwapFromRequest(chan1Swap, c.testClock.Now(), nil),
existingSwapFromRequest(chan2Swap, testTime, failedOffChain),
}
c.autoloop(1, amt+1, existing, quotes, nil)
c.stop()
} | explode_data.jsonl/73427 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2268
} | [
2830,
3393,
13253,
14620,
5462,
1155,
353,
8840,
836,
8,
341,
16867,
1273,
1224,
11034,
1155,
8,
2822,
23049,
6680,
1669,
3056,
75,
303,
2972,
38716,
1731,
515,
197,
71550,
16,
11,
5496,
17,
345,
197,
630,
197,
322,
4230,
264,
738,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTokens(t *testing.T) {
expected := []struct {
contents string
token T
text string
}{
{"", TEndOfFile, "end of file"},
{"@media", TAtKeyword, "@-keyword"},
{"url(x y", TBadURL, "bad URL token"},
{"-->", TCDC, "\"-->\""},
{"<!--", TCDO, "\"<!--\""},
{"}", TCloseBrace, "\"}\""},
{"]", TCloseBracket, "\"]\""},
{")", TCloseParen, "\")\""},
{":", TColon, "\":\""},
{",", TComma, "\",\""},
{"?", TDelim, "delimiter"},
{"&", TDelimAmpersand, "\"&\""},
{"*", TDelimAsterisk, "\"*\""},
{"|", TDelimBar, "\"|\""},
{"^", TDelimCaret, "\"^\""},
{"$", TDelimDollar, "\"$\""},
{".", TDelimDot, "\".\""},
{"=", TDelimEquals, "\"=\""},
{"!", TDelimExclamation, "\"!\""},
{">", TDelimGreaterThan, "\">\""},
{"+", TDelimPlus, "\"+\""},
{"/", TDelimSlash, "\"/\""},
{"~", TDelimTilde, "\"~\""},
{"1px", TDimension, "dimension"},
{"max(", TFunction, "function token"},
{"#name", THash, "hash token"},
{"name", TIdent, "identifier"},
{"123", TNumber, "number"},
{"{", TOpenBrace, "\"{\""},
{"[", TOpenBracket, "\"[\""},
{"(", TOpenParen, "\"(\""},
{"50%", TPercentage, "percentage"},
{";", TSemicolon, "\";\""},
{"'abc'", TString, "string token"},
{"url(test)", TURL, "URL token"},
{" ", TWhitespace, "whitespace"},
}
for _, it := range expected {
contents := it.contents
token := it.token
t.Run(contents, func(t *testing.T) {
kind, _ := lexToken(contents)
test.AssertEqual(t, kind, token)
})
}
} | explode_data.jsonl/47973 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 695
} | [
2830,
3393,
29300,
1155,
353,
8840,
836,
8,
341,
42400,
1669,
3056,
1235,
341,
197,
197,
17610,
914,
198,
197,
43947,
262,
350,
198,
197,
15425,
257,
914,
198,
197,
59403,
197,
197,
4913,
497,
350,
3727,
74696,
11,
330,
408,
315,
10... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUpdateServices(t *testing.T) {
boardConfigUpdater := unitstatushandler.NewTestBoardConfigUpdater(
cloudprotocol.BoardConfigInfo{VendorVersion: "1.0", Status: cloudprotocol.InstalledStatus})
firmwareUpdater := unitstatushandler.NewTestFirmwareUpdater(nil)
softwareUpdater := unitstatushandler.NewTestSoftwareUpdater([]cloudprotocol.ServiceInfo{
{ID: "service0", AosVersion: 0, Status: cloudprotocol.InstalledStatus},
{ID: "service1", AosVersion: 0, Status: cloudprotocol.InstalledStatus},
{ID: "service2", AosVersion: 0, Status: cloudprotocol.InstalledStatus},
}, nil)
sender := unitstatushandler.NewTestSender()
statusHandler, err := unitstatushandler.New(
cfg, boardConfigUpdater, firmwareUpdater, softwareUpdater, unitstatushandler.NewTestDownloader(),
unitstatushandler.NewTestStorage(), sender)
if err != nil {
t.Fatalf("Can't create unit status handler: %s", err)
}
defer statusHandler.Close()
go handleUpdateStatus(statusHandler)
if err = statusHandler.SendUnitStatus(); err != nil {
t.Fatalf("Can't set users: %s", err)
}
if _, err = sender.WaitForStatus(5 * time.Second); err != nil {
t.Fatalf("Can't receive unit status: %s", err)
}
// success update
expectedUnitStatus := cloudprotocol.UnitStatus{
BoardConfig: []cloudprotocol.BoardConfigInfo{boardConfigUpdater.BoardConfigInfo},
Components: []cloudprotocol.ComponentInfo{},
Layers: []cloudprotocol.LayerInfo{},
Services: []cloudprotocol.ServiceInfo{
{ID: "service0", AosVersion: 0, Status: cloudprotocol.InstalledStatus},
{ID: "service1", AosVersion: 1, Status: cloudprotocol.InstalledStatus},
{ID: "service2", Status: cloudprotocol.RemovedStatus},
{ID: "service3", AosVersion: 1, Status: cloudprotocol.InstalledStatus},
},
}
statusHandler.ProcessDesiredStatus(cloudprotocol.DecodedDesiredStatus{
Services: []cloudprotocol.ServiceInfoFromCloud{
{
ID: "service0", VersionFromCloud: cloudprotocol.VersionFromCloud{AosVersion: 0},
DecryptDataStruct: cloudprotocol.DecryptDataStruct{Sha256: []byte{0}},
},
{
ID: "service1", VersionFromCloud: cloudprotocol.VersionFromCloud{AosVersion: 1},
DecryptDataStruct: cloudprotocol.DecryptDataStruct{Sha256: []byte{1}},
},
{
ID: "service3", VersionFromCloud: cloudprotocol.VersionFromCloud{AosVersion: 1},
DecryptDataStruct: cloudprotocol.DecryptDataStruct{Sha256: []byte{3}},
},
},
})
receivedUnitStatus, err := sender.WaitForStatus(waitStatusTimeout)
if err != nil {
t.Fatalf("Can't receive unit status: %s", err)
}
if err = compareUnitStatus(receivedUnitStatus, expectedUnitStatus); err != nil {
t.Errorf("Wrong unit status received: %v, expected: %v", receivedUnitStatus, expectedUnitStatus)
}
// failed update
softwareUpdater.UsersServices = expectedUnitStatus.Services
softwareUpdater.UpdateError = aoserrors.New("some error occurs")
expectedUnitStatus = cloudprotocol.UnitStatus{
BoardConfig: []cloudprotocol.BoardConfigInfo{boardConfigUpdater.BoardConfigInfo},
Components: []cloudprotocol.ComponentInfo{},
Layers: []cloudprotocol.LayerInfo{},
Services: []cloudprotocol.ServiceInfo{
{
ID: "service0", AosVersion: 0, Status: cloudprotocol.ErrorStatus,
Error: softwareUpdater.UpdateError.Error(),
},
{ID: "service1", AosVersion: 1, Status: cloudprotocol.InstalledStatus},
{ID: "service2", Status: cloudprotocol.RemovedStatus},
{ID: "service3", AosVersion: 1, Status: cloudprotocol.InstalledStatus},
{
ID: "service3", AosVersion: 2, Status: cloudprotocol.ErrorStatus,
Error: softwareUpdater.UpdateError.Error(),
},
{
ID: "service4", AosVersion: 2, Status: cloudprotocol.ErrorStatus,
Error: softwareUpdater.UpdateError.Error(),
},
},
}
statusHandler.ProcessDesiredStatus(cloudprotocol.DecodedDesiredStatus{
Services: []cloudprotocol.ServiceInfoFromCloud{
{
ID: "service1", VersionFromCloud: cloudprotocol.VersionFromCloud{AosVersion: 1},
DecryptDataStruct: cloudprotocol.DecryptDataStruct{Sha256: []byte{1}},
},
{
ID: "service3", VersionFromCloud: cloudprotocol.VersionFromCloud{AosVersion: 2},
DecryptDataStruct: cloudprotocol.DecryptDataStruct{Sha256: []byte{3}},
},
{
ID: "service4", VersionFromCloud: cloudprotocol.VersionFromCloud{AosVersion: 2},
DecryptDataStruct: cloudprotocol.DecryptDataStruct{Sha256: []byte{4}},
},
},
})
if receivedUnitStatus, err = sender.WaitForStatus(waitStatusTimeout); err != nil {
t.Fatalf("Can't receive unit status: %s", err)
}
if err = compareUnitStatus(receivedUnitStatus, expectedUnitStatus); err != nil {
t.Errorf("Wrong unit status received: %v, expected: %v", receivedUnitStatus, expectedUnitStatus)
}
} | explode_data.jsonl/52390 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1671
} | [
2830,
3393,
4289,
11025,
1155,
353,
8840,
836,
8,
341,
59868,
2648,
79854,
1669,
4982,
2829,
17905,
7121,
2271,
11932,
2648,
79854,
1006,
197,
197,
12361,
17014,
83284,
2648,
1731,
90,
44691,
5637,
25,
330,
16,
13,
15,
497,
8104,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestPointer(t *testing.T) {
p := Programmer{"stefno", "go"}
fmt.Println(p)
name := (*string)(unsafe.Pointer(&p))
*name = "qcrao"
lang := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(&p)) + unsafe.Offsetof(p.language)))
*lang = "Golang"
fmt.Println(p)
s := []int{5}
s = append(s, 7)
s = append(s, 9)
x := append(s, 11)
y := append(s, 12)
fmt.Println(s, x, y)
} | explode_data.jsonl/31065 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 181
} | [
2830,
3393,
9084,
1155,
353,
8840,
836,
8,
341,
3223,
1669,
88024,
4913,
267,
823,
2152,
497,
330,
3346,
16707,
11009,
12419,
1295,
692,
11609,
1669,
4609,
917,
2376,
38157,
41275,
2099,
79,
1171,
197,
9,
606,
284,
330,
59833,
956,
78... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAddDir(t *testing.T) {
is := is.New(t)
s := NewShell(shellUrl)
cid, err := s.AddDir("./testdata")
is.Nil(err)
is.Equal(cid, "QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv")
} | explode_data.jsonl/61075 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 106
} | [
2830,
3393,
2212,
6184,
1155,
353,
8840,
836,
8,
341,
19907,
1669,
374,
7121,
1155,
340,
1903,
1669,
1532,
25287,
93558,
2864,
692,
1444,
307,
11,
1848,
1669,
274,
1904,
6184,
13988,
92425,
1138,
19907,
59678,
3964,
340,
19907,
12808,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDeliveryServiceServersWithRequiredCapabilities(t *testing.T) {
WithObjs(t, []TCObj{CDNs, Types, Tenants, Parameters, Profiles, Statuses, Divisions, Regions, PhysLocations, CacheGroups, Servers, ServerCapabilities, Topologies, DeliveryServices, DeliveryServicesRequiredCapabilities, ServerServerCapabilities}, func() {
CreateTestDeliveryServiceServersWithRequiredCapabilities(t)
CreateTestMSODSServerWithReqCap(t)
})
} | explode_data.jsonl/30817 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 123
} | [
2830,
3393,
38121,
1860,
78139,
2354,
8164,
55315,
1155,
353,
8840,
836,
8,
341,
197,
2354,
4121,
2519,
1155,
11,
3056,
7749,
5261,
90,
6484,
47360,
11,
20768,
11,
17695,
1783,
11,
13522,
11,
71727,
11,
8104,
288,
11,
8765,
6805,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNormalizeUIDependenciesTab(t *testing.T) {
falseVar := false
tests := []struct {
uiOpts map[string]interface{}
storage string
enabled *bool
expected map[string]interface{}
}{
{
uiOpts: map[string]interface{}{},
storage: "memory",
expected: map[string]interface{}{},
},
{
uiOpts: map[string]interface{}{},
storage: "memory",
enabled: &falseVar,
expected: map[string]interface{}{},
},
{
uiOpts: map[string]interface{}{},
storage: "whateverStorage",
expected: map[string]interface{}{"dependencies": map[string]interface{}{"menuEnabled": false}},
},
{
uiOpts: map[string]interface{}{},
storage: "whateverStorage",
enabled: &falseVar,
expected: map[string]interface{}{"dependencies": map[string]interface{}{"menuEnabled": false}},
},
{
uiOpts: map[string]interface{}{"dependencies": "respectThis"},
storage: "whateverStorage",
expected: map[string]interface{}{"dependencies": "respectThis"},
},
{
uiOpts: map[string]interface{}{"dependencies": map[string]interface{}{"menuEnabled": "respectThis"}},
storage: "whateverStorage",
expected: map[string]interface{}{"dependencies": map[string]interface{}{"menuEnabled": "respectThis"}},
},
{
uiOpts: map[string]interface{}{"dependencies": map[string]interface{}{"foo": "bar"}},
storage: "whateverStorage",
expected: map[string]interface{}{"dependencies": map[string]interface{}{"foo": "bar", "menuEnabled": false}},
},
}
for _, test := range tests {
disableDependenciesTab(test.uiOpts, test.storage, test.enabled)
assert.Equal(t, test.expected, test.uiOpts)
}
} | explode_data.jsonl/21863 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 635
} | [
2830,
3393,
87824,
6463,
12769,
5946,
8582,
1155,
353,
8840,
836,
8,
341,
36012,
3962,
1669,
895,
198,
78216,
1669,
3056,
1235,
341,
197,
37278,
43451,
256,
2415,
14032,
31344,
16094,
197,
197,
16172,
220,
914,
198,
197,
197,
15868,
220... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestTraceparent_String(t *testing.T) {
type fields struct {
version byte
traceID [16]byte
parentID [8]byte
traceFlags byte
}
tests := []struct {
name string
fields fields
want string
}{
{"ok", fields{
version: TraceVersion,
traceID: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5},
parentID: [8]byte{6, 7, 8, 9, 0, 1, 2, 3},
traceFlags: 0,
}, "01-00010203040506070809000102030405-0607080900010203-00"},
{"ok", fields{
version: 0x2,
traceID: [16]byte{0xd7, 0x40, 0x67, 0xf8, 0x3f, 0xa7, 0x46, 0x15, 0x35, 0x57, 0xc6, 0x2d, 0x2e, 0x26, 0x6, 0x1b},
parentID: [8]byte{0x55, 0x9c, 0x1a, 0x5f, 0x60, 0xe6, 0x7a, 0x81},
traceFlags: 0x70,
}, "02-d74067f83fa746153557c62d2e26061b-559c1a5f60e67a81-70"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tp := &Traceparent{
version: tt.fields.version,
traceID: tt.fields.traceID,
parentID: tt.fields.parentID,
traceFlags: tt.fields.traceFlags,
}
if got := tp.String(); got != tt.want {
t.Errorf("Traceparent.String() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/35202 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 619
} | [
2830,
3393,
6550,
3765,
31777,
1155,
353,
8840,
836,
8,
341,
13158,
5043,
2036,
341,
197,
74954,
262,
4922,
198,
197,
65058,
915,
262,
508,
16,
21,
90184,
198,
197,
24804,
915,
256,
508,
23,
90184,
198,
197,
65058,
9195,
4922,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestHandleContainerChangeUpdateContainerHealth(t *testing.T) {
eventStreamName := "TestHandleContainerChangeUpdateContainerHealth"
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
containerChangeEventStream := eventstream.NewEventStream(eventStreamName, ctx)
containerChangeEventStream.StartListening()
mTask := &managedTask{
Task: testdata.LoadTask("sleep5TaskCgroup"),
containerChangeEventStream: containerChangeEventStream,
stateChangeEvents: make(chan statechange.Event),
}
// Disgard all the statechange events
defer discardEvents(mTask.stateChangeEvents)()
mTask.SetKnownStatus(apitaskstatus.TaskRunning)
mTask.SetSentStatus(apitaskstatus.TaskRunning)
container := mTask.Containers[0]
container.HealthCheckType = "docker"
containerChange := dockerContainerChange{
container: container,
event: dockerapi.DockerContainerChangeEvent{
Status: apicontainerstatus.ContainerRunning,
DockerContainerMetadata: dockerapi.DockerContainerMetadata{
DockerID: "dockerID",
Health: apicontainer.HealthStatus{
Status: apicontainerstatus.ContainerHealthy,
Output: "health check succeed",
},
},
},
}
mTask.handleContainerChange(containerChange)
containerHealth := container.GetHealthStatus()
assert.Equal(t, containerHealth.Status, apicontainerstatus.ContainerHealthy)
assert.Equal(t, containerHealth.Output, "health check succeed")
} | explode_data.jsonl/24588 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 464
} | [
2830,
3393,
6999,
4502,
4072,
4289,
4502,
14542,
1155,
353,
8840,
836,
8,
341,
28302,
3027,
675,
1669,
330,
2271,
6999,
4502,
4072,
4289,
4502,
14542,
698,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
16867,
9121,
741,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestValidateLSIs(t *testing.T) {
testCases := map[string]struct {
inputAttributes []string
inputLSIs []string
wantError error
}{
"good case": {
inputLSIs: []string{"userID:S"},
wantError: nil,
},
"bad lsi structure": {
inputLSIs: []string{"userID"},
wantError: errDDBAttributeBadFormat,
},
"too many lsis": {
inputLSIs: []string{"bowie:S", "clyde:S", "keno:S", "kava:S", "meow:S", "hana:S"},
wantError: errTooManyLSIKeys,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
got := validateLSIs(tc.inputLSIs)
if tc.wantError != nil {
require.EqualError(t, got, tc.wantError.Error())
} else {
require.Nil(t, got)
}
})
}
} | explode_data.jsonl/34550 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 335
} | [
2830,
3393,
17926,
7268,
3872,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
2415,
14032,
60,
1235,
341,
197,
22427,
10516,
3056,
917,
198,
197,
22427,
7268,
3872,
981,
3056,
917,
198,
197,
50780,
1454,
981,
1465,
198,
197,
59403,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestArray_Chunk(t *testing.T) {
gtest.C(t, func(t *gtest.T) {
a1 := []interface{}{1, 2, 3, 4, 5}
array1 := garray.NewArrayFrom(a1)
chunks := array1.Chunk(2)
t.Assert(len(chunks), 3)
t.Assert(chunks[0], []interface{}{1, 2})
t.Assert(chunks[1], []interface{}{3, 4})
t.Assert(chunks[2], []interface{}{5})
t.Assert(array1.Chunk(0), nil)
})
gtest.C(t, func(t *gtest.T) {
a1 := []interface{}{1, 2, 3, 4, 5}
array1 := garray.NewArrayFrom(a1)
chunks := array1.Chunk(3)
t.Assert(len(chunks), 2)
t.Assert(chunks[0], []interface{}{1, 2, 3})
t.Assert(chunks[1], []interface{}{4, 5})
t.Assert(array1.Chunk(0), nil)
})
gtest.C(t, func(t *gtest.T) {
a1 := []interface{}{1, 2, 3, 4, 5, 6}
array1 := garray.NewArrayFrom(a1)
chunks := array1.Chunk(2)
t.Assert(len(chunks), 3)
t.Assert(chunks[0], []interface{}{1, 2})
t.Assert(chunks[1], []interface{}{3, 4})
t.Assert(chunks[2], []interface{}{5, 6})
t.Assert(array1.Chunk(0), nil)
})
gtest.C(t, func(t *gtest.T) {
a1 := []interface{}{1, 2, 3, 4, 5, 6}
array1 := garray.NewArrayFrom(a1)
chunks := array1.Chunk(3)
t.Assert(len(chunks), 2)
t.Assert(chunks[0], []interface{}{1, 2, 3})
t.Assert(chunks[1], []interface{}{4, 5, 6})
t.Assert(array1.Chunk(0), nil)
})
} | explode_data.jsonl/13901 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 654
} | [
2830,
3393,
1857,
27588,
3122,
1155,
353,
8840,
836,
8,
341,
3174,
1944,
727,
1155,
11,
2915,
1155,
353,
82038,
836,
8,
341,
197,
11323,
16,
1669,
3056,
4970,
6257,
90,
16,
11,
220,
17,
11,
220,
18,
11,
220,
19,
11,
220,
20,
532... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestParseConflictSchemaName(t *testing.T) {
searchDir := "testdata/conflict_name"
p := New()
p.ParseDependency = true
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
expected, err := ioutil.ReadFile(filepath.Join(searchDir, "expected.json"))
assert.NoError(t, err)
assert.Equal(t, string(expected), string(b))
} | explode_data.jsonl/63565 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 164
} | [
2830,
3393,
14463,
57974,
8632,
675,
1155,
353,
8840,
836,
8,
341,
45573,
6184,
1669,
330,
92425,
59241,
21242,
1269,
698,
3223,
1669,
1532,
741,
3223,
8937,
36387,
284,
830,
198,
9859,
1669,
281,
8937,
7082,
20447,
6184,
11,
1887,
7082... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAgent_DeregisterService_ACLDeny(t *testing.T) {
t.Parallel()
a := NewTestAgent(t.Name(), TestACLConfig())
defer a.Shutdown()
service := &structs.NodeService{
ID: "test",
Service: "test",
}
if err := a.AddService(service, nil, false, ""); err != nil {
t.Fatalf("err: %v", err)
}
t.Run("no token", func(t *testing.T) {
req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil)
if _, err := a.srv.AgentDeregisterService(nil, req); !acl.IsErrPermissionDenied(err) {
t.Fatalf("err: %v", err)
}
})
t.Run("root token", func(t *testing.T) {
req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test?token=root", nil)
if _, err := a.srv.AgentDeregisterService(nil, req); err != nil {
t.Fatalf("err: %v", err)
}
})
} | explode_data.jsonl/33635 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 345
} | [
2830,
3393,
16810,
1557,
52633,
1571,
1860,
97627,
23619,
88,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
11323,
1669,
1532,
2271,
16810,
1155,
2967,
1507,
3393,
55393,
2648,
2398,
16867,
264,
10849,
18452,
2822,
52934,
1669,
60... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestSuObject(t *testing.T) {
assert := assert.T(t).This
DefaultSingleQuotes = true
defer func() { DefaultSingleQuotes = false }()
ob := SuObject{}
assert(ob.String()).Is("#()")
assert(ob.Size()).Is(0)
iv := SuInt(123)
ob.Add(iv)
assert(ob.Size()).Is(1)
assert(ob.String()).Is("#(123)")
sv := SuStr("hello")
ob.Add(sv)
assert(ob.Size()).Is(2)
assert(ob.Get(nil, Zero)).Is(iv)
assert(ob.Get(nil, One)).Is(sv)
ob.Set(sv, iv)
assert(ob.String()).Is("#(123, 'hello', hello: 123)")
ob.Set(iv, sv)
assert(ob.Size()).Is(4)
} | explode_data.jsonl/7111 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 245
} | [
2830,
3393,
36459,
1190,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
836,
1155,
568,
1986,
198,
91084,
10888,
43780,
284,
830,
198,
16867,
2915,
368,
314,
7899,
10888,
43780,
284,
895,
50746,
63353,
1669,
16931,
1190,
16094,
6948,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestToggle(t *testing.T) {
opts := optsFor()
if opts.ToggleSort {
t.Error()
}
opts = optsFor("--bind=a:toggle-sort")
if !opts.ToggleSort {
t.Error()
}
opts = optsFor("--bind=a:toggle-sort", "--bind=a:up")
if opts.ToggleSort {
t.Error()
}
} | explode_data.jsonl/40871 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 128
} | [
2830,
3393,
18897,
1155,
353,
8840,
836,
8,
341,
64734,
1669,
12185,
2461,
741,
743,
12185,
80700,
10231,
341,
197,
3244,
6141,
741,
197,
630,
64734,
284,
12185,
2461,
21549,
7666,
24239,
25,
17703,
46540,
1138,
743,
753,
10518,
80700,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestGetBool(t *testing.T) {
tests := []struct {
b *bool
wb bool
wset bool
}{
{nil, false, false},
{Boolp(true), true, true},
{Boolp(false), false, true},
}
for i, tt := range tests {
b, set := GetBool(tt.b)
if b != tt.wb {
t.Errorf("#%d: value = %v, want %v", i, b, tt.wb)
}
if set != tt.wset {
t.Errorf("#%d: set = %v, want %v", i, set, tt.wset)
}
}
} | explode_data.jsonl/66765 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 211
} | [
2830,
3393,
1949,
11233,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
2233,
262,
353,
2641,
198,
197,
6692,
65,
256,
1807,
198,
197,
6692,
746,
1807,
198,
197,
59403,
197,
197,
90,
8385,
11,
895,
11,
895,
1583,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestDealVersion(t *testing.T) {
twinCloudEdgeVersion := dttype.TwinVersion{
CloudVersion: 1,
EdgeVersion: 1,
}
twinCloudVersion := dttype.TwinVersion{
CloudVersion: 1,
EdgeVersion: 0,
}
tests := []struct {
name string
version *dttype.TwinVersion
reqVersion *dttype.TwinVersion
dealType int
errorWant bool
err error
}{
{
name: "TestDealVersion(): Case 1: dealType=3",
version: &dttype.TwinVersion{},
dealType: SyncTwinDeleteDealType,
errorWant: true,
err: nil,
},
{
name: "TestDealVersion(): Case 2: dealType>=1 && version.EdgeVersion>reqVersion.EdgeVersion",
version: &twinCloudEdgeVersion,
reqVersion: &twinCloudVersion,
dealType: SyncDealType,
errorWant: false,
err: errors.New("not allowed to sync due to version conflict"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
got, err := dealVersion(test.version, test.reqVersion, test.dealType)
if !reflect.DeepEqual(err, test.err) {
t.Errorf("DTManager.TestDealVersion() case failed: got = %v, Want = %v", err, test.err)
return
}
if !reflect.DeepEqual(got, test.errorWant) {
t.Errorf("DTManager.TestDealVersion() case failed: got = %v, want %v", got, test.errorWant)
}
})
}
} | explode_data.jsonl/30639 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 561
} | [
2830,
3393,
72841,
5637,
1155,
353,
8840,
836,
8,
341,
3244,
7526,
16055,
11656,
5637,
1669,
7594,
1313,
836,
7526,
5637,
515,
197,
197,
16055,
5637,
25,
220,
16,
345,
197,
197,
11656,
5637,
25,
220,
220,
16,
345,
197,
532,
3244,
75... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestExtractEtag(t *testing.T) {
t.Run("no etag present", func(t *testing.T) {
ok, etag := extractEtag(&commonv1pb.StateItem{})
assert.False(t, ok)
assert.Empty(t, etag)
})
t.Run("empty etag exists", func(t *testing.T) {
ok, etag := extractEtag(&commonv1pb.StateItem{
Etag: &commonv1pb.Etag{},
})
assert.True(t, ok)
assert.Empty(t, etag)
})
t.Run("non-empty etag exists", func(t *testing.T) {
ok, etag := extractEtag(&commonv1pb.StateItem{
Etag: &commonv1pb.Etag{
Value: "a",
},
})
assert.True(t, ok)
assert.Equal(t, "a", etag)
})
} | explode_data.jsonl/21745 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 277
} | [
2830,
3393,
28959,
36,
4578,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
2152,
1842,
351,
3042,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
59268,
11,
1842,
351,
1669,
8649,
36,
4578,
2099,
5464,
85,
16,
16650,
18942,
1234,
37... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPosition(t *testing.T) {
expected := position.NewPosition(1, 1, 1, 1)
for _, n := range nodes {
n.SetPosition(expected)
actual := n.GetPosition()
assert.DeepEqual(t, expected, actual)
}
} | explode_data.jsonl/52690 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 78
} | [
2830,
3393,
3812,
1155,
353,
8840,
836,
8,
341,
42400,
1669,
2309,
7121,
3812,
7,
16,
11,
220,
16,
11,
220,
16,
11,
220,
16,
340,
2023,
8358,
308,
1669,
2088,
7798,
341,
197,
9038,
4202,
3812,
15253,
340,
197,
88814,
1669,
308,
22... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestCreateMirrorPod(t *testing.T) {
for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "foo",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
}
pods := []*api.Pod{pod}
kl.podManager.SetPods(pods)
err := kl.syncPod(pod, nil, &container.PodStatus{}, updateType)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
podFullName := kubecontainer.GetPodFullName(pod)
if !manager.HasPod(podFullName) {
t.Errorf("expected mirror pod %q to be created", podFullName)
}
if manager.NumOfPods() != 1 || !manager.HasPod(podFullName) {
t.Errorf("expected one mirror pod %q, got %v", podFullName, manager.GetPods())
}
}
} | explode_data.jsonl/43337 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 419
} | [
2830,
3393,
4021,
54216,
23527,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
2647,
929,
1669,
2088,
3056,
74,
392,
67553,
92183,
23527,
929,
69094,
392,
67553,
92183,
23527,
4021,
11,
595,
392,
67553,
92183,
23527,
4289,
92,
341,
197,
1818... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestKubernetesStore_Success(t *testing.T) {
f := fake.Clientset{}
store := kubernetesStore{
namespace: "dummy",
client: &f,
}
f.AddReactor("get", "hooks",
func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
return true, &v1alpha12.Hook{
ObjectMeta: v1.ObjectMeta{
Name: "generatedHookName",
},
Spec: v1alpha12.HookSpec{
ForwardURL: "http://test.com",
Body: "body",
Headers: nil,
},
Status: v1alpha12.HookStatus{
Phase: v1alpha12.HookPhasePending,
Attempts: 0,
},
}, nil
})
f.AddReactor("update", "hooks",
func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
hook := action.(clienttesting.UpdateAction).GetObject().(*v1alpha12.Hook)
assert.Equal(t, hook.Name, "generatedHookName")
assert.Equal(t, hook.Status.Phase, v1alpha12.HookPhaseSuccess)
assert.Equal(t, hook.Status.Message, "")
assert.NotNil(t, hook.Status.CompletedTimestamp)
return true, hook, nil
})
err := store.Success("hookName")
assert.NoError(t, err)
assert.Equal(t, 2, len(f.Actions()))
assert.Equal(t, "get", f.Actions()[0].GetVerb())
assert.Equal(t, "hooks", f.Actions()[0].GetResource().Resource)
assert.Equal(t, "v1alpha1", f.Actions()[0].GetResource().Version)
assert.Equal(t, "captainhook.io", f.Actions()[0].GetResource().Group)
assert.Equal(t, "update", f.Actions()[1].GetVerb())
assert.Equal(t, "hooks", f.Actions()[1].GetResource().Resource)
assert.Equal(t, "v1alpha1", f.Actions()[1].GetResource().Version)
assert.Equal(t, "captainhook.io", f.Actions()[1].GetResource().Group)
} | explode_data.jsonl/23005 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 705
} | [
2830,
3393,
42,
29827,
6093,
87161,
1155,
353,
8840,
836,
8,
341,
1166,
1669,
12418,
11716,
746,
31483,
57279,
1669,
595,
29827,
6093,
515,
197,
56623,
25,
330,
31390,
756,
197,
25291,
25,
262,
609,
69,
345,
197,
630,
1166,
1904,
693,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetFlavorZone(t *testing.T) {
flavorId := "BBC-G3-01"
queryArgs := &ListFlavorZonesArgs{
FlavorId: flavorId,
}
if res, err := BBC_CLIENT.ListFlavorZones(queryArgs); err != nil {
fmt.Println("Get flavor zoneName failed: ", err)
} else {
fmt.Println("Get flavor zoneName success, result: ", res)
}
} | explode_data.jsonl/4068 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 127
} | [
2830,
3393,
1949,
3882,
3292,
15363,
1155,
353,
8840,
836,
8,
341,
1166,
75,
3292,
764,
1669,
330,
66755,
12010,
18,
12,
15,
16,
698,
27274,
4117,
1669,
609,
852,
3882,
3292,
57,
3154,
4117,
515,
197,
197,
3882,
3292,
764,
25,
17172... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_mergeMaps_simple(t *testing.T) {
m1 := map[string]interface{}{
"a": 1,
"b": 2,
}
m2 := map[string]interface{}{
"b": 4,
}
expected := map[string]interface{}{
"a": 1,
"b": 4,
}
err := mergeMaps(m1, m2, nil)
require.NoError(t, err)
require.Equal(t, expected, m1)
} | explode_data.jsonl/82638 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 142
} | [
2830,
3393,
20888,
36562,
30015,
1155,
353,
8840,
836,
8,
341,
2109,
16,
1669,
2415,
14032,
31344,
67066,
197,
197,
56693,
788,
220,
16,
345,
197,
197,
1,
65,
788,
220,
17,
345,
197,
630,
2109,
17,
1669,
2415,
14032,
31344,
67066,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.