text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestGetBuffering(t *testing.T) {
testCases := []struct {
desc string
labels map[string]string
expected *types.Buffering
}{
{
desc: "should return nil when no buffering labels",
labels: map[string]string{},
expected: nil,
},
{
desc: "should return a struct when buffering labels are set",
labels: map[string]string{
TraefikBackendBufferingMaxResponseBodyBytes: "10485760",
TraefikBackendBufferingMemResponseBodyBytes: "2097152",
TraefikBackendBufferingMaxRequestBodyBytes: "10485760",
TraefikBackendBufferingMemRequestBodyBytes: "2097152",
TraefikBackendBufferingRetryExpression: "IsNetworkError() && Attempts() <= 2",
},
expected: &types.Buffering{
MaxResponseBodyBytes: 10485760,
MemResponseBodyBytes: 2097152,
MaxRequestBodyBytes: 10485760,
MemRequestBodyBytes: 2097152,
RetryExpression: "IsNetworkError() && Attempts() <= 2",
},
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
actual := GetBuffering(test.labels)
assert.Equal(t, test.expected, actual)
})
}
} | explode_data.jsonl/51862 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 472
} | [
2830,
3393,
1949,
4095,
287,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
41653,
257,
914,
198,
197,
95143,
256,
2415,
14032,
30953,
198,
197,
42400,
353,
9242,
22622,
287,
198,
197,
59403,
197,
197,
515,
29... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStepper_After(t *testing.T) {
st := Stepper{
N: 2,
U: Unum,
}
val, err := st.After(2)
assert.NoError(t, err)
assert.Equal(t, 4, val)
} | explode_data.jsonl/78067 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 77
} | [
2830,
3393,
20903,
6922,
1566,
1046,
1155,
353,
8840,
836,
8,
341,
18388,
1669,
3360,
6922,
515,
197,
18317,
25,
220,
17,
345,
197,
15980,
25,
1230,
372,
345,
197,
532,
19302,
11,
1848,
1669,
357,
36892,
7,
17,
340,
6948,
35699,
115... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestEnforceNamespaceLabelOnPrometheusMonitors(t *testing.T) {
type testCase struct {
Name string
ServiceMonitor monitoringv1.ServiceMonitor
PromSpecEnforcedNamespaceLabel string
ExcludedFromEnforcement []monitoringv1.ObjectReference
Expected monitoringv1.ServiceMonitor
}
testcases := []testCase{
{
Name: "servicemonitor-ns-enforced-add",
ServiceMonitor: expandServiceMonitor(&promServiceMonitorFlat{
Name: "foo",
Namespace: "bar",
MetricRelabelConfigs: []*monitoringv1.RelabelConfig{},
RelabelConfigs: []*monitoringv1.RelabelConfig{},
}),
PromSpecEnforcedNamespaceLabel: "namespace",
Expected: expandServiceMonitor(&promServiceMonitorFlat{
Name: "foo",
Namespace: "bar",
MetricRelabelConfigs: []*monitoringv1.RelabelConfig{
{
TargetLabel: "namespace",
Replacement: "bar",
},
},
RelabelConfigs: []*monitoringv1.RelabelConfig{
{
TargetLabel: "namespace",
Replacement: "bar",
},
},
}),
},
{
Name: "servicemonitor-ns-enforced-exclude-by-name",
ServiceMonitor: expandServiceMonitor(&promServiceMonitorFlat{
Name: "exclude-me",
Namespace: "bar",
MetricRelabelConfigs: []*monitoringv1.RelabelConfig{},
RelabelConfigs: []*monitoringv1.RelabelConfig{},
}),
PromSpecEnforcedNamespaceLabel: "namespace",
ExcludedFromEnforcement: []monitoringv1.ObjectReference{
{
Namespace: "bar",
Group: monitoring.GroupName,
Resource: monitoringv1.ServiceMonitorName,
Name: "exclude-me",
},
},
Expected: expandServiceMonitor(&promServiceMonitorFlat{
Name: "exclude-me",
Namespace: "bar",
MetricRelabelConfigs: []*monitoringv1.RelabelConfig{},
RelabelConfigs: []*monitoringv1.RelabelConfig{},
}),
},
{
Name: "servicemonitor-ns-enforced-exclude-all-by-namespace",
ServiceMonitor: expandServiceMonitor(&promServiceMonitorFlat{
Name: "exclude-me",
Namespace: "bar",
MetricRelabelConfigs: []*monitoringv1.RelabelConfig{},
RelabelConfigs: []*monitoringv1.RelabelConfig{},
}),
PromSpecEnforcedNamespaceLabel: "namespace",
ExcludedFromEnforcement: []monitoringv1.ObjectReference{
{
Namespace: "bar",
Group: monitoring.GroupName,
Resource: monitoringv1.ServiceMonitorName,
},
},
Expected: expandServiceMonitor(&promServiceMonitorFlat{
Name: "exclude-me",
Namespace: "bar",
MetricRelabelConfigs: []*monitoringv1.RelabelConfig{},
RelabelConfigs: []*monitoringv1.RelabelConfig{},
}),
},
}
for _, tc := range testcases {
t.Run(tc.Name,
func(t *testing.T) {
nsLabeler := New(tc.PromSpecEnforcedNamespaceLabel, tc.ExcludedFromEnforcement, true)
tc.ServiceMonitor.Spec.Endpoints[0].MetricRelabelConfigs = nsLabeler.GetRelabelingConfigs(tc.ServiceMonitor.TypeMeta, tc.ServiceMonitor.ObjectMeta, tc.ServiceMonitor.Spec.Endpoints[0].MetricRelabelConfigs)
tc.ServiceMonitor.Spec.Endpoints[0].RelabelConfigs = nsLabeler.GetRelabelingConfigs(tc.ServiceMonitor.TypeMeta, tc.ServiceMonitor.ObjectMeta, tc.ServiceMonitor.Spec.Endpoints[0].RelabelConfigs)
if diff := cmp.Diff(tc.Expected, tc.ServiceMonitor); diff != "" {
t.Errorf("Unexpected result (-want +got):\n%s", diff)
}
},
)
}
} | explode_data.jsonl/29312 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1559
} | [
2830,
3393,
1702,
8833,
22699,
2476,
1925,
35186,
39705,
11095,
11905,
1155,
353,
8840,
836,
8,
1476,
13158,
54452,
2036,
341,
197,
21297,
5968,
914,
198,
197,
91619,
30098,
338,
16558,
85,
16,
13860,
30098,
198,
197,
10025,
441,
8327,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestHistogramDataPoint_Count(t *testing.T) {
ms := NewHistogramDataPoint()
ms.InitEmpty()
assert.EqualValues(t, uint64(0), ms.Count())
testValCount := uint64(17)
ms.SetCount(testValCount)
assert.EqualValues(t, testValCount, ms.Count())
} | explode_data.jsonl/19552 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 94
} | [
2830,
3393,
77210,
1043,
2609,
50775,
1155,
353,
8840,
836,
8,
341,
47691,
1669,
1532,
77210,
1043,
2609,
741,
47691,
26849,
3522,
741,
6948,
12808,
6227,
1155,
11,
2622,
21,
19,
7,
15,
701,
9829,
6134,
2398,
18185,
2208,
2507,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestServerResumptionDisabled(t *testing.T) {
sessionFilePath := tempFile("")
defer os.Remove(sessionFilePath)
config := testConfig.Clone()
testIssue := &serverTest{
name: "IssueTicketPreDisable",
command: []string{"openssl", "s_client", "-cipher", "AES128-SHA", "-ciphersuites", "TLS_AES_128_GCM_SHA256", "-sess_out", sessionFilePath},
config: config,
wait: true,
}
testResume := &serverTest{
name: "ResumeDisabled",
command: []string{"openssl", "s_client", "-cipher", "AES128-SHA", "-ciphersuites", "TLS_AES_128_GCM_SHA256", "-sess_in", sessionFilePath},
config: config,
validate: func(state ConnectionState) error {
if state.DidResume {
return errors.New("resumed with SessionTicketsDisabled")
}
return nil
},
}
config.SessionTicketsDisabled = false
runServerTestTLS12(t, testIssue)
config.SessionTicketsDisabled = true
runServerTestTLS12(t, testResume)
config.SessionTicketsDisabled = false
runServerTestTLS13(t, testIssue)
config.SessionTicketsDisabled = true
runServerTestTLS13(t, testResume)
} | explode_data.jsonl/36346 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 401
} | [
2830,
3393,
5475,
1061,
60574,
25907,
1155,
353,
8840,
836,
8,
341,
25054,
19090,
1669,
2730,
1703,
31764,
16867,
2643,
13270,
16264,
19090,
692,
25873,
1669,
1273,
2648,
64463,
2822,
18185,
42006,
1669,
609,
4030,
2271,
515,
197,
11609,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestMissingOrInvalidOSArchFetchRun(t *testing.T) {
ctx := testutils.NewRktRunCtx()
defer ctx.Cleanup()
tests := getMissingOrInvalidTests(t, ctx)
defer osArchTestRemoveImages(tests)
for i, tt := range tests {
imgHash, err := importImageAndFetchHash(t, ctx, "", tt.image)
if err != nil {
t.Fatalf("%v", err)
}
rktCmd := fmt.Sprintf("%s run --mds-register=false %s", ctx.Cmd(), imgHash)
t.Logf("Running test #%v: %v", i, rktCmd)
runRktAndCheckOutput(t, rktCmd, tt.expectedLine, tt.expectError)
}
} | explode_data.jsonl/14980 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 224
} | [
2830,
3393,
25080,
2195,
7928,
3126,
18727,
20714,
6727,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
1273,
6031,
7121,
49,
5840,
6727,
23684,
741,
16867,
5635,
727,
60639,
741,
78216,
1669,
633,
25080,
2195,
7928,
18200,
1155,
11,
5635,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestDelete(t *testing.T) {
mb, err := mailbox.Create("delete")
if err != nil {
t.Fatal(err)
}
msg, err := mb.PutMessage("TEST")
if err != nil {
t.Fatal(err)
}
key := mailbox.AccessKey{MailboxId: mb.Id}
key.Create()
req := api.DeleteMessageRequest{Message: msg.Id}
req.Sign(key.Name, key.Secret)
resp := api.DeleteMessageResponse{}
statusCode := doRequest(t, req, &resp, "delete")
if statusCode != 200 {
t.Fatal("Server responded with", statusCode)
}
count, err := mb.MessageCount()
if err != nil {
t.Fatal(err)
}
if count != 0 {
t.Fatal("Message count should be 0 but is", count)
}
} | explode_data.jsonl/15714 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 249
} | [
2830,
3393,
6435,
1155,
353,
8840,
836,
8,
341,
2109,
65,
11,
1848,
1669,
45742,
7251,
445,
4542,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
630,
21169,
11,
1848,
1669,
10016,
39825,
2052,
445,
10033,
1138,
743... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestRemoveInvalidPDBs(t *testing.T) {
deplabels := map[string]string{"foo": "deployment"}
sslabels := map[string]string{"foo": "statefulset"}
replicas := int32(2)
one := intstr.FromInt(1)
pdbs := []*pv1beta1.PodDisruptionBudget{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pdb-1",
Labels: ownerLabels,
},
Spec: pv1beta1.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: deplabels,
},
MinAvailable: &one,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pdb-2",
Labels: ownerLabels,
},
Spec: pv1beta1.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: sslabels,
},
MinAvailable: &one,
},
},
}
deployments := []*appsv1.Deployment{
{
ObjectMeta: metav1.ObjectMeta{
Name: "deployment-1",
Labels: deplabels,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: deplabels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: deplabels,
},
},
},
},
}
statefulSets := []*appsv1.StatefulSet{
{
ObjectMeta: metav1.ObjectMeta{
Name: "stateful-set-1",
Labels: sslabels,
},
Spec: appsv1.StatefulSetSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: sslabels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: sslabels,
},
},
},
},
}
namespaces := []*v1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
},
},
}
controller := &PDBController{
Interface: setupMockKubernetes(t, pdbs, deployments, statefulSets, namespaces, nil),
}
err := controller.addPDBs(namespaces[0])
if err != nil {
t.Error(err)
}
for _, pdb := range []string{"pdb-1", "pdb-2"} {
pdbResource, err := controller.Interface.PolicyV1beta1().PodDisruptionBudgets("default").Get(pdb, metav1.GetOptions{})
if err == nil {
t.Fatalf("unexpected pdb (%s) found: %v", pdb, pdbResource)
}
if !errors.IsNotFound(err) {
t.Fatalf("unexpected error: %s", err)
}
}
} | explode_data.jsonl/53283 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1030
} | [
2830,
3393,
13021,
7928,
47,
3506,
82,
1155,
353,
8840,
836,
8,
341,
58351,
500,
780,
82,
1669,
2415,
14032,
30953,
4913,
7975,
788,
330,
82213,
16707,
34472,
16873,
1669,
2415,
14032,
30953,
4913,
7975,
788,
330,
2454,
1262,
746,
16707... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestPushImage(t *testing.T) {
require := require.New(t)
ctx, cleanup := context.BuildContextFixtureWithSampleImage()
defer cleanup()
p, err := PushClientFixture(ctx)
require.NoError(err)
require.NoError(p.Push(testutil.SampleImageTag))
} | explode_data.jsonl/63641 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 86
} | [
2830,
3393,
16644,
1906,
1155,
353,
8840,
836,
8,
341,
17957,
1669,
1373,
7121,
1155,
340,
20985,
11,
21290,
1669,
2266,
25212,
1972,
18930,
2354,
17571,
1906,
741,
16867,
21290,
2822,
3223,
11,
1848,
1669,
22950,
2959,
18930,
7502,
340,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestEnqueueReferringRouteNotEnqueueIfHasNoLatestReady(t *testing.T) {
_, _, _, controller, reconciler, _, _, _, _ := newTestSetup(t)
config := getTestConfiguration()
f := reconciler.EnqueueReferringRoute(controller)
f(config)
// add this item to avoid being blocked by queue.
expected := "queue-has-no-work"
controller.WorkQueue.AddRateLimited(expected)
if k, _ := controller.WorkQueue.Get(); k != expected {
t.Errorf("Expected %v, saw %v", expected, k)
}
} | explode_data.jsonl/3286 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 162
} | [
2830,
3393,
1702,
4584,
3945,
14443,
4899,
2623,
1702,
4584,
2679,
10281,
2753,
31992,
19202,
1155,
353,
8840,
836,
8,
341,
197,
6878,
8358,
8358,
6461,
11,
31445,
5769,
11,
8358,
8358,
8358,
716,
1669,
501,
2271,
21821,
1155,
340,
2587... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFleetAggregatedPlayerStatus(t *testing.T) {
if !runtime.FeatureEnabled(runtime.FeaturePlayerTracking) {
t.SkipNow()
}
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
flt.Spec.Template.Spec.Players = &agonesv1.PlayersSpec{
InitialCapacity: 10,
}
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt.DeepCopy(), metav1.CreateOptions{})
assert.NoError(t, err)
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
if fleet.Status.Players == nil {
logrus.WithField("status", fleet.Status).Info("No Players")
return false
}
logrus.WithField("status", fleet.Status).Info("Checking Capacity")
return fleet.Status.Players.Capacity == 30
})
list, err := framework.ListGameServersFromFleet(flt)
assert.NoError(t, err)
// set 3 random capacities, and connect a random number of players
totalCapacity := 0
totalPlayers := 0
for i := range list {
// Do this, otherwise scopelint complains about "using a reference for the variable on range scope"
gs := &list[i]
players := rand.IntnRange(1, 5)
capacity := rand.IntnRange(players, 100)
totalCapacity += capacity
msg := fmt.Sprintf("PLAYER_CAPACITY %d", capacity)
reply, err := e2e.SendGameServerUDP(gs, msg)
if err != nil {
t.Fatalf("Could not message GameServer: %v", err)
}
assert.Equal(t, fmt.Sprintf("ACK: %s\n", msg), reply)
totalPlayers += players
for i := 1; i <= players; i++ {
msg := "PLAYER_CONNECT " + fmt.Sprintf("%d", i)
logrus.WithField("msg", msg).WithField("gs", gs.ObjectMeta.Name).Info("Sending Player Connect")
// retry on failure. Will stop flakiness of UDP packets being sent/received.
err := wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
reply, err := e2e.SendGameServerUDP(gs, msg)
if err != nil {
logrus.WithError(err).Warn("error with udp packet")
return false, nil
}
assert.Equal(t, fmt.Sprintf("ACK: %s\n", msg), reply)
return true, nil
})
assert.NoError(t, err)
}
}
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
logrus.WithField("players", fleet.Status.Players).WithField("totalCapacity", totalCapacity).
WithField("totalPlayers", totalPlayers).Info("Checking Capacity")
// since UDP packets might fail, we might get an extra player, so we'll check for that.
return (fleet.Status.Players.Capacity == int64(totalCapacity)) && (fleet.Status.Players.Count >= int64(totalPlayers))
})
} | explode_data.jsonl/15434 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 928
} | [
2830,
3393,
37,
18973,
9042,
93040,
4476,
2522,
1155,
353,
8840,
836,
8,
341,
743,
753,
22255,
58434,
5462,
89467,
58434,
4476,
37119,
8,
341,
197,
3244,
57776,
7039,
741,
197,
532,
3244,
41288,
7957,
741,
20985,
1669,
2266,
19047,
741,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestCreateUserPresenter_FailureCase(t *testing.T) {
presenter, useCase, ctrl := (&CreateUserPresenterTest{}).setup(t)
defer ctrl.Finish()
username, email, password :=
"username",
"user@email.com",
"$2a$10$KtwHGGRiKWRDEq/g/2RAguaqIqU7iJNM11aFeqcwzDhuv9jDY35uW"
useCase.EXPECT().
Execute(&definitions.CreateUserDTO{
Username: username,
Email: email,
Password: password,
}).
Return(nil, &shared.Error{})
// act
result, err := presenter.Handle(&contracts.CreateUserPresenterRequest{
Body: &contracts.CreateUserPresenterRequestBody{
Username: username,
Email: email,
Password: password,
},
})
// assert
assert.Nil(t, result)
assert.Equal(t, err, &shared.Error{})
} | explode_data.jsonl/7600 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 302
} | [
2830,
3393,
4021,
1474,
33849,
1400,
9373,
4207,
1155,
353,
8840,
836,
8,
341,
3223,
74646,
11,
990,
4207,
11,
23743,
1669,
15899,
4021,
1474,
33849,
2271,
6257,
568,
15188,
1155,
340,
16867,
23743,
991,
18176,
741,
72358,
11,
2551,
11,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMeterDBManager(t *testing.T) {
registry := prometheus.NewRegistry()
m := &manager{databases: []*VersionedDatabase{
{
Database: memdb.New(),
Version: version.NewDefaultVersion(2, 0, 0),
},
{
Database: memdb.New(),
Version: version.NewDefaultVersion(1, 5, 0),
},
{
Database: memdb.New(),
Version: version.DefaultVersion1_0_0,
},
}}
// Create meterdb manager with fresh registry and confirm
// that there are no errors registering metrics for multiple
// versioned databases.
manager, err := m.NewMeterDBManager("", registry)
assert.NoError(t, err)
dbs := manager.GetDatabases()
assert.Len(t, dbs, 3)
_, ok := dbs[0].Database.(*meterdb.Database)
assert.True(t, ok)
_, ok = dbs[1].Database.(*meterdb.Database)
assert.False(t, ok)
_, ok = dbs[2].Database.(*meterdb.Database)
assert.False(t, ok)
// Confirm that the error from a name conflict is handled correctly
_, err = m.NewMeterDBManager("", registry)
assert.Error(t, err)
} | explode_data.jsonl/37004 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 371
} | [
2830,
3393,
68224,
3506,
2043,
1155,
353,
8840,
836,
8,
341,
197,
29172,
1669,
2706,
39705,
7121,
15603,
2822,
2109,
1669,
609,
13297,
90,
67,
23822,
25,
29838,
5637,
291,
5988,
515,
197,
197,
515,
298,
197,
5988,
25,
1833,
1999,
7121... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIsInSlice(t *testing.T) {
s := []string{"a", "b", "c"}
if !isInSlice("a", s) {
t.Error("isInSlice error")
return
}
if isInSlice("d", s) {
t.Error("isInSlice error")
}
} | explode_data.jsonl/52169 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 94
} | [
2830,
3393,
3872,
641,
33236,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
3056,
917,
4913,
64,
497,
330,
65,
497,
330,
66,
16707,
743,
753,
285,
641,
33236,
445,
64,
497,
274,
8,
341,
197,
3244,
6141,
445,
285,
641,
33236,
1465,
113... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestIsLessThan1(t *testing.T) {
for _, hey := range lessThanTests {
if !hey.x1.IsLessThan(hey.x2) {
t.Fatalf("%v should be less than %v", hey.x1, hey.x2)
}
if hey.x2.IsLessThan(hey.x1) {
t.Fatalf("%v should not be less than %v", hey.x2, hey.x1)
}
}
} | explode_data.jsonl/39013 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 140
} | [
2830,
3393,
3872,
27451,
26067,
16,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
34209,
1669,
2088,
2686,
26067,
18200,
341,
197,
743,
753,
35561,
1993,
16,
4506,
27451,
26067,
7,
35561,
1993,
17,
8,
341,
298,
3244,
30762,
4430,
85,
1265... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestIndex_LockErr(t *testing.T) {
t.Log("index should return a 503 if unable to list locks")
RegisterMockTestingT(t)
l := mocks.NewMockLocker()
When(l.List()).ThenReturn(nil, errors.New("err"))
s := server.Server{
Locker: l,
}
req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil))
w := httptest.NewRecorder()
s.Index(w, req)
responseContains(t, w, 503, "Could not retrieve locks: err")
} | explode_data.jsonl/15316 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 160
} | [
2830,
3393,
1552,
2351,
1176,
7747,
1155,
353,
8840,
836,
8,
341,
3244,
5247,
445,
1252,
1265,
470,
264,
220,
20,
15,
18,
421,
11889,
311,
1140,
31676,
1138,
79096,
11571,
16451,
51,
1155,
340,
8810,
1669,
68909,
7121,
11571,
87253,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDeviceClassOID_readOID(t *testing.T) {
var snmpClient network.MockSNMPClient
ctx := network.NewContextWithDeviceConnection(context.Background(), &network.RequestDeviceConnection{
SNMP: &network.RequestDeviceConnectionSNMP{
SnmpClient: &snmpClient,
},
})
snmpClient.
On("SNMPWalk", ctx, "1").
Return([]network.SNMPResponse{
network.NewSNMPResponse("1.1", gosnmp.OctetString, "Port 1"),
network.NewSNMPResponse("1.2", gosnmp.OctetString, "Port 2"),
network.NewSNMPResponse("1.3", gosnmp.OctetString, "Port 3"),
network.NewSNMPResponse("1.4", gosnmp.OctetString, ""),
}, nil)
sut := deviceClassOID{
SNMPGetConfiguration: network.SNMPGetConfiguration{
OID: "1",
},
}
expected := map[int]interface{}{
1: value.New("Port 1"),
2: value.New("Port 2"),
3: value.New("Port 3"),
4: value.New(""),
}
res, err := sut.readOID(ctx, nil, false)
if assert.NoError(t, err) {
assert.Equal(t, expected, res)
}
} | explode_data.jsonl/68078 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 401
} | [
2830,
3393,
6985,
1957,
29805,
6443,
29805,
1155,
353,
8840,
836,
8,
341,
2405,
4131,
1307,
2959,
3922,
24664,
18966,
5781,
2959,
198,
20985,
1669,
3922,
7121,
1972,
2354,
6985,
4526,
5378,
19047,
1507,
609,
17511,
9659,
6985,
4526,
515,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_createPackageMap(t *testing.T) {
tests := []struct {
name string
importPath string
wantPackageName string
wantOK bool
}{
{"golang package", "context", "context", true},
{"third party", "golang.org/x/tools/present", "present", true},
//{"modules", "rsc.io/quote/v3", "quote", true},
{"fail", "this/should/not/work", "", false},
}
var importPaths []string
for _, t := range tests {
importPaths = append(importPaths, t.importPath)
}
packages := createPackageMap(importPaths)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotPackageName, gotOk := packages[tt.importPath]
if gotPackageName != tt.wantPackageName {
t.Errorf("createPackageMap() gotPackageName = %v, wantPackageName = %v", gotPackageName, tt.wantPackageName)
}
if gotOk != tt.wantOK {
t.Errorf("createPackageMap() gotOk = %v, wantOK = %v", gotOk, tt.wantOK)
}
})
}
} | explode_data.jsonl/3269 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 383
} | [
2830,
3393,
8657,
13100,
2227,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
310,
914,
198,
197,
21918,
1820,
414,
914,
198,
197,
50780,
65655,
914,
198,
197,
50780,
3925,
688,
1807,
198,
197,
59403,
197,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
manager.dsStore.Add(ds)
addNodes(manager.nodeStore, 0, 5, nil)
addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, ds, 2)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1)
addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4, 0)
}
} | explode_data.jsonl/50324 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 327
} | [
2830,
3393,
5877,
89177,
61317,
1806,
4525,
23527,
82,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
8282,
1669,
2088,
2647,
2580,
69388,
368,
341,
197,
83336,
1669,
501,
89177,
1649,
445,
7975,
1138,
197,
83336,
36473,
16689,
19816,
284,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestMapProxy_TryRemoveWithNilKey(t *testing.T) {
_, err := mp.TryRemove(nil, 1, time.Second)
AssertErrorNotNil(t, err, "remove did not return an error for nil key")
mp.Clear()
} | explode_data.jsonl/56960 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 70
} | [
2830,
3393,
2227,
16219,
1139,
884,
13021,
2354,
19064,
1592,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1848,
1669,
10490,
19824,
13021,
27907,
11,
220,
16,
11,
882,
32435,
340,
18017,
1454,
96144,
1155,
11,
1848,
11,
330,
5399,
1521,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestRemovedSysVars(t *testing.T) {
store, clean := realtikvtest.CreateMockStoreAndSetup(t)
defer clean()
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
variable.RegisterSysVar(&variable.SysVar{Scope: variable.ScopeGlobal | variable.ScopeSession, Name: "bogus_var", Value: "acdc"})
result := tk.MustQuery("SHOW GLOBAL VARIABLES LIKE 'bogus_var'")
result.Check(testkit.Rows("bogus_var acdc"))
result = tk.MustQuery("SELECT @@GLOBAL.bogus_var")
result.Check(testkit.Rows("acdc"))
tk.MustExec("SET GLOBAL bogus_var = 'newvalue'")
// unregister
variable.UnregisterSysVar("bogus_var")
result = tk.MustQuery("SHOW GLOBAL VARIABLES LIKE 'bogus_var'")
result.Check(testkit.Rows()) // empty
_, err := tk.Exec("SET GLOBAL bogus_var = 'newvalue'")
require.Equal(t, "[variable:1193]Unknown system variable 'bogus_var'", err.Error())
_, err = tk.Exec("SELECT @@GLOBAL.bogus_var")
require.Equal(t, "[variable:1193]Unknown system variable 'bogus_var'", err.Error())
} | explode_data.jsonl/5746 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 385
} | [
2830,
3393,
42642,
32792,
28305,
1155,
353,
8840,
836,
8,
341,
57279,
11,
4240,
1669,
1931,
83,
1579,
85,
1944,
7251,
11571,
6093,
3036,
21821,
1155,
340,
16867,
4240,
741,
3244,
74,
1669,
1273,
8226,
7121,
2271,
7695,
1155,
11,
3553,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCommandSide_RemoveProjectMember(t *testing.T) {
type fields struct {
eventstore *eventstore.Eventstore
}
type args struct {
ctx context.Context
projectID string
userID string
resourceOwner string
}
type res struct {
want *domain.ObjectDetails
err func(error) bool
}
tests := []struct {
name string
fields fields
args args
res res
}{
{
name: "invalid member projectid missing, error",
fields: fields{
eventstore: eventstoreExpect(
t,
),
},
args: args{
ctx: context.Background(),
projectID: "",
userID: "user1",
resourceOwner: "org1",
},
res: res{
err: caos_errs.IsErrorInvalidArgument,
},
},
{
name: "invalid member userid missing, error",
fields: fields{
eventstore: eventstoreExpect(
t,
),
},
args: args{
ctx: context.Background(),
projectID: "project1",
userID: "",
resourceOwner: "org1",
},
res: res{
err: caos_errs.IsErrorInvalidArgument,
},
},
{
name: "member not existing, nil result",
fields: fields{
eventstore: eventstoreExpect(
t,
expectFilter(),
),
},
args: args{
ctx: context.Background(),
projectID: "project1",
userID: "user1",
resourceOwner: "org1",
},
res: res{
want: nil,
},
},
{
name: "member remove, ok",
fields: fields{
eventstore: eventstoreExpect(
t,
expectFilter(
eventFromEventPusher(
project.NewProjectMemberAddedEvent(context.Background(),
&project.NewAggregate("project1", "org1").Aggregate,
"user1",
[]string{"PROJECT_OWNER"}...,
),
),
),
expectPush(
[]*repository.Event{
eventFromEventPusher(project.NewProjectMemberRemovedEvent(context.Background(),
&project.NewAggregate("project1", "org1").Aggregate,
"user1",
)),
},
uniqueConstraintsFromEventConstraint(member.NewRemoveMemberUniqueConstraint("project1", "user1")),
),
),
},
args: args{
ctx: context.Background(),
projectID: "project1",
userID: "user1",
resourceOwner: "org1",
},
res: res{
want: &domain.ObjectDetails{
ResourceOwner: "org1",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := &Commands{
eventstore: tt.fields.eventstore,
}
got, err := r.RemoveProjectMember(tt.args.ctx, tt.args.projectID, tt.args.userID, tt.args.resourceOwner)
if tt.res.err == nil {
assert.NoError(t, err)
}
if tt.res.err != nil && !tt.res.err(err) {
t.Errorf("got wrong err: %v ", err)
}
if tt.res.err == nil {
assert.Equal(t, tt.res.want, got)
}
})
}
} | explode_data.jsonl/41753 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1381
} | [
2830,
3393,
4062,
16384,
66843,
7849,
9366,
1155,
353,
8840,
836,
8,
341,
13158,
5043,
2036,
341,
197,
28302,
4314,
353,
3087,
4314,
6904,
4314,
198,
197,
532,
13158,
2827,
2036,
341,
197,
20985,
1843,
2266,
9328,
198,
197,
72470,
915,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestUpdateServiceAccount(t *testing.T) {
cluster1Endppoints := []*model.IstioEndpoint{
{Address: "10.172.0.1", ServiceAccount: "sa1"},
{Address: "10.172.0.2", ServiceAccount: "sa-vm1"},
}
testCases := []struct {
name string
clusterID string
endpoints []*model.IstioEndpoint
expect bool
}{
{
name: "added new endpoint",
clusterID: "c1",
endpoints: append(cluster1Endppoints, &model.IstioEndpoint{Address: "10.172.0.3", ServiceAccount: "sa1"}),
expect: false,
},
{
name: "added new sa",
clusterID: "c1",
endpoints: append(cluster1Endppoints, &model.IstioEndpoint{Address: "10.172.0.3", ServiceAccount: "sa2"}),
expect: true,
},
{
name: "updated endpoints address",
clusterID: "c1",
endpoints: []*model.IstioEndpoint{
{Address: "10.172.0.5", ServiceAccount: "sa1"},
{Address: "10.172.0.2", ServiceAccount: "sa-vm1"},
},
expect: false,
},
{
name: "deleted one endpoint with unique sa",
clusterID: "c1",
endpoints: []*model.IstioEndpoint{
{Address: "10.172.0.1", ServiceAccount: "sa1"},
},
expect: true,
},
{
name: "deleted one endpoint with duplicate sa",
clusterID: "c1",
endpoints: []*model.IstioEndpoint{
{Address: "10.172.0.2", ServiceAccount: "sa-vm1"},
},
expect: false,
},
{
name: "deleted endpoints",
clusterID: "c1",
endpoints: nil,
expect: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
s := new(xds.DiscoveryServer)
originalEndpointsShard := &xds.EndpointShards{
Shards: map[string][]*model.IstioEndpoint{
"c1": cluster1Endppoints,
"c2": {{Address: "10.244.0.1", ServiceAccount: "sa1"}, {Address: "10.244.0.2", ServiceAccount: "sa-vm2"}},
},
ServiceAccounts: map[string]struct{}{
"sa1": {},
"sa-vm1": {},
"sa-vm2": {},
},
}
originalEndpointsShard.Shards[tc.clusterID] = tc.endpoints
ret := s.UpdateServiceAccount(originalEndpointsShard, "test-svc")
if ret != tc.expect {
t.Errorf("expect UpdateServiceAccount %v, but got %v", tc.expect, ret)
}
})
}
} | explode_data.jsonl/7954 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1027
} | [
2830,
3393,
4289,
1860,
7365,
1155,
353,
8840,
836,
8,
341,
197,
18855,
16,
3727,
602,
14399,
1669,
29838,
2528,
2447,
267,
815,
27380,
515,
197,
197,
90,
4286,
25,
330,
16,
15,
13,
16,
22,
17,
13,
15,
13,
16,
497,
5362,
7365,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestMakePortMappings(t *testing.T) {
port := func(name string, protocol v1.Protocol, containerPort, hostPort int32, ip string) v1.ContainerPort {
return v1.ContainerPort{
Name: name,
Protocol: protocol,
ContainerPort: containerPort,
HostPort: hostPort,
HostIP: ip,
}
}
portMapping := func(name string, protocol v1.Protocol, containerPort, hostPort int, ip string) PortMapping {
return PortMapping{
Name: name,
Protocol: protocol,
ContainerPort: containerPort,
HostPort: hostPort,
HostIP: ip,
}
}
tests := []struct {
container *v1.Container
expectedPortMappings []PortMapping
}{
{
&v1.Container{
Name: "fooContainer",
Ports: []v1.ContainerPort{
port("", v1.ProtocolTCP, 80, 8080, "127.0.0.1"),
port("", v1.ProtocolTCP, 443, 4343, "192.168.0.1"),
port("foo", v1.ProtocolUDP, 555, 5555, ""),
// Duplicated, should be ignored.
port("foo", v1.ProtocolUDP, 888, 8888, ""),
// Duplicated, should be ignored.
port("", v1.ProtocolTCP, 80, 8888, ""),
},
},
[]PortMapping{
portMapping("fooContainer-TCP:80", v1.ProtocolTCP, 80, 8080, "127.0.0.1"),
portMapping("fooContainer-TCP:443", v1.ProtocolTCP, 443, 4343, "192.168.0.1"),
portMapping("fooContainer-foo", v1.ProtocolUDP, 555, 5555, ""),
},
},
}
for i, tt := range tests {
actual := MakePortMappings(tt.container)
assert.Equal(t, tt.expectedPortMappings, actual, "[%d]", i)
}
} | explode_data.jsonl/18704 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 697
} | [
2830,
3393,
8078,
7084,
83421,
1155,
353,
8840,
836,
8,
341,
52257,
1669,
2915,
3153,
914,
11,
11507,
348,
16,
54096,
11,
5476,
7084,
11,
3468,
7084,
526,
18,
17,
11,
5997,
914,
8,
348,
16,
33672,
7084,
341,
197,
853,
348,
16,
336... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAnalyzeConfig(t *testing.T) {
type args struct {
targetOS types.OS
configBlob []byte
disabledAnalyzers []analyzer.Type
}
tests := []struct {
name string
args args
want []types.Package
}{
{
name: "happy path",
args: args{
targetOS: types.OS{
Family: "alpine",
Name: "3.11.6",
},
configBlob: []byte("foo"),
},
want: []types.Package{
{Name: "musl", Version: "1.1.24-r2"},
},
},
{
name: "non-target OS",
args: args{
targetOS: types.OS{
Family: "debian",
Name: "9.2",
},
configBlob: []byte("foo"),
},
},
{
name: "Analyze returns an error",
args: args{
targetOS: types.OS{
Family: "alpine",
Name: "3.11.6",
},
configBlob: []byte("bar"),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
a := analyzer.NewAnalyzer(tt.args.disabledAnalyzers)
got := a.AnalyzeImageConfig(tt.args.targetOS, tt.args.configBlob)
assert.Equal(t, tt.want, got)
})
}
} | explode_data.jsonl/39285 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 522
} | [
2830,
3393,
2082,
55856,
2648,
1155,
353,
8840,
836,
8,
1476,
13158,
2827,
2036,
341,
197,
28861,
3126,
688,
4494,
57054,
198,
197,
25873,
37985,
286,
3056,
3782,
198,
197,
34597,
2312,
73307,
59619,
3056,
276,
27165,
10184,
198,
197,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRulePartitionNotAllowed(t *testing.T) {
common.Log.Debug("Entering function: %s", common.GetFunctionName())
sqls := []string{
`CREATE TABLE trb3 (id INT, name VARCHAR(50), purchased DATE) PARTITION BY RANGE( YEAR(purchased) )
(
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (1995),
PARTITION p2 VALUES LESS THAN (2000),
PARTITION p3 VALUES LESS THAN (2005)
);`,
`ALTER TABLE t1 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2002));`,
}
for _, sql := range sqls {
q, err := NewQuery4Audit(sql)
if err == nil {
rule := q.RulePartitionNotAllowed()
if rule.Item != "TBL.001" {
t.Error("Rule not match:", rule.Item, "Expect : TBL.001")
}
} else {
t.Error("sqlparser.Parse Error:", err)
}
}
common.Log.Debug("Exiting function: %s", common.GetFunctionName())
} | explode_data.jsonl/76844 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 357
} | [
2830,
3393,
11337,
49978,
97634,
1155,
353,
8840,
836,
8,
341,
83825,
5247,
20345,
445,
82867,
729,
25,
1018,
82,
497,
4185,
2234,
5152,
675,
2398,
30633,
82,
1669,
3056,
917,
515,
197,
197,
63,
22599,
14363,
489,
65,
18,
320,
307,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestToCrossRate(t *testing.T) {
data := []struct {
base sdk.Dec
quote sdk.Dec
expected sdk.Dec
}{
{
base: sdk.NewDec(1600),
quote: sdk.NewDec(100),
expected: sdk.NewDec(16),
},
{
base: sdk.NewDec(0),
quote: sdk.NewDec(100),
expected: sdk.NewDec(16),
},
{
base: sdk.NewDec(1600),
quote: sdk.NewDec(0),
expected: sdk.NewDec(16),
},
}
pbBase := ExchangeRateBallot{}
pbQuote := ExchangeRateBallot{}
cb := ExchangeRateBallot{}
for _, data := range data {
valAddr := sdk.ValAddress(secp256k1.GenPrivKey().PubKey().Address())
if !data.base.IsZero() {
pbBase = append(pbBase, NewVoteForTally(data.base, core.MicroKRWDenom, valAddr, 100))
}
pbQuote = append(pbQuote, NewVoteForTally(data.quote, core.MicroKRWDenom, valAddr, 100))
if !data.base.IsZero() && !data.quote.IsZero() {
cb = append(cb, NewVoteForTally(data.base.Quo(data.quote), core.MicroKRWDenom, valAddr, 100))
} else {
cb = append(cb, NewVoteForTally(sdk.ZeroDec(), core.MicroKRWDenom, valAddr, 0))
}
}
sort.Sort(cb)
baseMapBallot := pbBase.ToMap()
require.Equal(t, cb, pbQuote.ToCrossRateWithSort(baseMapBallot))
} | explode_data.jsonl/38391 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 549
} | [
2830,
3393,
1249,
28501,
11564,
1155,
353,
8840,
836,
8,
341,
8924,
1669,
3056,
1235,
341,
197,
24195,
257,
45402,
22442,
198,
197,
197,
2949,
262,
45402,
22442,
198,
197,
42400,
45402,
22442,
198,
197,
59403,
197,
197,
515,
298,
24195,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestString(t *testing.T) {
testCases := []struct {
name string
broker CeleryBroker
backend CeleryBackend
taskName string
taskFunc interface{}
inA string
inB string
expected string
}{
{
name: "string addition with redis broker/backend",
broker: redisBroker,
backend: redisBackend,
taskName: uuid.Must(uuid.NewV4()).String(),
taskFunc: addStr,
inA: "hello",
inB: "world",
expected: "helloworld",
},
{
name: "string addition with redis broker/backend with connection",
broker: redisBrokerWithConn,
backend: redisBackendWithConn,
taskName: uuid.Must(uuid.NewV4()).String(),
taskFunc: addStr,
inA: "hello",
inB: "world",
expected: "helloworld",
},
{
name: "string addition with amqp broker/backend",
broker: amqpBroker,
backend: amqpBackend,
taskName: uuid.Must(uuid.NewV4()).String(),
taskFunc: addStr,
inA: "hello",
inB: "world",
expected: "helloworld",
},
}
for _, tc := range testCases {
cli, _ := NewCeleryClient(tc.broker, tc.backend, 1)
cli.Register(tc.taskName, tc.taskFunc)
cli.StartWorker()
asyncResult, err := cli.Delay(tc.taskName, tc.inA, tc.inB)
if err != nil {
t.Errorf("test '%s': failed to get result for task %s: %+v", tc.name, tc.taskName, err)
cli.StopWorker()
continue
}
res, err := asyncResult.Get(TIMEOUT)
if err != nil {
t.Errorf("test '%s': failed to get result for task %s: %+v", tc.name, tc.taskName, err)
cli.StopWorker()
continue
}
if tc.expected != res.(string) {
t.Errorf("test '%s': returned result %+v is different from expected result %+v", tc.name, res, tc.expected)
}
cli.StopWorker()
}
} | explode_data.jsonl/77846 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 805
} | [
2830,
3393,
703,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
198,
197,
2233,
45985,
256,
46543,
722,
65545,
198,
197,
197,
20942,
220,
46543,
722,
29699,
198,
197,
49115,
675,
914,
198,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestImageImmutability(t *testing.T) {
img := mutate.MediaType(empty.Image, types.OCIManifestSchema1)
t.Run("manifest", func(t *testing.T) {
// Check that Manifest is immutable.
changed, err := img.Manifest()
if err != nil {
t.Errorf("Manifest() = %v", err)
}
want := changed.DeepCopy() // Create a copy of original before mutating it.
changed.MediaType = types.DockerManifestList
if got, err := img.Manifest(); err != nil {
t.Errorf("Manifest() = %v", err)
} else if !cmp.Equal(got, want) {
t.Errorf("manifest changed! %s", cmp.Diff(got, want))
}
})
t.Run("config file", func(t *testing.T) {
// Check that ConfigFile is immutable.
changed, err := img.ConfigFile()
if err != nil {
t.Errorf("ConfigFile() = %v", err)
}
want := changed.DeepCopy() // Create a copy of original before mutating it.
changed.Author = "Jay Pegg"
if got, err := img.ConfigFile(); err != nil {
t.Errorf("ConfigFile() = %v", err)
} else if !cmp.Equal(got, want) {
t.Errorf("ConfigFile changed! %s", cmp.Diff(got, want))
}
})
} | explode_data.jsonl/3101 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 418
} | [
2830,
3393,
1906,
1427,
6984,
2897,
1155,
353,
8840,
836,
8,
341,
39162,
1669,
67182,
63714,
24216,
7528,
11,
4494,
13,
63983,
38495,
8632,
16,
692,
3244,
16708,
445,
42315,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
197,
322,
4248... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestGetEventsInRangeWithStreamToken(t *testing.T) {
t.Parallel()
db := MustCreateDatabase(t)
events, _ := SimpleRoom(t, testRoomID, testUserIDA, testUserIDB)
MustWriteEvents(t, db, events)
latest, err := db.SyncPosition(ctx)
if err != nil {
t.Fatalf("failed to get SyncPosition: %s", err)
}
// head towards the beginning of time
to := types.NewStreamToken(0, 0)
// backpaginate 5 messages starting at the latest position.
paginatedEvents, err := db.GetEventsInStreamingRange(ctx, &latest, &to, testRoomID, 5, true)
if err != nil {
t.Fatalf("GetEventsInRange returned an error: %s", err)
}
gots := gomatrixserverlib.HeaderedToClientEvents(db.StreamEventsToEvents(&testUserDeviceA, paginatedEvents), gomatrixserverlib.FormatAll)
assertEventsEqual(t, "", true, gots, reversed(events[len(events)-5:]))
} | explode_data.jsonl/36038 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 289
} | [
2830,
3393,
1949,
7900,
76059,
2354,
3027,
3323,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
20939,
1669,
15465,
4021,
5988,
1155,
340,
90873,
11,
716,
1669,
8993,
14003,
1155,
11,
1273,
14003,
915,
11,
1273,
36899,
32,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSourceToKafkaBrokerToKnativeService(t *testing.T) {
client := test.SetupClusterAdmin(t)
cleanup := func() {
test.CleanupAll(t, client)
client.Clients.Eventing.EventingV1().Brokers(testNamespace).Delete(context.Background(), kafkaBrokerName, metav1.DeleteOptions{})
client.Clients.Eventing.SourcesV1().PingSources(testNamespace).Delete(context.Background(), pingSourceName, metav1.DeleteOptions{})
client.Clients.Eventing.EventingV1().Triggers(testNamespace).Delete(context.Background(), kafkatriggerName, metav1.DeleteOptions{})
client.Clients.Kube.CoreV1().ConfigMaps(testNamespace).Delete(context.Background(), cmName, metav1.DeleteOptions{})
client.Clients.Kube.CoreV1().Secrets(testNamespace).Delete(context.Background(), tlsSecret, metav1.DeleteOptions{})
client.Clients.Kube.CoreV1().Secrets(testNamespace).Delete(context.Background(), saslSecret, metav1.DeleteOptions{})
removePullSecretFromSA(t, client, testNamespace, serviceAccount, tlsSecret)
removePullSecretFromSA(t, client, testNamespace, serviceAccount, saslSecret)
}
test.CleanupOnInterrupt(t, cleanup)
defer cleanup()
_, err := utils.CopySecret(client.Clients.Kube.CoreV1(), "default", tlsSecret, testNamespace, "default")
if err != nil {
t.Fatalf("Could not copy Secret: %s to test namespace: %s", tlsSecret, testNamespace)
}
_, err = utils.CopySecret(client.Clients.Kube.CoreV1(), "default", saslSecret, testNamespace, "default")
if err != nil {
t.Fatalf("Could not copy Secret: %s to test namespace: %s", saslSecret, testNamespace)
}
ksvc, err := test.WithServiceReady(client, helloWorldService+"-broker", testNamespace, image)
if err != nil {
t.Fatal("Knative Service not ready", err)
}
// Create the configmap
_, err = client.Clients.Kube.CoreV1().ConfigMaps(testNamespace).Create(context.Background(), channelTemplateCM, metav1.CreateOptions{})
if err != nil {
t.Fatal("Unable to create Channel Template ConfigMap: ", err)
}
// Create the (kafka backed) broker
_, err = client.Clients.Eventing.EventingV1().Brokers(testNamespace).Create(context.Background(), broker, metav1.CreateOptions{})
if err != nil {
t.Fatal("Unable to create Kafka Backed Broker: ", err)
}
// Create the Trigger
_, err = client.Clients.Eventing.EventingV1().Triggers(testNamespace).Create(context.Background(), trigger, metav1.CreateOptions{})
if err != nil {
t.Fatal("Unable to create trigger: ", err)
}
// Create the source
_, err = client.Clients.Eventing.SourcesV1().PingSources(testNamespace).Create(context.Background(), brokerps, metav1.CreateOptions{})
if err != nil {
t.Fatal("Unable to create pingsource: ", err)
}
// Wait for text in kservice
servinge2e.WaitForRouteServingText(t, client, ksvc.Status.URL.URL(), helloWorldText)
} | explode_data.jsonl/62773 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 950
} | [
2830,
3393,
3608,
1249,
42,
21883,
65545,
1249,
42,
29738,
1860,
1155,
353,
8840,
836,
8,
341,
25291,
1669,
1273,
39820,
28678,
7210,
1155,
340,
1444,
60639,
1669,
2915,
368,
341,
197,
18185,
727,
60639,
2403,
1155,
11,
2943,
340,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLocalConfig_Defaults_OK(t *testing.T) {
// Arrange
testConfig := `
{
"ServiceDescription" : {
"DisplayName" : "My Service",
"Description" : "My Service Desc"
},
"Services" : [
{
"Path" : "test/path/1"
},
{
"Path" : "test/path/2"
}
]
}`
tmpFile := writeTestConfig(t, testConfig)
defer os.Remove(tmpFile)
vars := config.ReplacementVars{
ServiceName: "MyServiceName",
ServiceRoot: `C:\ProgramFiles\MyService`,
}
// Act
c, err := config.LoadConfig(tmpFile, vars)
// Assert
if err != nil {
t.Errorf("Error loading config: %v", err)
}
if !strings.Contains(c.ServiceConfig.StopFile, ".stop") {
t.Error("Expected default StopFile=.stop")
}
for _, service := range c.Services {
if service.GracefulShutdownTimeoutSecs != 5 {
t.Error("Expected default GracefulShutdownTimeoutSecs=5")
}
}
} | explode_data.jsonl/53029 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 450
} | [
2830,
3393,
7319,
2648,
60336,
82,
8375,
1155,
353,
8840,
836,
8,
341,
197,
322,
40580,
198,
18185,
2648,
1669,
22074,
262,
341,
286,
330,
1860,
5009,
1,
549,
341,
310,
330,
26456,
1,
549,
330,
5050,
5362,
756,
310,
330,
5009,
1,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestEncodeMessage(t *testing.T) {
// A generated message will be encoded using its XXX_Size and XXX_Marshal
// methods
pm := &testprotos.Test{
Foo: proto.String("bar"),
Array: []int32{0, 1, 2, 3},
S: &testprotos.Simple{
Name: proto.String("baz"),
Id: proto.Uint64(12345),
},
M: map[string]int32{
"a": 1,
"b": 2,
"c": 3,
"d": 4,
},
B: []byte{3, 2, 1, 0},
}
// A generated message will be encoded using its MarshalAppend and
// MarshalAppendDeterministic methods
md, err := desc.LoadMessageDescriptorForMessage(pm)
testutil.Ok(t, err)
dm := dynamic.NewMessage(md)
err = dm.ConvertFrom(pm)
testutil.Ok(t, err)
// This custom message will use MarshalDeterministic method or fall back to
// old proto.Marshal implementation for non-deterministic marshaling
cm := (*TestMessage)(pm)
testCases := []struct {
Name string
Msg proto.Message
}{
{Name: "generated", Msg: pm},
{Name: "dynamic", Msg: dm},
{Name: "custom", Msg: cm},
}
dels := []struct {
Name string
Delimited bool
}{
{Name: "not delimited", Delimited: false},
{Name: "delimited", Delimited: true},
}
var bytes []byte
for _, dl := range dels {
t.Run(dl.Name, func(t *testing.T) {
t.Run("deterministic", func(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
var cb codec.Buffer
cb.SetDeterministic(true)
if dl.Delimited {
err := cb.EncodeDelimitedMessage(tc.Msg)
testutil.Ok(t, err)
} else {
err := cb.EncodeMessage(tc.Msg)
testutil.Ok(t, err)
}
b := cb.Bytes()
if bytes == nil {
bytes = b
} else if dl.Delimited {
// delimited writes have varint-encoded length prefix
var lenBuf codec.Buffer
err := lenBuf.EncodeVarint(uint64(len(bytes)))
testutil.Ok(t, err)
testutil.Eq(t, append(lenBuf.Bytes(), bytes...), b)
} else {
// The generated proto message is the benchmark.
// Ensure that the others match its output.
testutil.Eq(t, bytes, b)
}
})
}
})
t.Run("non-deterministic", func(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
var cb codec.Buffer
if dl.Delimited {
err := cb.EncodeDelimitedMessage(tc.Msg)
testutil.Ok(t, err)
} else {
err := cb.EncodeMessage(tc.Msg)
testutil.Ok(t, err)
}
var b []byte
if dl.Delimited {
// delimited writes have varint-encoded length prefix
l, err := cb.DecodeVarint()
testutil.Ok(t, err)
b = cb.Bytes()
testutil.Eq(t, int(l), len(b))
} else {
b = cb.Bytes()
}
// we can't compare byte slices to benchmark since the
// message contains a map and we are not using deterministic
// marshal method; so verify that unmarshaling the bytes
// results in an equal message as the original
var pm2 testprotos.Test
err = proto.Unmarshal(b, &pm2)
testutil.Ok(t, err)
testutil.Require(t, proto.Equal(pm, &pm2))
})
}
})
})
}
} | explode_data.jsonl/38841 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1474
} | [
2830,
3393,
32535,
2052,
1155,
353,
8840,
836,
8,
341,
197,
322,
362,
7907,
1943,
686,
387,
20498,
1667,
1181,
19975,
45553,
323,
19975,
1245,
28423,
198,
197,
322,
5413,
198,
86511,
1669,
609,
1944,
4391,
436,
8787,
515,
197,
12727,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestTracker_pipelinerun_complete(t *testing.T) {
var (
pipelineName = "output-pipeline"
prName = "output-pipeline-1"
ns = "namespace"
task1Name = "output-task-1"
tr1Name = "output-task-1"
tr1Pod = "output-task-1-pod-123456"
task2Name = "output-task-2"
tr2Name = "output-task-2"
tr2Pod = "output-task-2-pod-123456"
allTasks = []string{}
onlyTask1 = []string{task1Name}
)
scenarios := []struct {
name string
tasks []string
expected []trh.Run
}{
{
name: "for all tasks",
tasks: allTasks,
expected: []trh.Run{
{
Name: tr1Name,
Task: task1Name,
}, {
Name: tr2Name,
Task: task2Name,
},
},
},
{
name: "for one task",
tasks: onlyTask1,
expected: []trh.Run{
{
Name: tr1Name,
Task: task1Name,
},
},
},
}
for _, s := range scenarios {
taskruns := []*v1alpha1.TaskRun{
tb.TaskRun(tr1Name, tb.TaskRunNamespace(ns),
tb.TaskRunSpec(
tb.TaskRunTaskRef(task1Name),
),
tb.TaskRunStatus(
tb.PodName(tr1Pod),
),
),
tb.TaskRun(tr2Name, tb.TaskRunNamespace(ns),
tb.TaskRunSpec(
tb.TaskRunTaskRef(task2Name),
),
tb.TaskRunStatus(
tb.PodName(tr2Pod),
),
),
}
initialPR := []*v1alpha1.PipelineRun{
tb.PipelineRun(prName, tb.PipelineRunNamespace(ns),
tb.PipelineRunLabel("tekton.dev/pipeline", prName),
tb.PipelineRunStatus(
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionUnknown,
Reason: resources.ReasonRunning,
}),
tb.PipelineRunTaskRunsStatus(tr1Name, &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: task1Name,
Status: &taskruns[0].Status,
}),
),
),
}
prStatusFn := tb.PipelineRunStatus(
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionTrue,
Reason: resources.ReasonSucceeded,
}),
tb.PipelineRunTaskRunsStatus(tr1Name, &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: task1Name,
Status: &taskruns[0].Status,
}),
tb.PipelineRunTaskRunsStatus(tr2Name, &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: task2Name,
Status: &taskruns[1].Status,
}),
)
pr := &v1alpha1.PipelineRun{}
prStatusFn(pr)
tc := startPipelineRun(t, pipelinetest.Data{PipelineRuns: initialPR, TaskRuns: taskruns}, pr.Status)
tracker := NewTracker(pipelineName, ns, tc)
output := taskRunsFor(s.tasks, tracker)
clitest.AssertOutput(t, s.expected, output)
}
} | explode_data.jsonl/66716 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1288
} | [
2830,
3393,
31133,
620,
81079,
10453,
359,
27675,
1155,
353,
8840,
836,
8,
341,
2405,
2399,
197,
3223,
8790,
675,
284,
330,
3006,
2268,
8790,
698,
197,
25653,
675,
981,
284,
330,
3006,
2268,
8790,
12,
16,
698,
197,
84041,
1843,
284,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGroupCreateIntegration(t *testing.T) {
c := client()
users := []User{
{
UserName: "ok",
FirstName: "ok",
LastName: "ok",
Email: "test@test.com",
ID: "ok",
},
}
gc, err := c.GroupCreate(&Group{
Name: "test-group",
Description: "a test group",
Email: "test@vinyldns.com",
Admins: users,
Members: users,
})
if err != nil {
t.Error(err)
}
gg, err := c.Group(gc.ID)
if err != nil {
t.Error(err)
}
if gg.ID != gc.ID {
t.Error(err)
}
} | explode_data.jsonl/12111 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 270
} | [
2830,
3393,
2808,
4021,
52464,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
2943,
741,
90896,
1669,
3056,
1474,
515,
197,
197,
515,
298,
197,
18856,
25,
220,
330,
562,
756,
298,
197,
26584,
25,
330,
562,
756,
298,
197,
27920,
25,
220,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestRedirectsNoLogin(t *testing.T) {
defer prepareTestEnv(t)()
var redirects = map[string]string{
"/user2/repo1/commits/master": "/user2/repo1/commits/branch/master",
"/user2/repo1/src/master": "/user2/repo1/src/branch/master",
"/user2/repo1/src/master/file.txt": "/user2/repo1/src/branch/master/file.txt",
"/user2/repo1/src/master/directory/file.txt": "/user2/repo1/src/branch/master/directory/file.txt",
"/user/avatar/Ghost/-1": "/assets/img/avatar_default.png",
"/api/v1/swagger": "/api/swagger",
}
for link, redirectLink := range redirects {
req := NewRequest(t, "GET", link)
resp := MakeRequest(t, req, http.StatusFound)
assert.EqualValues(t, path.Join(setting.AppSubURL, redirectLink), test.RedirectURL(resp))
}
} | explode_data.jsonl/76466 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 406
} | [
2830,
3393,
17725,
82,
2753,
6231,
1155,
353,
8840,
836,
8,
341,
16867,
10549,
2271,
14359,
1155,
8,
2822,
2405,
71171,
284,
2415,
14032,
30953,
515,
197,
197,
3115,
872,
17,
10758,
5368,
16,
14,
3621,
1199,
23303,
788,
394,
3521,
872... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestParseConditionMessage(t *testing.T) {
t.Parallel()
result, errs := ParseProgram(`
fun test(n: Int) {
pre {
n >= 0: "n must be positive"
}
return n
}
`)
require.Empty(t, errs)
utils.AssertEqualWithDiff(t,
[]ast.Declaration{
&ast.FunctionDeclaration{
Access: ast.AccessNotSpecified,
Identifier: ast.Identifier{
Identifier: "test",
Pos: ast.Position{Offset: 13, Line: 2, Column: 12},
},
ParameterList: &ast.ParameterList{
Parameters: []*ast.Parameter{
{
Label: "",
Identifier: ast.Identifier{Identifier: "n",
Pos: ast.Position{Offset: 18, Line: 2, Column: 17},
},
TypeAnnotation: &ast.TypeAnnotation{
IsResource: false,
Type: &ast.NominalType{
Identifier: ast.Identifier{
Identifier: "Int",
Pos: ast.Position{Offset: 21, Line: 2, Column: 20},
},
},
StartPos: ast.Position{Offset: 21, Line: 2, Column: 20},
},
Range: ast.Range{
StartPos: ast.Position{Offset: 18, Line: 2, Column: 17},
EndPos: ast.Position{Offset: 23, Line: 2, Column: 22},
},
},
},
Range: ast.Range{
StartPos: ast.Position{Offset: 17, Line: 2, Column: 16},
EndPos: ast.Position{Offset: 24, Line: 2, Column: 23},
},
},
ReturnTypeAnnotation: &ast.TypeAnnotation{
IsResource: false,
Type: &ast.NominalType{
Identifier: ast.Identifier{
Identifier: "",
Pos: ast.Position{Offset: 24, Line: 2, Column: 23},
},
},
StartPos: ast.Position{Offset: 24, Line: 2, Column: 23},
},
FunctionBlock: &ast.FunctionBlock{
Block: &ast.Block{
Statements: []ast.Statement{
&ast.ReturnStatement{
Expression: &ast.IdentifierExpression{
Identifier: ast.Identifier{
Identifier: "n",
Pos: ast.Position{Offset: 124, Line: 6, Column: 19},
},
},
Range: ast.Range{
StartPos: ast.Position{Offset: 117, Line: 6, Column: 12},
EndPos: ast.Position{Offset: 124, Line: 6, Column: 19},
},
},
},
Range: ast.Range{
StartPos: ast.Position{Offset: 26, Line: 2, Column: 25},
EndPos: ast.Position{Offset: 134, Line: 7, Column: 8},
},
},
PreConditions: &ast.Conditions{
{
Kind: ast.ConditionKindPre,
Test: &ast.BinaryExpression{
Operation: ast.OperationGreaterEqual,
Left: &ast.IdentifierExpression{
Identifier: ast.Identifier{
Identifier: "n",
Pos: ast.Position{Offset: 62, Line: 4, Column: 16},
},
},
Right: &ast.IntegerExpression{
Value: new(big.Int),
Base: 10,
Range: ast.Range{
StartPos: ast.Position{Offset: 67, Line: 4, Column: 21},
EndPos: ast.Position{Offset: 67, Line: 4, Column: 21},
},
},
},
Message: &ast.StringExpression{
Value: "n must be positive",
Range: ast.Range{
StartPos: ast.Position{Offset: 70, Line: 4, Column: 24},
EndPos: ast.Position{Offset: 89, Line: 4, Column: 43},
},
},
},
},
PostConditions: nil,
},
StartPos: ast.Position{Offset: 9, Line: 2, Column: 8},
},
},
result.Declarations(),
)
} | explode_data.jsonl/35977 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1770
} | [
2830,
3393,
14463,
10547,
2052,
1155,
353,
8840,
836,
8,
1476,
3244,
41288,
7957,
2822,
9559,
11,
70817,
1669,
14775,
10690,
61528,
286,
2464,
1273,
1445,
25,
1333,
8,
341,
310,
855,
341,
394,
308,
2604,
220,
15,
25,
330,
77,
1969,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTargetAggregatorsPerCommittee(t *testing.T) {
tests := []struct {
name string
}{
{
name: "Good",
},
}
service, err := prysmgrpc.New(context.Background(),
prysmgrpc.WithAddress(os.Getenv("PRYSMGRPC_ADDRESS")),
prysmgrpc.WithTimeout(timeout),
)
require.NoError(t, err)
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
targetAggregatorsPerCommittee, err := service.TargetAggregatorsPerCommittee(context.Background())
require.NoError(t, err)
require.NotNil(t, targetAggregatorsPerCommittee)
})
}
} | explode_data.jsonl/4615 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 226
} | [
2830,
3393,
6397,
9042,
7998,
2973,
3889,
33441,
6547,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
59403,
197,
197,
515,
298,
11609,
25,
330,
15216,
756,
197,
197,
1583,
197,
630,
52934,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestControllerGetCapabilities(t *testing.T) {
d, _ := NewFakeDriver(t)
capType := &csi.ControllerServiceCapability_Rpc{
Rpc: &csi.ControllerServiceCapability_RPC{
Type: csi.ControllerServiceCapability_RPC_UNKNOWN,
},
}
capList := []*csi.ControllerServiceCapability{{
Type: capType,
}}
d.Cap = capList
// Test valid request
req := csi.ControllerGetCapabilitiesRequest{}
resp, err := d.ControllerGetCapabilities(context.Background(), &req)
assert.NotNil(t, resp)
assert.Equal(t, resp.Capabilities[0].GetType(), capType)
assert.NoError(t, err)
} | explode_data.jsonl/59386 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 209
} | [
2830,
3393,
2051,
1949,
55315,
1155,
353,
8840,
836,
8,
341,
2698,
11,
716,
1669,
1532,
52317,
11349,
1155,
340,
1444,
391,
929,
1669,
609,
63229,
29112,
1860,
63746,
2568,
3992,
515,
197,
11143,
3992,
25,
609,
63229,
29112,
1860,
63746... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestH12_Head_ExplicitLen(t *testing.T) {
h12Compare{
ReqFunc: (*Client).Head,
Handler: func(w ResponseWriter, r *Request) {
if r.Method != "HEAD" {
t.Errorf("unexpected method %q", r.Method)
}
w.Header().Set("Content-Length", "1235")
},
}.run(t)
} | explode_data.jsonl/4747 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 122
} | [
2830,
3393,
39,
16,
17,
62,
12346,
62,
98923,
11271,
1155,
353,
8840,
836,
8,
341,
9598,
16,
17,
27374,
515,
197,
197,
27234,
9626,
25,
4609,
2959,
568,
12346,
345,
197,
197,
3050,
25,
2915,
3622,
5949,
6492,
11,
435,
353,
1900,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestServer_Push_RejectIfDisabled(t *testing.T) {
testServer_Push_RejectSingleRequest(t,
func(p http.Pusher, r *http.Request) error {
if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
return fmt.Errorf("Push()=%v, want %v", got, want)
}
return nil
},
Setting{SettingEnablePush, 0})
} | explode_data.jsonl/1971 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 144
} | [
2830,
3393,
5475,
1088,
1116,
50693,
583,
2679,
25907,
1155,
353,
8840,
836,
8,
341,
18185,
5475,
1088,
1116,
50693,
583,
10888,
1900,
1155,
345,
197,
29244,
1295,
1758,
34981,
261,
11,
435,
353,
1254,
9659,
8,
1465,
341,
298,
743,
26... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestInvalidAddition(t *testing.T) {
tev, fl := initialize(t)
defer tev.tearDown()
// Append block with invalid number
{
block := ledger.CreateNextBlock(fl, []*cb.Envelope{{Payload: []byte("My Data")}})
block.Header.Number++
assert.Error(t, fl.Append(block), "Addition of block with invalid number should fail")
}
// Append block with invalid previousHash
{
block := ledger.CreateNextBlock(fl, []*cb.Envelope{{Payload: []byte("My Data")}})
block.Header.PreviousHash = nil
assert.Error(t, fl.Append(block), "Addition of block with invalid previousHash should fail")
}
} | explode_data.jsonl/35034 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 207
} | [
2830,
3393,
7928,
2212,
680,
1155,
353,
8840,
836,
8,
341,
197,
665,
85,
11,
1320,
1669,
9468,
1155,
340,
16867,
1013,
85,
31853,
59342,
2822,
197,
322,
29807,
2504,
448,
8318,
1372,
198,
197,
515,
197,
47996,
1669,
46933,
7251,
5847,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIsBanned(t *testing.T) {
bannedSamples := []string{"2.144.0.0:0", "2.147.100.100:0", "2.147.255.255:0", "5.112.0.0:0", "5.127.255.255:0"}
cleanSamples := []string{"1.1.1.1:0", "73.231.0.156:0", "104.197.111.48:0"}
for _, ip := range bannedSamples {
if !isBanned(ip) {
t.Errorf("IP %s was not banned\n", ip)
}
}
for _, ip := range cleanSamples {
if isBanned(ip) {
t.Errorf("IP %s was banned\n", ip)
}
}
} | explode_data.jsonl/62197 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 220
} | [
2830,
3393,
3872,
33,
7295,
1155,
353,
8840,
836,
8,
341,
2233,
7295,
39571,
1669,
3056,
917,
4913,
17,
13,
16,
19,
19,
13,
15,
13,
15,
25,
15,
497,
330,
17,
13,
16,
19,
22,
13,
16,
15,
15,
13,
16,
15,
15,
25,
15,
497,
330... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestQuerySelect(t *testing.T) {
tb := testutil.NewTB(t)
storetestutil.RunSeriesInterestingCases(tb, 200e3, 200e3, func(t testutil.TB, samplesPerSeries, series int) {
benchQuerySelect(t, samplesPerSeries, series, true)
})
} | explode_data.jsonl/7542 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 87
} | [
2830,
3393,
2859,
3379,
1155,
353,
8840,
836,
8,
341,
62842,
1669,
1273,
1314,
7121,
31160,
1155,
340,
57279,
1944,
1314,
16708,
25544,
84315,
37302,
61414,
11,
220,
17,
15,
15,
68,
18,
11,
220,
17,
15,
15,
68,
18,
11,
2915,
1155,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEmptyCredGroupsDisableSetgroups(t *testing.T) {
cmd := whoamiCmd(t, os.Getuid(), os.Getgid(), false)
cmd.SysProcAttr.Credential = &syscall.Credential{}
if err := cmd.Run(); err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/36117 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 92
} | [
2830,
3393,
3522,
34,
1151,
22173,
25479,
1649,
16753,
1155,
353,
8840,
836,
8,
341,
25920,
1669,
879,
10606,
15613,
1155,
11,
2643,
2234,
2423,
1507,
2643,
2234,
34849,
1507,
895,
340,
25920,
59418,
24508,
13371,
727,
30320,
284,
609,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestHstore(t *testing.T) {
db := openTestConn(t)
defer db.Close()
// quitely create hstore if it doesn't exist
_, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore")
if err != nil {
t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error())
}
hs := Hstore{}
// test for null-valued hstores
err = db.QueryRow("SELECT NULL::hstore").Scan(&hs)
if err != nil {
t.Fatal(err)
}
if hs.Map != nil {
t.Fatalf("expected null map")
}
err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
if err != nil {
t.Fatalf("re-query null map failed: %s", err.Error())
}
if hs.Map != nil {
t.Fatalf("expected null map")
}
// test for empty hstores
err = db.QueryRow("SELECT ''::hstore").Scan(&hs)
if err != nil {
t.Fatal(err)
}
if hs.Map == nil {
t.Fatalf("expected empty map, got null map")
}
if len(hs.Map) != 0 {
t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
}
err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
if err != nil {
t.Fatalf("re-query empty map failed: %s", err.Error())
}
if hs.Map == nil {
t.Fatalf("expected empty map, got null map")
}
if len(hs.Map) != 0 {
t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
}
// a few example maps to test out
hsOnePair := Hstore{
Map: map[string]sql.NullString{
"key1": {String: "value1", Valid: true},
},
}
hsThreePairs := Hstore{
Map: map[string]sql.NullString{
"key1": {String: "value1", Valid: true},
"key2": {String: "value2", Valid: true},
"key3": {String: "value3", Valid: true},
},
}
hsSmorgasbord := Hstore{
Map: map[string]sql.NullString{
"nullstring": {String: "NULL", Valid: true},
"actuallynull": {String: "", Valid: false},
"NULL": {String: "NULL string key", Valid: true},
"withbracket": {String: "value>42", Valid: true},
"withequal": {String: "value=42", Valid: true},
`"withquotes1"`: {String: `this "should" be fine`, Valid: true},
`"withquotes"2"`: {String: `this "should\" also be fine`, Valid: true},
"embedded1": {String: "value1=>x1", Valid: true},
"embedded2": {String: `"value2"=>x2`, Valid: true},
"withnewlines": {String: "\n\nvalue\t=>2", Valid: true},
"<<all sorts of crazy>>": {String: `this, "should,\" also, => be fine`, Valid: true},
},
}
// test encoding in query params, then decoding during Scan
testBidirectional := func(h Hstore) {
err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs)
if err != nil {
t.Fatalf("re-query %d-pair map failed: %s", len(h.Map), err.Error())
}
if hs.Map == nil {
t.Fatalf("expected %d-pair map, got null map", len(h.Map))
}
if len(hs.Map) != len(h.Map) {
t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map))
}
for key, val := range hs.Map {
otherval, found := h.Map[key]
if !found {
t.Fatalf(" key '%v' not found in %d-pair map", key, len(h.Map))
}
if otherval.Valid != val.Valid {
t.Fatalf(" value %v <> %v in %d-pair map", otherval, val, len(h.Map))
}
if otherval.String != val.String {
t.Fatalf(" value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map))
}
}
}
testBidirectional(hsOnePair)
testBidirectional(hsThreePairs)
testBidirectional(hsSmorgasbord)
} | explode_data.jsonl/23327 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1503
} | [
2830,
3393,
39,
4314,
1155,
353,
8840,
836,
8,
341,
20939,
1669,
1787,
2271,
9701,
1155,
340,
16867,
2927,
10421,
2822,
197,
322,
16835,
974,
1855,
305,
4314,
421,
432,
3171,
944,
3000,
198,
197,
6878,
1848,
1669,
2927,
30798,
445,
22... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestGetRecentTrades(t *testing.T) {
t.Parallel()
currencyPair, err := currency.NewPairFromString("BTC-USDT")
if err != nil {
t.Fatal(err)
}
_, err = o.GetRecentTrades(currencyPair, asset.Spot)
if err != nil {
t.Error(err)
}
} | explode_data.jsonl/30211 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 104
} | [
2830,
3393,
1949,
25140,
1282,
3452,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
1444,
5088,
12443,
11,
1848,
1669,
11413,
7121,
12443,
44491,
445,
59118,
32340,
10599,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestRevertOneliner(t *testing.T) {
tcases := []struct {
in, exp string
cmdResult interface{}
}{
{in: "create instanceprofile name=stuff", exp: "delete instanceprofile name=stuff"},
{in: "delete instanceprofile name=stuff", exp: "create instanceprofile name=stuff"},
{in: "create appscalingtarget dimension=dim max-capacity=10 min-capacity=4 resource=res role=role service-namespace=ecs", exp: "delete appscalingtarget dimension=dim resource=res service-namespace=ecs"},
{in: "create appscalingpolicy dimension=my-dim name=my-name resource=my-res service-namespace=my-ns stepscaling-adjustment-type=my-sat stepscaling-adjustments=[0:0.25:-1,0.25:0.75:0,0.75::+1] type=my-type", exp: "delete appscalingpolicy dimension=my-dim name=my-name resource=my-res service-namespace=my-ns"},
{in: "attach containertask image=toto memory-hard-limit=64 container-name=test-container name=test-service", exp: "detach containertask container-name=test-container name=test-service"},
{in: "update securitygroup cidr=0.0.0.0/0 id=sg-12345 inbound=authorize portrange=443 protocol=tcp", exp: "update securitygroup cidr=0.0.0.0/0 id=sg-12345 inbound=revoke portrange=443 protocol=tcp"},
{in: "update securitygroup cidr=0.0.0.0/0 id=sg-12345 outbound=revoke portrange=443 protocol=tcp", exp: "update securitygroup cidr=0.0.0.0/0 id=sg-12345 outbound=authorize portrange=443 protocol=tcp"},
{in: "attach mfadevice id=my-mfa-device-id user=toto mfa-code-1=1234 mfa-code-2=2345", exp: "detach mfadevice id=my-mfa-device-id user=toto"},
{in: "detach mfadevice id=my-mfa-device-id user=toto", exp: "attach mfadevice id=my-mfa-device-id user=toto"},
{in: "stop instance ids=inst-id-1", exp: "check instance id=inst-id-1 state=stopped timeout=180\nstart instance ids=inst-id-1", cmdResult: "inst-id-1"},
{in: "start instance ids=inst-id-1", exp: "check instance id=inst-id-1 state=running timeout=180\nstop instance ids=inst-id-1", cmdResult: "inst-id-1"},
{in: "stop instance ids=inst-id-1,inst-id-2", exp: "check instance id=inst-id-1 state=stopped timeout=180\ncheck instance id=inst-id-2 state=stopped timeout=180\nstart instance ids=[inst-id-1,inst-id-2]", cmdResult: "inst-id-1"},
{in: "start instance ids=inst-id-1,inst-id-2", exp: "check instance id=inst-id-1 state=running timeout=180\ncheck instance id=inst-id-2 state=running timeout=180\nstop instance ids=[inst-id-1,inst-id-2]", cmdResult: "inst-id-1"},
{in: "stop database id=my-db-id", exp: "start database id=my-db-id"},
{in: "start database id=my-db-id", exp: "stop database id=my-db-id"},
{in: "create instanceprofile name='my funny name with spaces'", exp: "delete instanceprofile name='my funny name with spaces'"},
{in: "create appscalingtarget dimension=dim max-capacity=10 min-capacity=4 resource=['one res','two','three','4', 5, '4.3', 5.1] role=role service-namespace=ecs", exp: "delete appscalingtarget dimension=dim resource=['one res',two,three,'4',5,'4.3',5.1] service-namespace=ecs"},
{in: "create classicloadbalancer name=my-classic-loadb", exp: "delete classicloadbalancer name=my-classic-loadb", cmdResult: "my-classic-loadb"},
}
for _, tcase := range tcases {
parsed := MustParse(tcase.in)
if tcase.cmdResult != nil {
parsed.CommandNodesIterator()[0].CmdResult = tcase.cmdResult
}
reverted, err := parsed.Revert()
if err != nil {
t.Fatalf("case '%s': %s", tcase.in, err)
}
if got, want := reverted.String(), tcase.exp; got != want {
t.Fatalf("got\n%q\n\nwant\n%q\n", got, want)
}
}
} | explode_data.jsonl/39585 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1333
} | [
2830,
3393,
693,
1621,
1925,
301,
10453,
1155,
353,
8840,
836,
8,
341,
3244,
23910,
1669,
3056,
1235,
341,
197,
17430,
11,
1343,
256,
914,
198,
197,
25920,
2077,
3749,
16094,
197,
59403,
197,
197,
90,
258,
25,
330,
3182,
2867,
5365,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestVersion(t *testing.T) {
// First, create a new server and connection.
serverSocket, clientSocket, err := unet.SocketPair(false)
if err != nil {
t.Fatalf("socketpair got err %v expected nil", err)
}
defer clientSocket.Close()
// Create a new server and client.
s := NewServer(nil)
go s.Handle(serverSocket)
// NewClient does a Tversion exchange, so this is our test for success.
c, err := NewClient(clientSocket, 1024*1024 /* 1M message size */, HighestVersionString())
if err != nil {
t.Fatalf("got %v, expected nil", err)
}
// Check a bogus version string.
if err := c.sendRecv(&Tversion{Version: "notokay", MSize: 1024 * 1024}, &Rversion{}); err != syscall.EINVAL {
t.Errorf("got %v expected %v", err, syscall.EINVAL)
}
// Check a bogus version number.
if err := c.sendRecv(&Tversion{Version: "9P1000.L", MSize: 1024 * 1024}, &Rversion{}); err != syscall.EINVAL {
t.Errorf("got %v expected %v", err, syscall.EINVAL)
}
// Check a too high version number.
if err := c.sendRecv(&Tversion{Version: versionString(highestSupportedVersion + 1), MSize: 1024 * 1024}, &Rversion{}); err != syscall.EAGAIN {
t.Errorf("got %v expected %v", err, syscall.EAGAIN)
}
// Check an invalid MSize.
if err := c.sendRecv(&Tversion{Version: versionString(highestSupportedVersion), MSize: 0}, &Rversion{}); err != syscall.EINVAL {
t.Errorf("got %v expected %v", err, syscall.EINVAL)
}
} | explode_data.jsonl/2854 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 510
} | [
2830,
3393,
5637,
1155,
353,
8840,
836,
8,
341,
197,
322,
5512,
11,
1855,
264,
501,
3538,
323,
3633,
624,
41057,
10286,
11,
2943,
10286,
11,
1848,
1669,
650,
295,
52089,
12443,
3576,
340,
743,
1848,
961,
2092,
341,
197,
3244,
30762,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestMissingLoggerFromContext(t *testing.T) {
notExpected := NopLogger()
ctx := context.Background()
actual := From(ctx)
assert.True(t, actual != notExpected, "unexpected logger instance from context")
assert.NotNil(t, actual, "expected non-nil logger")
} | explode_data.jsonl/66894 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 85
} | [
2830,
3393,
25080,
7395,
3830,
1972,
1155,
353,
8840,
836,
8,
341,
97266,
18896,
1669,
451,
453,
7395,
741,
20985,
1669,
2266,
19047,
741,
88814,
1669,
5542,
7502,
340,
6948,
32443,
1155,
11,
5042,
961,
537,
18896,
11,
330,
53859,
5925,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestCollectionParsing(t *testing.T) {
ccp, ccpBytes, err := getCollectionConfigFromBytes([]byte(sampleCollectionConfigGood))
assert.NoError(t, err)
assert.NotNil(t, ccp)
assert.NotNil(t, ccpBytes)
conf := ccp.Config[0].GetStaticCollectionConfig()
pol, _ := policydsl.FromString("OR('A.member', 'B.member')")
assert.Equal(t, 3, int(conf.RequiredPeerCount))
assert.Equal(t, 483279847, int(conf.MaximumPeerCount))
assert.Equal(t, "foo", conf.Name)
assert.True(t, proto.Equal(pol, conf.MemberOrgsPolicy.GetSignaturePolicy()))
assert.Equal(t, 10, int(conf.BlockToLive))
assert.Equal(t, true, conf.MemberOnlyRead)
assert.Nil(t, conf.EndorsementPolicy)
t.Logf("conf=%s", conf)
// Test default values for RequiredPeerCount and MaxPeerCount
ccp, ccpBytes, err = getCollectionConfigFromBytes([]byte(sampleCollectionConfigGoodNoMaxPeerCountOrRequiredPeerCount))
assert.NoError(t, err)
assert.NotNil(t, ccp)
assert.NotNil(t, ccpBytes)
conf = ccp.Config[0].GetStaticCollectionConfig()
pol, _ = policydsl.FromString("OR('A.member', 'B.member')")
assert.Equal(t, 0, int(conf.RequiredPeerCount))
assert.Equal(t, 1, int(conf.MaximumPeerCount))
assert.Equal(t, "foo", conf.Name)
assert.True(t, proto.Equal(pol, conf.MemberOrgsPolicy.GetSignaturePolicy()))
assert.Equal(t, 10, int(conf.BlockToLive))
assert.Equal(t, true, conf.MemberOnlyRead)
assert.Nil(t, conf.EndorsementPolicy)
t.Logf("conf=%s", conf)
ccp, ccpBytes, err = getCollectionConfigFromBytes([]byte(sampleCollectionConfigGoodWithSignaturePolicy))
assert.NoError(t, err)
assert.NotNil(t, ccp)
assert.NotNil(t, ccpBytes)
conf = ccp.Config[0].GetStaticCollectionConfig()
pol, _ = policydsl.FromString("OR('A.member', 'B.member')")
assert.Equal(t, 3, int(conf.RequiredPeerCount))
assert.Equal(t, 483279847, int(conf.MaximumPeerCount))
assert.Equal(t, "foo", conf.Name)
assert.True(t, proto.Equal(pol, conf.MemberOrgsPolicy.GetSignaturePolicy()))
assert.Equal(t, 10, int(conf.BlockToLive))
assert.Equal(t, true, conf.MemberOnlyRead)
assert.True(t, proto.Equal(pol, conf.EndorsementPolicy.GetSignaturePolicy()))
t.Logf("conf=%s", conf)
ccp, ccpBytes, err = getCollectionConfigFromBytes([]byte(sampleCollectionConfigGoodWithChannelConfigPolicy))
assert.NoError(t, err)
assert.NotNil(t, ccp)
assert.NotNil(t, ccpBytes)
conf = ccp.Config[0].GetStaticCollectionConfig()
pol, _ = policydsl.FromString("OR('A.member', 'B.member')")
assert.Equal(t, 3, int(conf.RequiredPeerCount))
assert.Equal(t, 483279847, int(conf.MaximumPeerCount))
assert.Equal(t, "foo", conf.Name)
assert.True(t, proto.Equal(pol, conf.MemberOrgsPolicy.GetSignaturePolicy()))
assert.Equal(t, 10, int(conf.BlockToLive))
assert.Equal(t, true, conf.MemberOnlyRead)
assert.Equal(t, "/Channel/Application/Endorsement", conf.EndorsementPolicy.GetChannelConfigPolicyReference())
t.Logf("conf=%s", conf)
failureTests := []struct {
name string
collectionConfig string
expectedErr string
}{
{
name: "Invalid member orgs policy",
collectionConfig: sampleCollectionConfigBad,
expectedErr: "invalid policy barf: unrecognized token 'barf' in policy string",
},
{
name: "Invalid collection config",
collectionConfig: "barf",
expectedErr: "could not parse the collection configuration: invalid character 'b' looking for beginning of value",
},
{
name: "Invalid signature policy",
collectionConfig: sampleCollectionConfigBadInvalidSignaturePolicy,
expectedErr: `invalid endorsement policy [&chaincode.endorsementPolicy{ChannelConfigPolicy:"", SignaturePolicy:"invalid"}]: invalid signature policy: invalid`,
},
{
name: "Signature policy and channel config policy both specified",
collectionConfig: sampleCollectionConfigBadSignaturePolicyAndChannelConfigPolicy,
expectedErr: `invalid endorsement policy [&chaincode.endorsementPolicy{ChannelConfigPolicy:"/Channel/Application/Endorsement", SignaturePolicy:"OR('A.member', 'B.member')"}]: cannot specify both "--signature-policy" and "--channel-config-policy"`,
},
}
for _, test := range failureTests {
t.Run(test.name, func(t *testing.T) {
ccp, ccpBytes, err = getCollectionConfigFromBytes([]byte(test.collectionConfig))
assert.EqualError(t, err, test.expectedErr)
assert.Nil(t, ccp)
assert.Nil(t, ccpBytes)
})
}
} | explode_data.jsonl/46374 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1642
} | [
2830,
3393,
6482,
68839,
1155,
353,
8840,
836,
8,
341,
63517,
79,
11,
98897,
7078,
11,
1848,
1669,
633,
6482,
2648,
3830,
7078,
10556,
3782,
32968,
6482,
2648,
15216,
1171,
6948,
35699,
1155,
11,
1848,
340,
6948,
93882,
1155,
11,
98897,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTransfomersImageDefaultConfig(t *testing.T) {
th := kusttest_test.MakeHarness(t)
makeTransfomersImageBase(th)
m := th.Run("/app/base", th.MakeDefaultOptions())
th.AssertActualEqualsExpected(m, `
apiVersion: v1
group: apps
kind: Deployment
metadata:
name: deploy1
spec:
template:
spec:
containers:
- image: nginx:v2
name: ngnix
- image: foobar@sha256:24a0c4b4
name: repliaced-with-digest
- image: my-postgres:v3
name: postgresdb
initContainers:
- image: my-nginx:previous
name: nginx2
- image: myprivaterepohostname:1234/my/cool-alpine:1.8.0
name: init-alpine
---
kind: randomKind
metadata:
name: random
spec:
template:
spec:
containers:
- image: nginx:v2
name: ngnix1
spec2:
template:
spec:
containers:
- image: nginx:v2
name: nginx3
- image: my-nginx:previous
name: nginx4
spec3:
template:
spec:
initContainers:
- image: my-postgres:v3
name: postgresdb
- image: my-docker@sha256:25a0d4b4
name: init-docker
- image: myprivaterepohostname:1234/my/image:v1.0.1
name: myImage
- image: myprivaterepohostname:1234/my/image:v1.0.1
name: myImage2
- image: my-app-image:v1
name: my-app
- image: my-cool-app:latest
name: my-cool-app
`)
} | explode_data.jsonl/49821 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 683
} | [
2830,
3393,
3167,
69,
68538,
1906,
3675,
2648,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
595,
590,
1944,
4452,
50133,
74248,
1155,
340,
77438,
3167,
69,
68538,
1906,
3978,
24365,
340,
2109,
1669,
270,
16708,
4283,
676,
26090,
497,
270,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAutomaticHTTP2_ListenAndServe_GetCertificate(t *testing.T) {
cert, err := tls.X509KeyPair(internal.LocalhostCert, internal.LocalhostKey)
if err != nil {
t.Fatal(err)
}
testAutomaticHTTP2_ListenAndServe(t, &tls.Config{
GetCertificate: func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
return &cert, nil
},
})
} | explode_data.jsonl/22413 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 138
} | [
2830,
3393,
62790,
9230,
17,
27104,
268,
96059,
13614,
33202,
1155,
353,
8840,
836,
8,
341,
1444,
529,
11,
1848,
1669,
55026,
4338,
20,
15,
24,
1592,
12443,
98364,
20856,
3790,
36934,
11,
5306,
20856,
3790,
1592,
340,
743,
1848,
961,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGroupUint(t *testing.T) {
v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
grouped := v.GroupUint(func(i int, val uint) string {
return fmt.Sprintf("%v", i%2 == 0)
}).data.(map[string][]uint)
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
} | explode_data.jsonl/23461 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 159
} | [
2830,
3393,
2808,
21570,
1155,
353,
8840,
836,
8,
1476,
5195,
1669,
609,
1130,
90,
691,
25,
3056,
2496,
90,
2496,
7,
16,
701,
2622,
7,
16,
701,
2622,
7,
16,
701,
2622,
7,
16,
701,
2622,
7,
16,
701,
2622,
7,
16,
9139,
630,
4426... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestBoundedReadWriteSeeker_Seek__DynamicFileSize__SeekEnd_Negative checks
// seeking backwards from the end of a bounded view over a buffer. The bounded
// seeker starts at absolute offset 5 with a zero static size (the
// "dynamic file size" case per the test name), so the logical end works out
// to 15 here (20 underlying bytes minus the 5-byte start offset).
func TestBoundedReadWriteSeeker_Seek__DynamicFileSize__SeekEnd_Negative(t *testing.T) {
	sb := NewSeekableBuffer()
	// Add twenty bytes.
	_, err := sb.Write([]byte("01234567890123456789"))
	log.PanicIf(err)
	_, err = sb.Seek(0, os.SEEK_SET)
	log.PanicIf(err)
	// Bounded view beginning at offset 5; size argument is 0.
	brws, err := NewBoundedReadWriteSeeker(sb, 5, 0)
	log.PanicIf(err)
	offsetRaw, err := brws.Seek(-10, os.SEEK_END)
	log.PanicIf(err)
	// Logical position within the bounded view: end (15) minus 10.
	if offsetRaw != 15-10 {
		t.Fatalf("Relative offset not correct: (%d)", offsetRaw)
	}
	realOffsetRaw := GetOffset(sb)
	// Underlying position adds the 5-byte start offset back on.
	if realOffsetRaw != 5+15-10 {
		t.Fatalf("Real offset not correct: (%d)", realOffsetRaw)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 258
} | [
2830,
3393,
33,
13082,
58610,
39350,
261,
26920,
1225,
563,
21752,
67649,
563,
39350,
3727,
1604,
15060,
1155,
353,
8840,
836,
8,
341,
24842,
1669,
1532,
39350,
480,
4095,
2822,
197,
322,
2691,
17073,
5820,
624,
197,
6878,
1848,
1669,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestScopeExpandEvalBlock table-tests ExpandBlock followed by EvalBlock
// against a schema with one string attribute ("foo") and one map-nested
// block type ("bar" containing "baz"). Cases cover static blocks, dynamic
// blocks expanded from a list and from a map, and combinations of all three.
func TestScopeExpandEvalBlock(t *testing.T) {
	// Schema under test: attribute "foo", plus NestingMap block "bar"
	// whose body holds a single string attribute "baz".
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"foo": {
				Type: cty.String,
			},
		},
		BlockTypes: map[string]*configschema.NestedBlock{
			"bar": {
				Nesting: configschema.NestingMap,
				Block: configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"baz": {
							Type: cty.String,
						},
					},
				},
			},
		},
	}
	// Local values referenced by the configs below (local.greeting,
	// local.list, local.map).
	data := &dataForTests{
		LocalValues: map[string]cty.Value{
			"greeting": cty.StringVal("howdy"),
			"list": cty.ListVal([]cty.Value{
				cty.StringVal("elem0"),
				cty.StringVal("elem1"),
			}),
			"map": cty.MapVal(map[string]cty.Value{
				"key1": cty.StringVal("val1"),
				"key2": cty.StringVal("val2"),
			}),
		},
	}
	tests := map[string]struct {
		Config string
		Want   cty.Value
	}{
		"empty": {
			`
			`,
			cty.ObjectVal(map[string]cty.Value{
				"foo": cty.NullVal(cty.String),
				"bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{
					"baz": cty.String,
				})),
			}),
		},
		"literal attribute": {
			`
			foo = "hello"
			`,
			cty.ObjectVal(map[string]cty.Value{
				"foo": cty.StringVal("hello"),
				"bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{
					"baz": cty.String,
				})),
			}),
		},
		"variable attribute": {
			`
			foo = local.greeting
			`,
			cty.ObjectVal(map[string]cty.Value{
				"foo": cty.StringVal("howdy"),
				"bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{
					"baz": cty.String,
				})),
			}),
		},
		"one static block": {
			`
			bar "static" {}
			`,
			cty.ObjectVal(map[string]cty.Value{
				"foo": cty.NullVal(cty.String),
				"bar": cty.MapVal(map[string]cty.Value{
					"static": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.NullVal(cty.String),
					}),
				}),
			}),
		},
		"two static blocks": {
			`
			bar "static0" {
				baz = 0
			}
			bar "static1" {
				baz = 1
			}
			`,
			cty.ObjectVal(map[string]cty.Value{
				"foo": cty.NullVal(cty.String),
				"bar": cty.MapVal(map[string]cty.Value{
					"static0": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("0"),
					}),
					"static1": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("1"),
					}),
				}),
			}),
		},
		"dynamic blocks from list": {
			`
			dynamic "bar" {
				for_each = local.list
				labels = [bar.value]
				content {
					baz = bar.key
				}
			}
			`,
			cty.ObjectVal(map[string]cty.Value{
				"foo": cty.NullVal(cty.String),
				"bar": cty.MapVal(map[string]cty.Value{
					"elem0": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("0"),
					}),
					"elem1": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("1"),
					}),
				}),
			}),
		},
		"dynamic blocks from map": {
			`
			dynamic "bar" {
				for_each = local.map
				labels = [bar.key]
				content {
					baz = bar.value
				}
			}
			`,
			cty.ObjectVal(map[string]cty.Value{
				"foo": cty.NullVal(cty.String),
				"bar": cty.MapVal(map[string]cty.Value{
					"key1": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("val1"),
					}),
					"key2": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("val2"),
					}),
				}),
			}),
		},
		"everything at once": {
			`
			foo = "whoop"
			bar "static0" {
				baz = "s0"
			}
			dynamic "bar" {
				for_each = local.list
				labels = [bar.value]
				content {
					baz = bar.key
				}
			}
			bar "static1" {
				baz = "s1"
			}
			dynamic "bar" {
				for_each = local.map
				labels = [bar.key]
				content {
					baz = bar.value
				}
			}
			bar "static2" {
				baz = "s2"
			}
			`,
			cty.ObjectVal(map[string]cty.Value{
				"foo": cty.StringVal("whoop"),
				"bar": cty.MapVal(map[string]cty.Value{
					"key1": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("val1"),
					}),
					"key2": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("val2"),
					}),
					"elem0": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("0"),
					}),
					"elem1": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("1"),
					}),
					"static0": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("s0"),
					}),
					"static1": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("s1"),
					}),
					"static2": cty.ObjectVal(map[string]cty.Value{
						"baz": cty.StringVal("s2"),
					}),
				}),
			}),
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			// Parse the raw HCL config for this case.
			file, parseDiags := hclsyntax.ParseConfig([]byte(test.Config), "", hcl.Pos{Line: 1, Column: 1})
			if len(parseDiags) != 0 {
				t.Errorf("unexpected diagnostics during parse")
				for _, diag := range parseDiags {
					t.Errorf("- %s", diag)
				}
				return
			}
			body := file.Body
			scope := &Scope{
				Data: data,
			}
			// Expand dynamic blocks first, then evaluate against the schema.
			body, expandDiags := scope.ExpandBlock(body, schema)
			if expandDiags.HasErrors() {
				t.Fatal(expandDiags.Err())
			}
			got, valDiags := scope.EvalBlock(body, schema)
			if valDiags.HasErrors() {
				t.Fatal(valDiags.Err())
			}
			if !got.RawEquals(test.Want) {
				// We'll JSON-ize our values here just so it's easier to
				// read them in the assertion output.
				gotJSON := formattedJSONValue(got)
				wantJSON := formattedJSONValue(test.Want)
				t.Errorf(
					"wrong result\nconfig: %s\ngot:    %s\nwant:   %s",
					test.Config, gotJSON, wantJSON,
				)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2864
} | [
2830,
3393,
10803,
38946,
54469,
4713,
1155,
353,
8840,
836,
8,
341,
1903,
3416,
1669,
609,
1676,
17349,
28477,
515,
197,
197,
10516,
25,
2415,
14032,
8465,
1676,
17349,
33775,
515,
298,
197,
1,
7975,
788,
341,
571,
27725,
25,
272,
18... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestPriorityQueue_AddUnschedulableIfNotPresent verifies that
// AddUnschedulableIfNotPresent is a no-op for a pod already in the active
// queue, records a genuinely unschedulable pod, and that the nominator ends
// up tracking both pods against "node1".
func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
	objs := []runtime.Object{highPriNominatedPodInfo.Pod, unschedulablePodInfo.Pod}
	q := NewTestQueueWithObjects(context.Background(), newDefaultQueueSort(), objs)
	q.Add(highPriNominatedPodInfo.Pod)
	q.AddUnschedulableIfNotPresent(newQueuedPodInfoForLookup(highPriNominatedPodInfo.Pod), q.SchedulingCycle()) // Must not add anything.
	q.AddUnschedulableIfNotPresent(newQueuedPodInfoForLookup(unschedulablePodInfo.Pod), q.SchedulingCycle())
	// Both pods should be nominated onto node1 regardless of which queue
	// they landed in.
	expectedNominatedPods := &nominator{
		nominatedPodToNode: map[types.UID]string{
			unschedulablePodInfo.Pod.UID:    "node1",
			highPriNominatedPodInfo.Pod.UID: "node1",
		},
		nominatedPods: map[string][]*framework.PodInfo{
			"node1": {highPriNominatedPodInfo, unschedulablePodInfo},
		},
	}
	if diff := cmp.Diff(q.PodNominator, expectedNominatedPods, cmp.AllowUnexported(nominator{}), cmpopts.IgnoreFields(nominator{}, "podLister", "RWMutex")); diff != "" {
		t.Errorf("Unexpected diff after adding pods (-want, +got):\n%s", diff)
	}
	// The pod added via Add must still be the one popped from the active
	// queue.
	if p, err := q.Pop(); err != nil || p.Pod != highPriNominatedPodInfo.Pod {
		t.Errorf("Expected: %v after Pop, but got: %v", highPriNominatedPodInfo.Pod.Name, p.Pod.Name)
	}
	if len(q.PodNominator.(*nominator).nominatedPods) != 1 {
		t.Errorf("Expected nomindatePods to have one element: %v", q.PodNominator)
	}
	// The second pod must be retrievable from the unschedulable queue.
	if getUnschedulablePod(q, unschedulablePodInfo.Pod) != unschedulablePodInfo.Pod {
		t.Errorf("Pod %v was not found in the unschedulableQ.", unschedulablePodInfo.Pod.Name)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 597
} | [
2830,
3393,
20555,
7554,
21346,
1806,
72243,
360,
480,
2679,
2623,
21195,
1155,
353,
8840,
836,
8,
341,
22671,
82,
1669,
3056,
22255,
8348,
90,
11892,
92878,
45,
49515,
23527,
1731,
88823,
11,
6975,
2397,
360,
480,
23527,
1731,
88823,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestRegistry(t *testing.T) {
p := &testProtocol{}
if l := len(All()); l != 0 {
t.Fatal("failed to register")
}
if got := Get(p.Name()); got != nil {
t.Fatal("already registered")
}
Register(p)
if l := len(All()); l != 1 {
t.Fatal("failed to register")
}
if got := Get(p.Name()); got == nil {
t.Fatal("failed to register")
}
} | explode_data.jsonl/4144 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 146
} | [
2830,
3393,
15603,
1155,
353,
8840,
836,
8,
341,
3223,
1669,
609,
1944,
20689,
16094,
743,
326,
1669,
2422,
7,
2403,
13426,
326,
961,
220,
15,
341,
197,
3244,
26133,
445,
16091,
311,
4161,
1138,
197,
532,
743,
2684,
1669,
2126,
1295,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestGenRandomString(t *testing.T) {
rand.Seed(time.Now().UnixNano())
r, err := regexp.Compile("^[a-zA-Z0-9]*$")
if err != nil {
panic(err)
}
for i := -1; i <= 64; i++ {
result := GenRandomString(i)
if i < 1 {
if result != "" {
t.Fatalf("expected GenRandomString(%d) to return empty string, got %s", i, result)
}
} else {
if len(result) != i {
t.Fatalf("expected GenRandomString(%d) to return a string of length %d, got %s of length %d", i, i, result, len(result))
}
if !r.MatchString(result) {
t.Fatalf("expected GenRandomString(%d) to return an alphanumeric string, got %s instead", i, result)
}
}
}
} | explode_data.jsonl/43277 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 282
} | [
2830,
3393,
9967,
13999,
703,
1155,
353,
8840,
836,
8,
341,
7000,
437,
5732,
291,
9730,
13244,
1005,
55832,
83819,
12367,
7000,
11,
1848,
1669,
41877,
89323,
445,
27736,
64,
21088,
11171,
15,
12,
24,
8465,
87653,
743,
1848,
961,
2092,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestNewRequestSetsAccept(t *testing.T) {
r := NewRequest(nil, "get", &url.URL{Path: "/path/"}, "", ContentConfig{}, Serializers{}, nil, nil, 0)
if r.headers.Get("Accept") != "" {
t.Errorf("unexpected headers: %#v", r.headers)
}
r = NewRequest(nil, "get", &url.URL{Path: "/path/"}, "", ContentConfig{ContentType: "application/other"}, Serializers{}, nil, nil, 0)
if r.headers.Get("Accept") != "application/other, */*" {
t.Errorf("unexpected headers: %#v", r.headers)
}
} | explode_data.jsonl/13245 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 183
} | [
2830,
3393,
3564,
1900,
30175,
16646,
1155,
353,
8840,
836,
8,
341,
7000,
1669,
1532,
1900,
27907,
11,
330,
455,
497,
609,
1085,
20893,
90,
1820,
25,
3521,
2343,
11225,
2137,
7342,
8883,
2648,
22655,
11215,
12230,
22655,
2092,
11,
2092,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestMapKeySet(t *testing.T) {
m := Map[string, int]{
"one": 1,
"two": 2,
"three": 3,
}
set := m.KeySet()
if len(set) != 3 {
t.Fatalf("key set should equal map length")
}
if !set.Has("one") || !set.Has("two") || !set.Has("three") {
t.Fatalf("key set is missing data")
}
} | explode_data.jsonl/45347 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 134
} | [
2830,
3393,
2227,
1592,
1649,
1155,
353,
8840,
836,
8,
341,
2109,
1669,
5027,
14032,
11,
526,
60,
515,
197,
197,
1,
603,
788,
220,
16,
345,
197,
197,
1,
19789,
788,
220,
17,
345,
197,
197,
1,
27856,
788,
220,
18,
345,
197,
532,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestWithMarkup parses a section whose paragraph mixes italic (*b*), bold
// (**c**) and a <*>d<*> span, and asserts the resulting document tree:
// the markup becomes Inline nodes while <*>d<*> stays literal text.
func TestWithMarkup(t *testing.T) {
	src := `
Notes
-----
a *b* **c** <*>d<*>
`
	expected := `
Doc
	Section[Notes]
		Paragraph
			Text[a ]
			Inline[i "b"]
			Text[ ]
			Inline[b "c"]
			Text[ <*>d<*>]
`
	assertParse(t, expected, src)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 125
} | [
2830,
3393,
2354,
53088,
1155,
353,
8840,
836,
8,
341,
41144,
1669,
22074,
220,
18068,
198,
220,
79949,
64,
353,
65,
9,
3070,
66,
334,
69483,
67,
78822,
397,
3989,
42400,
1669,
22074,
9550,
198,
197,
9620,
58,
21667,
921,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestURLScheme(t *testing.T) {
var (
are = is.New(t)
dt = map[string]struct {
in string
out string
}{
"default": {},
"unknown": {in: "oops"},
"git": {in: vcs.Git, out: "git://"},
"http": {in: vcs.HTTP, out: "http://"},
"https": {in: vcs.HTTPS, out: "https://"},
"ssh+git": {in: vcs.SSHGit, out: "ssh://git@"},
}
)
for name, tt := range dt {
tt := tt
t.Run(name, func(t *testing.T) {
are.Equal(vcs.URLScheme(tt.in), tt.out) // mismatch result
})
}
} | explode_data.jsonl/14827 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 264
} | [
2830,
3393,
3144,
28906,
1155,
353,
8840,
836,
8,
341,
2405,
2399,
197,
197,
546,
284,
374,
7121,
1155,
340,
197,
97980,
220,
284,
2415,
14032,
60,
1235,
341,
298,
17430,
220,
914,
198,
298,
13967,
914,
198,
197,
197,
59403,
298,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestCanUseDefaults verifies the JSON-RPC client with default request and
// response codecs: it sends a call to a stub HTTP server, checks that the
// server's "result" is decoded into a plain string, and round-trips the
// captured request body to confirm the params were encoded as sent.
func TestCanUseDefaults(t *testing.T) {
	var (
		testbody    = `{"jsonrpc":"2.0", "result":"boogaloo"}`
		requestBody []byte
	)
	// Stub server: capture the raw request body and reply with a canned
	// JSON-RPC response.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		b, err := ioutil.ReadAll(r.Body)
		if err != nil && err != io.EOF {
			t.Fatal(err)
		}
		requestBody = b
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(testbody))
	}))
	sut := jsonrpc.NewClient(
		mustParse(server.URL),
		"add",
	)
	type addRequest struct {
		A int
		B int
	}
	in := addRequest{2, 2}
	result, err := sut.Endpoint()(context.Background(), in)
	if err != nil {
		t.Fatal(err)
	}
	// Default response decoding should surface the result as a string.
	rs, ok := result.(string)
	if !ok {
		t.Fatalf("result is not string: (%T)%+v", result, result)
	}
	if rs != "boogaloo" {
		t.Fatalf("want=boogaloo, got=%s", rs)
	}
	// Decode what the server actually received and confirm the params
	// round-trip back to the original input struct.
	var requestAtServer jsonrpc.Request
	err = json.Unmarshal(requestBody, &requestAtServer)
	if err != nil {
		t.Fatal(err)
	}
	var paramsAtServer addRequest
	err = json.Unmarshal(requestAtServer.Params, &paramsAtServer)
	if err != nil {
		t.Fatal(err)
	}
	if paramsAtServer != in {
		t.Fatalf("want=%+v, got=%+v", in, paramsAtServer)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 496
} | [
2830,
3393,
6713,
10253,
16273,
1155,
353,
8840,
836,
8,
341,
2405,
2399,
197,
18185,
2599,
262,
284,
1565,
4913,
2236,
29414,
3252,
17,
13,
15,
497,
330,
1382,
3252,
749,
538,
278,
2624,
9207,
3989,
197,
23555,
5444,
3056,
3782,
198,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestHaGroupDeleteParams_WithHTTPClient(t *testing.T) {
p := NewHaGroupDeleteParams()
cli := &http.Client{}
p = p.WithHTTPClient(cli)
require.NotNil(t, p.HTTPClient)
assert.Equal(t, cli, p.HTTPClient)
} | explode_data.jsonl/7754 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 86
} | [
2830,
3393,
32942,
2808,
6435,
4870,
62,
2354,
9230,
2959,
1155,
353,
8840,
836,
8,
341,
3223,
1669,
1532,
32942,
2808,
6435,
4870,
741,
86448,
1669,
609,
1254,
11716,
16094,
3223,
284,
281,
26124,
9230,
2959,
70249,
340,
17957,
93882,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestWorkerAPIAuth checks mTLS authentication on the worker API status
// route: client certs signed by the trusted CA succeed (for the accepted
// CNs), while certs from an untrusted CA or self-signed certs are rejected.
func TestWorkerAPIAuth(t *testing.T) {
	t.Run("certificate signed by a trusted CA", func(t *testing.T) {
		cases := []struct {
			caseDesc string
			subj     string
			success  bool
		}{
			{"valid CN 1", "/CN=worker.osbuild.org", true},
			{"valid CN 2", "/CN=localhost", true},
			{"invalid CN", "/CN=example.com", false},
		}
		for _, c := range cases {
			t.Run(c.caseDesc, func(t *testing.T) {
				// Issue a client cert from the composer CA with the
				// case's subject, removed again when the subtest ends.
				ckp, err := newCertificateKeyPair("/etc/osbuild-composer/ca-crt.pem", "/etc/osbuild-composer/ca-key.pem", c.subj)
				require.NoError(t, err)
				defer ckp.remove()
				testRoute(t, "https://localhost:8700/api/worker/v1/status", ckp, c.success)
			})
		}
	})
	t.Run("certificate signed by an untrusted CA", func(t *testing.T) {
		// generate a new CA
		ca, err := newSelfSignedCertificateKeyPair("/CN=osbuild.org")
		require.NoError(t, err)
		defer ca.remove()
		// create a new certificate and sign it with the new (untrusted) CA
		ckp, err := newCertificateKeyPair(ca.certificate(), ca.key(), "/CN=localhost")
		require.NoError(t, err)
		defer ckp.remove()
		testRoute(t, "https://localhost:8700/api/worker/v1/status", ckp, false)
	})
	t.Run("self-signed certificate", func(t *testing.T) {
		// generate a new self-signed certificate
		ckp, err := newSelfSignedCertificateKeyPair("/CN=osbuild.org")
		require.NoError(t, err)
		defer ckp.remove()
		testRoute(t, "https://localhost:8700/api/worker/v1/status", ckp, false)
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 594
} | [
2830,
3393,
21936,
7082,
5087,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
63586,
8499,
553,
264,
21789,
9183,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
1444,
2264,
1669,
3056,
1235,
341,
298,
2722,
11065,
914,
198,
298,
28624... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestListListeners verifies paginated listener listing against a stubbed
// HTTP endpoint: exactly one page is visited, it contains two listeners,
// and they match the ListenerWeb / ListenerDb fixtures in order.
func TestListListeners(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()
	HandleListenerListSuccessfully(t)
	pages := 0
	// EachPage invokes the callback once per result page; returning
	// (true, nil) continues iteration, (false, err) aborts it.
	err := listeners.List(fake.ServiceClient(), listeners.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
		pages++
		actual, err := listeners.ExtractListeners(page)
		if err != nil {
			return false, err
		}
		if len(actual) != 2 {
			t.Fatalf("Expected 2 listeners, got %d", len(actual))
		}
		th.CheckDeepEquals(t, ListenerWeb, actual[0])
		th.CheckDeepEquals(t, ListenerDb, actual[1])
		return true, nil
	})
	th.AssertNoErr(t, err)
	if pages != 1 {
		t.Errorf("Expected 1 page, saw %d", pages)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 257
} | [
2830,
3393,
852,
31570,
1155,
353,
8840,
836,
8,
341,
70479,
39820,
9230,
741,
16867,
270,
94849,
37496,
9230,
741,
197,
6999,
2743,
852,
35959,
1155,
692,
3223,
1134,
1669,
220,
15,
198,
9859,
1669,
23562,
5814,
74138,
13860,
2959,
150... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestWrite(t *testing.T) {
i := NewGrafanaIni(&testGrafanaConfig)
sb, sha := i.Write()
hash := sha256.New()
_, err := io.WriteString(hash, testIni)
require.NoError(t, err)
require.Equal(t, sha, fmt.Sprintf("%x", hash.Sum(nil)))
require.Equal(t, sb, testIni)
} | explode_data.jsonl/24525 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 121
} | [
2830,
3393,
7985,
1155,
353,
8840,
836,
8,
341,
8230,
1669,
1532,
38,
15453,
3362,
43325,
2099,
1944,
38,
15453,
3362,
2648,
340,
24842,
11,
15870,
1669,
600,
4073,
2822,
50333,
1669,
15870,
17,
20,
21,
7121,
741,
197,
6878,
1848,
166... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestForceCloseFailLocalDataLoss asserts that ForceClose refuses to run on
// a channel carrying the ChanStatusLocalDataLoss flag, i.e. any channel not
// in the pure default state.
func TestForceCloseFailLocalDataLoss(t *testing.T) {
	t.Parallel()
	aliceChannel, _, cleanUp, err := CreateTestChannels(
		channeldb.SingleFunderBit,
	)
	if err != nil {
		t.Fatalf("unable to create test channels: %v", err)
	}
	defer cleanUp()
	// Now that we have our set of channels, we'll modify the channel state
	// to have a non-default channel flag.
	err = aliceChannel.channelState.ApplyChanStatus(
		channeldb.ChanStatusLocalDataLoss,
	)
	if err != nil {
		t.Fatalf("unable to apply channel state: %v", err)
	}
	// Due to the change above, if we attempt to force close this
	// channel, we should fail as it isn't safe to force close a
	// channel that isn't in the pure default state.
	_, err = aliceChannel.ForceClose()
	if err == nil {
		t.Fatalf("expected force close to fail due to non-default " +
			"chan state")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 287
} | [
2830,
3393,
18573,
7925,
19524,
7319,
1043,
39838,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
197,
63195,
9629,
11,
8358,
4240,
2324,
11,
1848,
1669,
4230,
2271,
35925,
1006,
197,
23049,
1020,
783,
65,
23119,
37,
7995,
8344,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestValidatePersistentVolumeSpec table-tests ValidatePersistentVolumeSpec
// for both regular PVs and inline (ephemeral) specs. Inline specs must use a
// CSI source and must not carry capacity, storage class, non-filesystem
// volume mode, non-default reclaim policy, or node affinity, and must have
// access modes.
func TestValidatePersistentVolumeSpec(t *testing.T) {
	fsmode := core.PersistentVolumeFilesystem
	blockmode := core.PersistentVolumeBlock
	scenarios := map[string]struct {
		isExpectedFailure bool
		isInlineSpec      bool
		pvSpec            *core.PersistentVolumeSpec
	}{
		"pv-pvspec-valid": {
			isExpectedFailure: false,
			isInlineSpec:      false,
			pvSpec: &core.PersistentVolumeSpec{
				Capacity: core.ResourceList{
					core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
				},
				StorageClassName:              "testclass",
				PersistentVolumeReclaimPolicy: core.PersistentVolumeReclaimRecycle,
				AccessModes:                   []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
				PersistentVolumeSource: core.PersistentVolumeSource{
					HostPath: &core.HostPathVolumeSource{
						Path: "/foo",
						Type: newHostPathType(string(core.HostPathDirectory)),
					},
				},
				VolumeMode:   &fsmode,
				NodeAffinity: simpleVolumeNodeAffinity("foo", "bar"),
			},
		},
		"inline-pvspec-with-capacity": {
			isExpectedFailure: true,
			isInlineSpec:      true,
			pvSpec: &core.PersistentVolumeSpec{
				Capacity: core.ResourceList{
					core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
				},
				PersistentVolumeSource: core.PersistentVolumeSource{
					CSI: &core.CSIPersistentVolumeSource{Driver: "test-driver", VolumeHandle: "test-123", ReadOnly: true},
				},
				AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
			},
		},
		"inline-pvspec-with-sc": {
			isExpectedFailure: true,
			isInlineSpec:      true,
			pvSpec: &core.PersistentVolumeSpec{
				PersistentVolumeSource: core.PersistentVolumeSource{
					CSI: &core.CSIPersistentVolumeSource{Driver: "test-driver", VolumeHandle: "test-123", ReadOnly: true},
				},
				AccessModes:      []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
				StorageClassName: "testclass",
			},
		},
		"inline-pvspec-with-non-fs-volume-mode": {
			isExpectedFailure: true,
			isInlineSpec:      true,
			pvSpec: &core.PersistentVolumeSpec{
				PersistentVolumeSource: core.PersistentVolumeSource{
					CSI: &core.CSIPersistentVolumeSource{Driver: "test-driver", VolumeHandle: "test-123", ReadOnly: true},
				},
				AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
				VolumeMode:  &blockmode,
			},
		},
		"inline-pvspec-with-non-retain-reclaim-policy": {
			isExpectedFailure: true,
			isInlineSpec:      true,
			pvSpec: &core.PersistentVolumeSpec{
				PersistentVolumeReclaimPolicy: core.PersistentVolumeReclaimRecycle,
				PersistentVolumeSource: core.PersistentVolumeSource{
					CSI: &core.CSIPersistentVolumeSource{Driver: "test-driver", VolumeHandle: "test-123", ReadOnly: true},
				},
				AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
			},
		},
		"inline-pvspec-with-node-affinity": {
			isExpectedFailure: true,
			isInlineSpec:      true,
			pvSpec: &core.PersistentVolumeSpec{
				PersistentVolumeSource: core.PersistentVolumeSource{
					CSI: &core.CSIPersistentVolumeSource{Driver: "test-driver", VolumeHandle: "test-123", ReadOnly: true},
				},
				AccessModes:  []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
				NodeAffinity: simpleVolumeNodeAffinity("foo", "bar"),
			},
		},
		"inline-pvspec-with-non-csi-source": {
			isExpectedFailure: true,
			isInlineSpec:      true,
			pvSpec: &core.PersistentVolumeSpec{
				PersistentVolumeSource: core.PersistentVolumeSource{
					HostPath: &core.HostPathVolumeSource{
						Path: "/foo",
						Type: newHostPathType(string(core.HostPathDirectory)),
					},
				},
				AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
			},
		},
		"inline-pvspec-valid-with-access-modes-and-mount-options": {
			isExpectedFailure: false,
			isInlineSpec:      true,
			pvSpec: &core.PersistentVolumeSpec{
				PersistentVolumeSource: core.PersistentVolumeSource{
					CSI: &core.CSIPersistentVolumeSource{Driver: "test-driver", VolumeHandle: "test-123", ReadOnly: true},
				},
				AccessModes:  []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
				MountOptions: []string{"soft", "read-write"},
			},
		},
		"inline-pvspec-valid-with-access-modes": {
			isExpectedFailure: false,
			isInlineSpec:      true,
			pvSpec: &core.PersistentVolumeSpec{
				PersistentVolumeSource: core.PersistentVolumeSource{
					CSI: &core.CSIPersistentVolumeSource{Driver: "test-driver", VolumeHandle: "test-123", ReadOnly: true},
				},
				AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
			},
		},
		"inline-pvspec-with-missing-acess-modes": {
			isExpectedFailure: true,
			isInlineSpec:      true,
			pvSpec: &core.PersistentVolumeSpec{
				PersistentVolumeSource: core.PersistentVolumeSource{
					CSI: &core.CSIPersistentVolumeSource{Driver: "test-driver", VolumeHandle: "test-123", ReadOnly: true},
				},
				MountOptions: []string{"soft", "read-write"},
			},
		},
	}
	// Each scenario passes when the validator's verdict (errors vs. none)
	// matches isExpectedFailure.
	for name, scenario := range scenarios {
		errs := ValidatePersistentVolumeSpec(scenario.pvSpec, "", scenario.isInlineSpec, field.NewPath("field"))
		if len(errs) == 0 && scenario.isExpectedFailure {
			t.Errorf("Unexpected success for scenario: %s", name)
		}
		if len(errs) > 0 && !scenario.isExpectedFailure {
			t.Errorf("Unexpected failure for scenario: %s - %+v", name, errs)
		}
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2136
} | [
2830,
3393,
17926,
53194,
18902,
8327,
1155,
353,
8840,
836,
8,
341,
1166,
3563,
534,
1669,
6200,
61655,
18902,
1703,
8948,
198,
47996,
8516,
1669,
6200,
61655,
18902,
4713,
198,
29928,
60494,
1669,
2415,
14032,
60,
1235,
341,
197,
19907,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestSyncer_SyncAny_reject verifies that SyncAny returns errNoSnapshots
// once the ABCI app has rejected every available snapshot offer, trying
// them in preference order (s22, then s12, then s11).
func TestSyncer_SyncAny_reject(t *testing.T) {
	syncer, connSnapshot := setupOfferSyncer(t)
	// s22 is tried first, then s12, then s11, then errNoSnapshots
	s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	_, err := syncer.AddSnapshot(simplePeer("id"), s22)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(simplePeer("id"), s12)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(simplePeer("id"), s11)
	require.NoError(t, err)
	// Mock the app rejecting each offer exactly once, in order.
	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s22), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)
	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s12), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)
	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s11), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)
	_, _, err = syncer.SyncAny(0)
	assert.Equal(t, errNoSnapshots, err)
	connSnapshot.AssertExpectations(t)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 540
} | [
2830,
3393,
12154,
261,
1098,
1721,
8610,
1288,
583,
1155,
353,
8840,
836,
8,
341,
1903,
1721,
261,
11,
4534,
15009,
1669,
6505,
39462,
12154,
261,
1155,
692,
197,
322,
274,
17,
17,
374,
6679,
1156,
11,
1221,
274,
16,
17,
11,
1221,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReaderConnectError(t *testing.T) {
client, err := NewClient(ClientOptions{
URL: "pulsar://invalid-hostname:6650",
})
assert.Nil(t, err)
defer client.Close()
reader, err := client.CreateReader(ReaderOptions{
Topic: "my-topic",
StartMessageID: LatestMessageID(),
})
// Expect error in creating consumer
assert.Nil(t, reader)
assert.NotNil(t, err)
assert.Equal(t, err.Error(), "connection error")
} | explode_data.jsonl/6383 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 163
} | [
2830,
3393,
5062,
14611,
1454,
1155,
353,
8840,
836,
8,
341,
25291,
11,
1848,
1669,
1532,
2959,
46851,
3798,
515,
197,
79055,
25,
330,
79,
14295,
277,
1110,
11808,
38589,
606,
25,
21,
21,
20,
15,
756,
197,
8824,
6948,
59678,
1155,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBulkIndexerIntegration(t *testing.T) {
body := `{"body":"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat."}`
testCases := []struct {
name string
CompressRequestBodyEnabled bool
}{
{
name: "Without body compression",
CompressRequestBodyEnabled: false,
},
{
name: "With body compression",
CompressRequestBodyEnabled: true,
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
t.Run("Default", func(t *testing.T) {
var countSuccessful uint64
indexName := "test-bulk-integration"
es, _ := elasticsearch.NewClient(elasticsearch.Config{
CompressRequestBody: tt.CompressRequestBodyEnabled,
Logger: &elastictransport.ColorLogger{Output: os.Stdout},
})
es.Indices.Delete([]string{indexName}, es.Indices.Delete.WithIgnoreUnavailable(true))
es.Indices.Create(
indexName,
es.Indices.Create.WithBody(strings.NewReader(`{"settings": {"number_of_shards": 1, "number_of_replicas": 0, "refresh_interval":"5s"}}`)),
es.Indices.Create.WithWaitForActiveShards("1"))
bi, _ := esutil.NewBulkIndexer(esutil.BulkIndexerConfig{
Index: indexName,
Client: es,
// FlushBytes: 3e+6,
})
numItems := 100000
start := time.Now().UTC()
for i := 1; i <= numItems; i++ {
err := bi.Add(context.Background(), esutil.BulkIndexerItem{
Action: "index",
DocumentID: strconv.Itoa(i),
Body: strings.NewReader(body),
OnSuccess: func(ctx context.Context, item esutil.BulkIndexerItem, res esutil.BulkIndexerResponseItem) {
atomic.AddUint64(&countSuccessful, 1)
},
})
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
}
if err := bi.Close(context.Background()); err != nil {
t.Errorf("Unexpected error: %s", err)
}
stats := bi.Stats()
if stats.NumAdded != uint64(numItems) {
t.Errorf("Unexpected NumAdded: want=%d, got=%d", numItems, stats.NumAdded)
}
if stats.NumIndexed != uint64(numItems) {
t.Errorf("Unexpected NumIndexed: want=%d, got=%d", numItems, stats.NumIndexed)
}
if stats.NumFailed != 0 {
t.Errorf("Unexpected NumFailed: want=0, got=%d", stats.NumFailed)
}
if countSuccessful != uint64(numItems) {
t.Errorf("Unexpected countSuccessful: want=%d, got=%d", numItems, countSuccessful)
}
fmt.Printf(" Added %d documents to indexer. Succeeded: %d. Failed: %d. Requests: %d. Duration: %s (%.0f docs/sec)\n",
stats.NumAdded,
stats.NumFlushed,
stats.NumFailed,
stats.NumRequests,
time.Since(start).Truncate(time.Millisecond),
1000.0/float64(time.Since(start)/time.Millisecond)*float64(stats.NumFlushed))
})
t.Run("Multiple indices", func(t *testing.T) {
es, _ := elasticsearch.NewClient(elasticsearch.Config{
CompressRequestBody: tt.CompressRequestBodyEnabled,
Logger: &elastictransport.ColorLogger{Output: os.Stdout},
})
bi, _ := esutil.NewBulkIndexer(esutil.BulkIndexerConfig{
Index: "test-index-a",
Client: es,
})
// Default index
for i := 1; i <= 10; i++ {
err := bi.Add(context.Background(), esutil.BulkIndexerItem{
Action: "index",
DocumentID: strconv.Itoa(i),
Body: strings.NewReader(body),
})
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
}
// Index 1
for i := 1; i <= 10; i++ {
err := bi.Add(context.Background(), esutil.BulkIndexerItem{
Action: "index",
Index: "test-index-b",
Body: strings.NewReader(body),
})
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
}
// Index 2
for i := 1; i <= 10; i++ {
err := bi.Add(context.Background(), esutil.BulkIndexerItem{
Action: "index",
Index: "test-index-c",
Body: strings.NewReader(body),
})
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
}
if err := bi.Close(context.Background()); err != nil {
t.Errorf("Unexpected error: %s", err)
}
stats := bi.Stats()
expectedIndexed := 10 + 10 + 10
if stats.NumIndexed != uint64(expectedIndexed) {
t.Errorf("Unexpected NumIndexed: want=%d, got=%d", expectedIndexed, stats.NumIndexed)
}
res, err := es.Indices.Exists([]string{"test-index-a", "test-index-b", "test-index-c"})
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
if res.StatusCode != 200 {
t.Errorf("Expected indices to exist, but got a [%s] response", res.Status())
}
})
t.Run("External version", func(t *testing.T) {
var index string = "test-index-a"
es, _ := elasticsearch.NewClient(elasticsearch.Config{
CompressRequestBody: tt.CompressRequestBodyEnabled,
Logger: &elastictransport.ColorLogger{Output: os.Stdout},
})
es.Indices.Delete([]string{index}, es.Indices.Delete.WithIgnoreUnavailable(true))
es.Indices.Create(index, es.Indices.Create.WithWaitForActiveShards("1"))
bulkIndex := func(bulkIndexer esutil.BulkIndexer, baseVersion int) {
var countTotal int = 500
var countSuccessful uint64
for i := 0; i < countTotal; i++ {
version := int64(baseVersion + i)
item := esutil.BulkIndexerItem{
Action: "index",
Index: index,
DocumentID: strconv.Itoa(i),
Body: strings.NewReader(body),
Version: &version,
VersionType: "external",
Routing: `"{required": true}`,
OnSuccess: func(ctx context.Context, item esutil.BulkIndexerItem, item2 esutil.BulkIndexerResponseItem) {
if version != item2.Version &&
version != *item.Version &&
item2.Result != "created" {
t.Fatalf("version mismatch, expected: %d, got: %d && %d", version, item.Version, item2.Version)
}
atomic.AddUint64(&countSuccessful, 1)
},
}
err := bulkIndexer.Add(context.Background(), item)
if err != nil {
t.Fatal(err)
}
}
if err := bulkIndexer.Close(context.Background()); err != nil {
t.Fatal(err)
}
if int(countSuccessful) != countTotal {
t.Fatalf("Unexpected countSuccessful, expected %d, got: %d", countTotal, countSuccessful)
}
}
bi, _ := esutil.NewBulkIndexer(esutil.BulkIndexerConfig{
Index: index,
Client: es,
})
bulkIndex(bi, 500)
bi, _ = esutil.NewBulkIndexer(esutil.BulkIndexerConfig{
Index: index,
Client: es,
})
bulkIndex(bi, 900)
})
})
}
} | explode_data.jsonl/74666 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 3140
} | [
2830,
3393,
88194,
1552,
261,
52464,
1155,
353,
8840,
836,
8,
341,
35402,
1669,
1565,
4913,
2599,
3252,
32783,
26342,
23655,
2444,
27212,
11,
35140,
56681,
30060,
11,
10923,
653,
79122,
18965,
86404,
8621,
72204,
1842,
57296,
58917,
85927,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetMessage_Post(t *testing.T) {
// Assert
body := bytes.NewBufferString("GRUMBYCAT")
req, err := http.NewRequest("POST", "https://api.secrethub.io/repos/jdoe/catpictures", body)
assert.OK(t, err)
req.Header.Set("Date", "Fri, 10 Mar 2017 16:25:54 CET")
bodySum := sha256.Sum256(body.Bytes())
encodedBody := base64.StdEncoding.EncodeToString(bodySum[:])
expected := "POST\n" +
encodedBody + "\n" +
"Fri, 10 Mar 2017 16:25:54 CET\n" +
"/repos/jdoe/catpictures;\n" +
""
// Act
result, err := getMessage(req)
assert.OK(t, err)
// Assert
assertMessage(t, expected, string(result))
} | explode_data.jsonl/15487 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 254
} | [
2830,
3393,
1949,
2052,
66726,
1155,
353,
8840,
836,
8,
1476,
197,
322,
5319,
198,
35402,
1669,
5820,
7121,
4095,
703,
445,
8626,
2794,
19912,
28196,
5130,
24395,
11,
1848,
1669,
1758,
75274,
445,
2946,
497,
330,
2428,
1110,
2068,
4523,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNs(t *testing.T) {
ev := eval.NewEvaler()
ed := NewEditor(os.Stdin, os.Stdout).(*editor)
ev.Global.AddNs("edit", ed.Ns())
ev.EvalSource(eval.NewScriptSource("[t]", "[t]", "edit:max-height = 20"))
if ed.core.Config.RenderConfig.MaxHeight != 20 {
t.Errorf("Failed to set MaxHeight to 20 via binding")
}
} | explode_data.jsonl/40145 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 130
} | [
2830,
3393,
47360,
1155,
353,
8840,
836,
8,
341,
74837,
1669,
5603,
7121,
54469,
261,
741,
197,
291,
1669,
1532,
9410,
9638,
36086,
258,
11,
2643,
83225,
568,
4071,
8868,
340,
74837,
27381,
1904,
47360,
445,
3587,
497,
1578,
2067,
82,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestInteractiveBackend_SelectStarterProject(t *testing.T) {
type fields struct {
asker func(ctrl *gomock.Controller) asker.Asker
registryClient registry.Client
}
type args struct {
devfile func() parser.DevfileObj
flags map[string]string
}
tests := []struct {
name string
fields fields
args args
want *v1alpha2.StarterProject
wantErr bool
}{
{
name: "no flags, no starter selected",
fields: fields{
asker: func(ctrl *gomock.Controller) asker.Asker {
client := asker.NewMockAsker(ctrl)
client.EXPECT().AskStarterProject(gomock.Any()).Return(false, 0, nil)
return client
},
},
args: args{
devfile: func() parser.DevfileObj {
devfileData, _ := data.NewDevfileData(string(data.APISchemaVersion200))
return parser.DevfileObj{
Data: devfileData,
}
},
flags: map[string]string{},
},
want: nil,
wantErr: false,
},
{
name: "no flags, starter selected",
fields: fields{
asker: func(ctrl *gomock.Controller) asker.Asker {
client := asker.NewMockAsker(ctrl)
client.EXPECT().AskStarterProject(gomock.Any()).Return(true, 1, nil)
return client
},
},
args: args{
devfile: func() parser.DevfileObj {
devfileData, _ := data.NewDevfileData(string(data.APISchemaVersion200))
_ = devfileData.AddStarterProjects([]v1alpha2.StarterProject{
{
Name: "starter1",
},
{
Name: "starter2",
},
{
Name: "starter3",
},
})
return parser.DevfileObj{
Data: devfileData,
}
},
flags: map[string]string{},
},
want: &v1alpha2.StarterProject{
Name: "starter2",
},
wantErr: false,
},
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
var askerClient asker.Asker
if tt.fields.asker != nil {
askerClient = tt.fields.asker(ctrl)
}
o := &InteractiveBackend{
askerClient: askerClient,
registryClient: tt.fields.registryClient,
}
got1, err := o.SelectStarterProject(tt.args.devfile(), tt.args.flags)
if (err != nil) != tt.wantErr {
t.Errorf("InteractiveBackend.SelectStarterProject() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got1, tt.want) {
t.Errorf("InteractiveBackend.SelectStarterProject() got1 = %v, want %v", got1, tt.want)
}
})
}
} | explode_data.jsonl/48279 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1140
} | [
2830,
3393,
71686,
29699,
58073,
623,
5295,
7849,
1155,
353,
8840,
836,
8,
341,
13158,
5043,
2036,
341,
197,
197,
1073,
261,
688,
2915,
62100,
353,
36083,
1176,
29112,
8,
2548,
261,
875,
4886,
261,
198,
197,
197,
29172,
2959,
19424,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestProcessInfoIssue22068(t *testing.T) {
store, clean := realtikvtest.CreateMockStoreAndSetup(t)
defer clean()
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
var wg util.WaitGroupWrapper
wg.Run(func() {
tk.MustQuery("select 1 from t where a = (select sleep(5));").Check(testkit.Rows())
})
time.Sleep(2 * time.Second)
pi := tk.Session().ShowProcess()
require.NotNil(t, pi)
require.Equal(t, "select 1 from t where a = (select sleep(5));", pi.Info)
require.Nil(t, pi.Plan)
wg.Wait()
} | explode_data.jsonl/5750 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 222
} | [
2830,
3393,
7423,
1731,
42006,
17,
17,
15,
21,
23,
1155,
353,
8840,
836,
8,
341,
57279,
11,
4240,
1669,
1931,
83,
1579,
85,
1944,
7251,
11571,
6093,
3036,
21821,
1155,
340,
16867,
4240,
741,
3244,
74,
1669,
1273,
8226,
7121,
2271,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBuildDenyVariable(t *testing.T) {
invalidType := &ingress.Ingress{}
expected := ""
actual := buildDenyVariable(invalidType)
if expected != actual {
t.Errorf("Expected '%v' but returned '%v'", expected, actual)
}
a := buildDenyVariable("host1.example.com_/.well-known/acme-challenge")
b := buildDenyVariable("host1.example.com_/.well-known/acme-challenge")
if !reflect.DeepEqual(a, b) {
t.Errorf("Expected '%v' but returned '%v'", a, b)
}
} | explode_data.jsonl/80584 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 180
} | [
2830,
3393,
11066,
23619,
88,
7827,
1155,
353,
8840,
836,
8,
341,
197,
11808,
929,
1669,
609,
287,
673,
5337,
2483,
16094,
42400,
1669,
8389,
88814,
1669,
1936,
23619,
88,
7827,
5900,
1891,
929,
692,
743,
3601,
961,
5042,
341,
197,
32... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestParsePublicKeys(t *testing.T) {
if testing.Short() {
t.SkipNow()
return
}
config := &quick.Config{
MaxCount: testutil.ENCODE_REPEAT_COUNT,
}
err := quick.Check(parsePublicKeyOk, config)
testutil.AssertVerboseErrorIsNil(t, err)
} | explode_data.jsonl/34474 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 104
} | [
2830,
3393,
14463,
12676,
8850,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
3244,
57776,
7039,
741,
197,
853,
198,
197,
630,
25873,
1669,
609,
27763,
10753,
515,
197,
197,
5974,
2507,
25,
1273,
1314,
52746,
14866,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestControllerDeploymentRegistry(t *testing.T) {
helmChartPath := "../charts/core"
options := &helm.Options{
SetValues: map[string]string{
"registry": "registry.neuvector.com",
},
}
// Test ingress
out := helm.RenderTemplate(t, options, helmChartPath, []string{"templates/controller-deployment.yaml"})
outs := splitYaml(out)
if len(outs) != 1 {
t.Errorf("Resource count is wrong. count=%v\n", len(outs))
}
var dep appsv1.Deployment
helm.UnmarshalK8SYaml(t, outs[0], &dep)
if dep.Spec.Template.Spec.Containers[0].Image != "registry.neuvector.com/controller:latest" {
t.Errorf("Image location is wrong, %v\n", dep.Spec.Template.Spec.Containers[0].Image)
}
} | explode_data.jsonl/55055 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 263
} | [
2830,
3393,
2051,
75286,
15603,
1155,
353,
8840,
836,
8,
341,
9598,
23162,
14488,
1820,
1669,
7005,
36584,
5320,
1837,
35500,
1669,
609,
51899,
22179,
515,
197,
22212,
6227,
25,
2415,
14032,
30953,
515,
298,
197,
1,
29172,
788,
330,
291... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func Test_ResolveString(t *testing.T) {
testCases := []struct {
name string
path string
attributes map[string]interface{}
result string
err error
}{
{
name: "empty attributes should produce error",
path: "x.y.z",
attributes: map[string]interface{}{},
result: "",
err: errors.New("string attribute `x.y.z` not found"),
},
{
name: "empty path should produce error",
path: "",
attributes: map[string]interface{}{
"xyz": "abc",
},
result: "",
err: errors.New("string attribute `` not found"),
},
{
name: "nonempty attributes with correct path should produce nonempty string",
path: "x.y.z",
attributes: map[string]interface{}{
"x": map[string]interface{}{
"y": map[string]interface{}{
"z": "abc",
},
},
},
result: "abc",
err: nil,
},
{
name: "nonempty attributes with incorrect path should produce empty string",
path: "a.b.c",
attributes: map[string]interface{}{
"x": map[string]interface{}{
"y": map[string]interface{}{
"xyz": "abc",
},
},
},
result: "",
err: errors.New("string attribute `a.b.c` not found"),
},
{
name: "nonempty attributes with correct single length path should produce nonempty string",
path: "a",
attributes: map[string]interface{}{
"a": "xyz",
},
result: "xyz",
err: nil,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result, err := ResolveString(tc.attributes, tc.path)
assert.Equal(t, result, tc.result)
assert.Equal(t, err, tc.err)
})
}
} | explode_data.jsonl/79609 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 744
} | [
2830,
3393,
62,
56808,
703,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
11609,
981,
914,
198,
197,
26781,
981,
914,
198,
197,
197,
12340,
2415,
14032,
31344,
16094,
197,
9559,
257,
914,
198,
197,
9859,
286,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMaximum(t *testing.T) {
GenerateValuesAsYaml(t, "maximum.test.schema.json",
func(console *tests.ConsoleWrapper, donec chan struct{}) {
defer close(donec)
// Test boolean type
console.ExpectString("Enter a value for numberValue")
console.SendLine("11.1")
console.ExpectString("Sorry, your reply was invalid: 11.1 is not less than or equal to 10.1")
console.ExpectString("Enter a value for numberValue")
console.SendLine("1")
console.ExpectString("Enter a value for integerValue")
console.SendLine("21")
console.ExpectString("Sorry, your reply was invalid: 21 is not less than or equal to 20")
console.ExpectString("Enter a value for integerValue")
console.SendLine("2")
console.ExpectEOF()
})
} | explode_data.jsonl/61753 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 261
} | [
2830,
3393,
27309,
1155,
353,
8840,
836,
8,
341,
197,
31115,
6227,
2121,
56,
9467,
1155,
11,
330,
39187,
5958,
30892,
4323,
756,
197,
29244,
52818,
353,
23841,
46298,
11542,
11,
2814,
66,
26023,
2036,
28875,
341,
298,
16867,
3265,
34232... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReload(t *testing.T) {
ctx := context.TODO()
// Create BananaBoatBot
cloneCfg := stdConfig
b := bot.NewBananaBoatBot(ctx, &cloneCfg)
defer b.Close(ctx)
// Say hello
b.HandleHandlers(ctx, "test", &irc.Message{
Command: irc.PRIVMSG,
Params: []string{"testbot1", "HELLO"},
})
// Read response
svrI, _ := b.Servers.Load("test")
messages := svrI.(client.IrcServerInterface).GetMessages()
msg := <-messages
if msg.Command != irc.PRIVMSG {
t.Fatalf("Got wrong message type in response1: %s", msg.Command)
}
if msg.Params[1] != "HELLO" {
t.Fatalf("Got wrong parameters in response1: %s", strings.Join(msg.Params, ","))
}
// Set new config file and reload Lua
b.Config.LuaFile = "../test/trivial2.lua"
b.ReloadLua(ctx)
// Send another PM
b.HandleHandlers(ctx, "test", &irc.Message{
Command: irc.PRIVMSG,
Params: []string{"testbot1", "HELLO"},
})
// Get response
msg = <-messages
// This time bot must say GOODBYE
if msg.Command != irc.PRIVMSG {
t.Fatalf("Got wrong message type in response2: %s", msg.Command)
}
if msg.Params[1] != "GOODBYE" {
t.Fatalf("Got wrong parameters in response2: %s", strings.Join(msg.Params, ","))
}
} | explode_data.jsonl/53606 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 482
} | [
2830,
3393,
50035,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
90988,
741,
197,
322,
4230,
75824,
1233,
266,
23502,
198,
197,
19982,
42467,
1669,
1460,
2648,
198,
2233,
1669,
10924,
7121,
50241,
3362,
1233,
266,
23502,
7502,
11,
60... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestBasic(t *testing.T) {
l := NewSkiplist(arenaSize, new(y.DefaultKeyComparator))
val1 := newValue(42)
val2 := newValue(52)
val3 := newValue(62)
val4 := newValue(72)
// Try inserting values.
// Somehow require.Nil doesn't work when checking for unsafe.Pointer(nil).
l.Put(y.KeyWithTs([]byte("key1"), 0), y.ValueStruct{Value: val1, Meta: 55, UserMeta: 0})
l.Put(y.KeyWithTs([]byte("key2"), 2), y.ValueStruct{Value: val2, Meta: 56, UserMeta: 0})
l.Put(y.KeyWithTs([]byte("key3"), 0), y.ValueStruct{Value: val3, Meta: 57, UserMeta: 0})
v := l.Get(y.KeyWithTs([]byte("key"), 0))
require.True(t, v.Value == nil)
v = l.Get(y.KeyWithTs([]byte("key1"), 0))
require.True(t, v.Value != nil)
require.EqualValues(t, "00042", string(v.Value))
require.EqualValues(t, 55, v.Meta)
v = l.Get(y.KeyWithTs([]byte("key2"), 0))
require.True(t, v.Value == nil)
v = l.Get(y.KeyWithTs([]byte("key3"), 0))
require.True(t, v.Value != nil)
require.EqualValues(t, "00062", string(v.Value))
require.EqualValues(t, 57, v.Meta)
l.Put(y.KeyWithTs([]byte("key3"), 1), y.ValueStruct{Value: val4, Meta: 12, UserMeta: 0})
v = l.Get(y.KeyWithTs([]byte("key3"), 1))
require.True(t, v.Value != nil)
require.EqualValues(t, "00072", string(v.Value))
require.EqualValues(t, 12, v.Meta)
} | explode_data.jsonl/49650 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 547
} | [
2830,
3393,
15944,
1155,
353,
8840,
836,
8,
341,
8810,
1669,
1532,
50,
6642,
39934,
7,
30527,
1695,
11,
501,
7021,
13275,
1592,
38658,
1171,
19302,
16,
1669,
24174,
7,
19,
17,
340,
19302,
17,
1669,
24174,
7,
20,
17,
340,
19302,
18,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetContainerInstanceTagsFromEC2APIFailToDescribeECSTagsForInstance(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl)
ec2Client := mock_ec2.NewMockClient(ctrl)
agent := &ecsAgent{
ec2MetadataClient: ec2MetadataClient,
ec2Client: ec2Client,
}
instanceID := "iid"
gomock.InOrder(
ec2MetadataClient.EXPECT().InstanceID().Return(instanceID, nil),
ec2Client.EXPECT().DescribeECSTagsForInstance(instanceID).Return(nil, errors.New("error")),
)
resTags, err := agent.getContainerInstanceTagsFromEC2API()
assert.Error(t, err)
assert.Nil(t, resTags)
} | explode_data.jsonl/41605 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 254
} | [
2830,
3393,
1949,
4502,
2523,
15930,
3830,
7498,
17,
7082,
19524,
1249,
74785,
7498,
784,
2032,
2461,
2523,
1155,
353,
8840,
836,
8,
341,
84381,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
16867,
23743,
991,
18176,
2822,
197,
757,
17,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestObjectWithRelation(t *testing.T) {
assertParseOneTerm(t, "relation term value", `{"x": 1+1}`, ObjectTerm(
Item(StringTerm("x"), CallTerm(RefTerm(VarTerm("plus")), IntNumberTerm(1), IntNumberTerm(1))),
))
assertParseError(t, "invalid relation term value", `{"x": 0= }`)
} | explode_data.jsonl/50454 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 106
} | [
2830,
3393,
1190,
2354,
33790,
1155,
353,
8840,
836,
8,
341,
6948,
14463,
3966,
17249,
1155,
11,
330,
22221,
4647,
897,
497,
1565,
4913,
87,
788,
220,
16,
10,
16,
28350,
3002,
17249,
1006,
197,
61574,
2242,
17249,
445,
87,
3975,
7143,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPodAutoscalerDuckTypes(t *testing.T) {
tests := []struct {
name string
t duck.Implementable
}{{
name: "conditions",
t: &duckv1beta1.Conditions{},
}}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
err := duck.VerifyType(&PodAutoscaler{}, test.t)
if err != nil {
t.Errorf("VerifyType(PodAutoscaler, %T) = %v", test.t, err)
}
})
}
} | explode_data.jsonl/32161 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 187
} | [
2830,
3393,
23527,
19602,
436,
63084,
35,
1942,
4173,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
3244,
262,
35985,
26914,
2764,
480,
198,
197,
15170,
515,
197,
11609,
25,
330,
16495,
756,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGetOAuthApps(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
AdminClient := th.SystemAdminClient
defaultRolePermissions := th.SaveDefaultRolePermissions()
enableOAuthServiceProvider := th.App.Config().ServiceSettings.EnableOAuthServiceProvider
defer func() {
th.RestoreDefaultRolePermissions(defaultRolePermissions)
th.App.UpdateConfig(func(cfg *model.Config) { cfg.ServiceSettings.EnableOAuthServiceProvider = enableOAuthServiceProvider })
}()
// Grant permission to regular users.
th.AddPermissionToRole(model.PERMISSION_MANAGE_OAUTH.Id, model.SYSTEM_USER_ROLE_ID)
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.EnableOAuthServiceProvider = true })
oapp := &model.OAuthApp{Name: GenerateTestAppName(), Homepage: "https://nowhere.com", Description: "test", CallbackUrls: []string{"https://nowhere.com"}}
rapp, resp := AdminClient.CreateOAuthApp(oapp)
CheckNoError(t, resp)
oapp.Name = GenerateTestAppName()
rapp2, resp := Client.CreateOAuthApp(oapp)
CheckNoError(t, resp)
apps, resp := AdminClient.GetOAuthApps(0, 1000)
CheckNoError(t, resp)
found1 := false
found2 := false
for _, a := range apps {
if a.Id == rapp.Id {
found1 = true
}
if a.Id == rapp2.Id {
found2 = true
}
}
if !found1 || !found2 {
t.Fatal("missing oauth app")
}
apps, resp = AdminClient.GetOAuthApps(1, 1)
CheckNoError(t, resp)
if len(apps) != 1 {
t.Fatal("paging failed")
}
apps, resp = Client.GetOAuthApps(0, 1000)
CheckNoError(t, resp)
if len(apps) != 1 && apps[0].Id != rapp2.Id {
t.Fatal("wrong apps returned")
}
// Revoke permission from regular users.
th.RemovePermissionFromRole(model.PERMISSION_MANAGE_OAUTH.Id, model.SYSTEM_USER_ROLE_ID)
_, resp = Client.GetOAuthApps(0, 1000)
CheckForbiddenStatus(t, resp)
Client.Logout()
_, resp = Client.GetOAuthApps(0, 1000)
CheckUnauthorizedStatus(t, resp)
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.EnableOAuthServiceProvider = false })
_, resp = AdminClient.GetOAuthApps(0, 1000)
CheckNotImplementedStatus(t, resp)
} | explode_data.jsonl/30126 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 771
} | [
2830,
3393,
1949,
57850,
53602,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1005,
3803,
15944,
741,
16867,
270,
836,
682,
4454,
741,
71724,
1669,
270,
11716,
198,
197,
7210,
2959,
1669,
270,
16620,
7210,
2959,
271,
11940,
9030,
23... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_DeleteNilId(t *testing.T) {
// arrange
store := &orderTestRepo{
DeleteResult: struct{ Err error }{Err: nil},
}
svr := api.NewOrderServer(store)
ctx := context.Background()
// act
_, err := svr.Delete(ctx, &ordercomm.DeleteOrderRequest{Id: uuid.Nil.String()})
// assert
if err == nil {
t.Fatalf("expected error invoking delete with nil id, but none received")
}
sts := status.Convert(err)
if sts == nil {
t.Fatalf("error is not a status.Status error")
}
if expected := codes.InvalidArgument; sts.Code() != expected {
t.Errorf("expected status code %s, provided code %s", expected, sts.Code())
}
} | explode_data.jsonl/36872 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 226
} | [
2830,
3393,
57418,
19064,
764,
1155,
353,
8840,
836,
8,
341,
197,
322,
30893,
198,
57279,
1669,
609,
1358,
2271,
25243,
515,
197,
96672,
2077,
25,
2036,
90,
15495,
1465,
335,
90,
7747,
25,
2092,
1583,
197,
532,
1903,
18920,
1669,
6330... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestReplacePE32(t *testing.T) {
f := parseImage(t)
// Apply the visitor.
replace := &ReplacePE32{
Predicate: func(f *uefi.File, name string) bool {
return f.Header.UUID == *testGUID
},
NewPE32: []byte("banana"),
}
if err := replace.Run(f); err != nil {
t.Fatal(err)
}
// We expect one match.
if len(replace.Matches) != 1 {
t.Fatalf("got %d matches; expected 1", len(replace.Matches))
}
// Find the section and make sure it contains the expected data.
results := find(t, f, testGUID)
if len(results) != 1 {
t.Fatalf("got %d matches; expected 1", len(results))
}
want := []byte{0x0a, 0x00, 0x00, byte(uefi.SectionTypePE32), 'b', 'a', 'n', 'a', 'n', 'a'}
got := results[0].Sections[0].Buf()
if !reflect.DeepEqual(want, got) {
t.Fatalf("want %v; got %v", want, got)
}
} | explode_data.jsonl/78103 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 339
} | [
2830,
3393,
23107,
1740,
18,
17,
1155,
353,
8840,
836,
8,
341,
1166,
1669,
4715,
1906,
1155,
692,
197,
322,
20552,
279,
20181,
624,
197,
8156,
1669,
609,
23107,
1740,
18,
17,
515,
197,
10025,
16874,
25,
2915,
955,
353,
361,
9983,
85... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMSSQLQueryInit(t *testing.T) {
MSSQLQuery, ok := NewSqlserverSQLQuery(sMSSQLQuery)
if ok != nil {
t.Fail()
}
MSSQLQuery.Init()
} | explode_data.jsonl/22806 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 65
} | [
2830,
3393,
44,
1220,
3588,
2859,
3803,
1155,
353,
8840,
836,
8,
341,
9209,
1220,
3588,
2859,
11,
5394,
1669,
1532,
8269,
4030,
6688,
2859,
1141,
44,
1220,
3588,
2859,
340,
743,
5394,
961,
2092,
341,
197,
3244,
57243,
741,
197,
630,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestDecoder(t *testing.T) {
for _, p := range pairs {
decoder := NewDecoder(StdEncoding, strings.NewReader(p.encoded))
dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded)))
count, err := decoder.Read(dbuf)
if err != nil && err != io.EOF {
t.Fatal("Read failed", err)
}
testEqual(t, "Read from %q = length %v, want %v", p.encoded, count, len(p.decoded))
testEqual(t, "Decoding of %q = %q, want %q", p.encoded, string(dbuf[0:count]), p.decoded)
if err != io.EOF {
count, err = decoder.Read(dbuf)
}
testEqual(t, "Read from %q = %v, want %v", p.encoded, err, io.EOF)
}
} | explode_data.jsonl/35053 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 271
} | [
2830,
3393,
20732,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
281,
1669,
2088,
13530,
341,
197,
197,
48110,
1669,
1532,
20732,
7,
22748,
14690,
11,
9069,
68587,
1295,
13,
19329,
1171,
197,
20939,
1704,
1669,
1281,
10556,
3782,
11,
42517,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func Test_Compile(t *testing.T) {
if _, err := os.Stat(*cm); os.IsNotExist(err) {
t.Fatalf("Invalid cm path %q err: %s", *cm, err)
}
} | explode_data.jsonl/4024 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 64
} | [
2830,
3393,
16946,
12192,
1155,
353,
8840,
836,
8,
341,
743,
8358,
1848,
1669,
2643,
53419,
4071,
6226,
1215,
2643,
4506,
45535,
3964,
8,
341,
197,
3244,
30762,
445,
7928,
9961,
1815,
1018,
80,
1848,
25,
1018,
82,
497,
353,
6226,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestAddAppVersion(t *testing.T) {
assert := assert.New(t)
db := model.AddAppVersion(1, "3.69")
if assert.NotNil(db) {
assert.Equal(nil, db.Error, "they should be equal")
}
} | explode_data.jsonl/53341 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 78
} | [
2830,
3393,
2212,
2164,
5637,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
340,
20939,
1669,
1614,
1904,
2164,
5637,
7,
16,
11,
330,
18,
13,
21,
24,
5130,
743,
2060,
93882,
9791,
8,
341,
197,
6948,
12808,
27907,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestMoveBatchChange(t *testing.T) {
if testing.Short() {
t.Skip()
}
ctx := context.Background()
db := dbtesting.GetDB(t)
user := ct.CreateTestUser(t, db, true)
userID := user.ID
orgName := "move-batch-change-test"
orgID := ct.InsertTestOrg(t, db, orgName)
cstore := store.New(db)
batchSpec := &batches.BatchSpec{
RawSpec: ct.TestRawBatchSpec,
UserID: userID,
NamespaceUserID: userID,
}
if err := cstore.CreateBatchSpec(ctx, batchSpec); err != nil {
t.Fatal(err)
}
batchChange := &batches.BatchChange{
BatchSpecID: batchSpec.ID,
Name: "old-name",
InitialApplierID: userID,
LastApplierID: userID,
LastAppliedAt: time.Now(),
NamespaceUserID: batchSpec.UserID,
}
if err := cstore.CreateBatchChange(ctx, batchChange); err != nil {
t.Fatal(err)
}
r := &Resolver{store: cstore}
s, err := graphqlbackend.NewSchema(db, r, nil, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}
// Move to a new name
batchChangeAPIID := string(marshalBatchChangeID(batchChange.ID))
newBatchChagneName := "new-name"
input := map[string]interface{}{
"batchChange": batchChangeAPIID,
"newName": newBatchChagneName,
}
var response struct{ MoveBatchChange apitest.BatchChange }
actorCtx := actor.WithActor(ctx, actor.FromUser(userID))
apitest.MustExec(actorCtx, t, s, input, &response, mutationMoveBatchChange)
haveBatchChange := response.MoveBatchChange
if diff := cmp.Diff(input["newName"], haveBatchChange.Name); diff != "" {
t.Fatalf("unexpected name (-want +got):\n%s", diff)
}
wantURL := fmt.Sprintf("/users/%s/batch-changes/%s", user.Username, newBatchChagneName)
if diff := cmp.Diff(wantURL, haveBatchChange.URL); diff != "" {
t.Fatalf("unexpected URL (-want +got):\n%s", diff)
}
// Move to a new namespace
orgAPIID := graphqlbackend.MarshalOrgID(orgID)
input = map[string]interface{}{
"batchChange": string(marshalBatchChangeID(batchChange.ID)),
"newNamespace": orgAPIID,
}
apitest.MustExec(actorCtx, t, s, input, &response, mutationMoveBatchChange)
haveBatchChange = response.MoveBatchChange
if diff := cmp.Diff(string(orgAPIID), haveBatchChange.Namespace.ID); diff != "" {
t.Fatalf("unexpected namespace (-want +got):\n%s", diff)
}
wantURL = fmt.Sprintf("/organizations/%s/batch-changes/%s", orgName, newBatchChagneName)
if diff := cmp.Diff(wantURL, haveBatchChange.URL); diff != "" {
t.Fatalf("unexpected URL (-want +got):\n%s", diff)
}
} | explode_data.jsonl/13590 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 996
} | [
2830,
3393,
9860,
21074,
4072,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
3244,
57776,
741,
197,
630,
20985,
1669,
2266,
19047,
741,
20939,
1669,
2927,
8840,
2234,
3506,
1155,
692,
19060,
1669,
20251,
7251,
2271,
147... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func TestProtocol_HandleConsignmentTransfer(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
tests := []struct {
bucketOwner string
blkHeight uint64
to address.Address
// consignment fields
nilPayload bool
consignType string
reclaim string
wrongSig bool
sigIndex uint64
sigNonce uint64
status iotextypes.ReceiptStatus
}{
// case I: p.hu.IsPre(config.Greenland, blkCtx.BlockHeight)
{
identityset.PrivateKey(2).HexString(),
1,
identityset.Address(3),
false,
"Ethereum",
_reclaim,
false,
0,
1,
iotextypes.ReceiptStatus_ErrUnauthorizedOperator,
},
// case II: len(act.Payload()) == 0
{
identityset.PrivateKey(2).HexString(),
5553821,
identityset.Address(3),
true,
"Ethereum",
_reclaim,
false,
0,
1,
iotextypes.ReceiptStatus_ErrUnauthorizedOperator,
},
// case III: type is not Ethereum
{
identityset.PrivateKey(2).HexString(),
5553821,
identityset.Address(3),
false,
"xx",
_reclaim,
false,
0,
1,
iotextypes.ReceiptStatus_ErrUnauthorizedOperator,
},
// case IV: msg.Reclaim != _reclaim
{
identityset.PrivateKey(2).HexString(),
5553821,
identityset.Address(3),
false,
"Ethereum",
"wrong reclaim",
false,
0,
1,
iotextypes.ReceiptStatus_ErrUnauthorizedOperator,
},
// case V: RecoverPubkeyFromEccSig error
{
identityset.PrivateKey(2).HexString(),
5553821,
identityset.Address(3),
false,
"Ethereum",
_reclaim,
true,
0,
1,
iotextypes.ReceiptStatus_ErrUnauthorizedOperator,
},
// case VI: transferor is not bucket.Owner
{
identityset.PrivateKey(31).HexString(),
5553821,
identityset.Address(1),
false,
"Ethereum",
_reclaim,
false,
0,
1,
iotextypes.ReceiptStatus_ErrUnauthorizedOperator,
},
// case VII: transferee is not actCtx.Caller
{
identityset.PrivateKey(32).HexString(),
5553821,
identityset.Address(3),
false,
"Ethereum",
_reclaim,
false,
0,
1,
iotextypes.ReceiptStatus_ErrUnauthorizedOperator,
},
// case VIII: signed asset id is not equal to bucket.Index
{
identityset.PrivateKey(32).HexString(),
5553821,
identityset.Address(1),
false,
"Ethereum",
_reclaim,
false,
1,
1,
iotextypes.ReceiptStatus_ErrUnauthorizedOperator,
},
// case IX: transfereeNonce is not equal to actCtx.Nonce
{
identityset.PrivateKey(32).HexString(),
5553821,
identityset.Address(1),
false,
"Ethereum",
_reclaim,
false,
0,
2,
iotextypes.ReceiptStatus_ErrUnauthorizedOperator,
},
// case X: success
{
identityset.PrivateKey(32).HexString(),
5553821,
identityset.Address(1),
false,
"Ethereum",
_reclaim,
false,
0,
1,
iotextypes.ReceiptStatus_Success,
},
}
for _, test := range tests {
sm, p, cand1, cand2, cc := initAll(t, ctrl)
caller := identityset.Address(1)
initBalance := int64(1000)
require.NoError(setupAccount(sm, caller, initBalance))
stakeAmount := "100000000000000000000"
gasPrice := big.NewInt(unit.Qev)
gasLimit := uint64(10000)
initCreateStake(t, sm, identityset.Address(32), initBalance, gasPrice, gasLimit, 1, test.blkHeight, time.Now(), gasLimit, p, cand2, stakeAmount, false)
initCreateStake(t, sm, identityset.Address(31), initBalance, gasPrice, gasLimit, 1, test.blkHeight, time.Now(), gasLimit, p, cand1, stakeAmount, false)
// transfer to test.to through consignment
var consign []byte
if !test.nilPayload {
consign = newconsignment(require, int(test.sigIndex), int(test.sigNonce), test.bucketOwner, test.to.String(), test.consignType, test.reclaim, test.wrongSig)
}
act, err := action.NewTransferStake(1, caller.String(), 0, consign, gasLimit, gasPrice)
require.NoError(err)
intrinsic, err := act.IntrinsicGas()
require.NoError(err)
ctx := protocol.WithActionCtx(context.Background(), protocol.ActionCtx{
Caller: caller,
GasPrice: gasPrice,
IntrinsicGas: intrinsic,
Nonce: 1,
})
ctx = protocol.WithBlockCtx(ctx, protocol.BlockCtx{
BlockHeight: test.blkHeight,
BlockTimeStamp: time.Now(),
GasLimit: gasLimit,
})
r, err := p.Handle(ctx, act, sm)
require.NoError(err)
if r != nil {
require.Equal(uint64(test.status), r.Status)
} else {
require.Equal(test.status, iotextypes.ReceiptStatus_Failure)
}
if test.status == iotextypes.ReceiptStatus_Success {
// test bucket index and bucket
bucketIndices, err := getCandBucketIndices(sm, cand2.Owner)
require.NoError(err)
require.Equal(1, len(*bucketIndices))
bucketIndices, err = getVoterBucketIndices(sm, test.to)
require.NoError(err)
require.Equal(1, len(*bucketIndices))
indices := *bucketIndices
bucket, err := getBucket(sm, indices[0])
require.NoError(err)
require.Equal(cand2.Owner, bucket.Candidate)
require.Equal(test.to.String(), bucket.Owner.String())
require.Equal(stakeAmount, bucket.StakedAmount.String())
// test candidate
candidate, err := getCandidate(sm, cand1.Owner)
require.NoError(err)
require.LessOrEqual(uint64(0), candidate.Votes.Uint64())
csm, err := NewCandidateStateManager(sm, cc)
require.NoError(err)
candidate = csm.GetByOwner(cand1.Owner)
require.NotNil(candidate)
require.LessOrEqual(uint64(0), candidate.Votes.Uint64())
require.Equal(cand1.Name, candidate.Name)
require.Equal(cand1.Operator, candidate.Operator)
require.Equal(cand1.Reward, candidate.Reward)
require.Equal(cand1.Owner, candidate.Owner)
require.LessOrEqual(uint64(0), candidate.Votes.Uint64())
require.LessOrEqual(uint64(0), candidate.SelfStake.Uint64())
// test staker's account
caller, err := accountutil.LoadAccount(sm, hash.BytesToHash160(caller.Bytes()))
require.NoError(err)
actCost, err := act.Cost()
require.NoError(err)
require.Equal(uint64(1), caller.Nonce)
total := big.NewInt(0)
require.Equal(unit.ConvertIotxToRau(initBalance), total.Add(total, caller.Balance).Add(total, actCost))
}
}
} | explode_data.jsonl/64536 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2707
} | [
2830,
3393,
20689,
42714,
15220,
3531,
21970,
1155,
353,
8840,
836,
8,
341,
17957,
1669,
1373,
7121,
1155,
340,
84381,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
16867,
23743,
991,
18176,
2822,
78216,
1669,
3056,
1235,
341,
197,
2233,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestDataDestinations(t *testing.T) {
ctx := NewContext(nil)
Convey("Given an empty data destination", t, func() {
dsts := newDataDestinations(NTBox, "test_component")
t := &Tuple{
InputName: "test_component",
Data: data.Map{
"v": data.Int(1),
},
}
Convey("When sending a tuple", func() {
var err error
So(func() {
err = dsts.Write(ctx, t)
}, ShouldNotPanic)
Convey("Then it shouldn't fail", func() {
So(err, ShouldBeNil)
})
})
Convey("When getting nodeType it should be NTBox", func() {
So(dsts.nodeType, ShouldEqual, NTBox)
})
})
Convey("Given data destinations", t, func() {
dsts := newDataDestinations(NTBox, "test_component")
recvs := make([]*pipeReceiver, 2)
for i := range recvs {
r, s := newPipe(fmt.Sprint("test", i+1), 1)
recvs[i] = r
dsts.add(fmt.Sprint("test_node_", i+1), s)
}
t := &Tuple{
InputName: "test_component",
Data: data.Map{
"v": data.Int(1),
},
}
Convey("When sending a tuple", func() {
So(dsts.Write(ctx, t), ShouldBeNil)
Convey("Then all destinations should receive it", func() {
t1, ok := <-recvs[0].in
So(ok, ShouldBeTrue)
t2, ok := <-recvs[1].in
So(ok, ShouldBeTrue)
Convey("And tuples should have the correct input name", func() {
So(t1.InputName, ShouldEqual, "test1")
So(t2.InputName, ShouldEqual, "test2")
})
})
})
Convey("When sending closing the destinations after sending a tuple", func() {
So(dsts.Write(ctx, t), ShouldBeNil)
So(dsts.Close(ctx), ShouldBeNil)
Convey("Then all receiver should receive a closing signal after the tuple", func() {
for _, r := range recvs {
_, ok := <-r.in
So(ok, ShouldBeTrue)
_, ok = <-r.in
So(ok, ShouldBeFalse)
}
})
})
Convey("When one destination is closed by the receiver side", func() {
recvs[0].close()
drainReceiver(recvs[0])
Reset(func() {
dsts.Close(ctx)
})
Convey("Then the destination receiver should eventually be removed", func() {
go func() {
for _ = range recvs[1].in {
}
}()
for {
if !dsts.has("test_node_1") {
break
}
dsts.Write(ctx, t)
}
_, ok := <-recvs[0].in
So(ok, ShouldBeFalse)
})
})
Convey("When adding a new destination after sending a tuple", func() {
for _, r := range recvs {
r := r
go func() {
for _ = range r.in {
}
}()
}
So(dsts.Write(ctx, t), ShouldBeNil)
r, s := newPipe("test3", 1)
So(dsts.add("test_node_3", s), ShouldBeNil)
Reset(func() {
dsts.Close(ctx)
})
Convey("Then the new receiver shouldn't receive the first tuple", func() {
recved := false
select {
case <-r.in:
recved = true
default:
}
So(recved, ShouldBeFalse)
})
Convey("Then the new receiver should receive a new tuple", func() {
So(dsts.Write(ctx, t), ShouldBeNil)
_, ok := <-r.in
So(ok, ShouldBeTrue)
})
})
Convey("When adding a destination with the duplicated name", func() {
_, s := newPipe("hoge", 1)
err := dsts.add("test_node_1", s)
Convey("Then it should fail", func() {
So(err, ShouldNotBeNil)
})
})
Convey("When removing a destination", func() {
dsts.remove("test_node_1")
Convey("Then the destination should be closed", func() {
_, ok := <-recvs[0].in
So(ok, ShouldBeFalse)
})
Convey("Then Write should work", func() {
So(dsts.Write(ctx, t), ShouldBeNil)
_, ok := <-recvs[1].in
So(ok, ShouldBeTrue)
})
})
Convey("When removing a destination after sending a tuple", func() {
go func() {
for _ = range recvs[1].in {
}
}()
Reset(func() {
dsts.Close(ctx)
})
So(dsts.Write(ctx, t), ShouldBeNil)
dsts.remove("test_node_1")
Convey("Then the destination should be able to receive the tuple", func() {
_, ok := <-recvs[0].in
So(ok, ShouldBeTrue)
_, ok = <-recvs[0].in
So(ok, ShouldBeFalse)
})
})
Convey("When removing a nonexistent destination", func() {
Convey("Then it shouldn't panic", func() {
So(func() {
dsts.remove("test_node_100")
}, ShouldNotPanic)
})
})
Convey("When pausing", func() {
dsts.pause()
ch := make(chan error)
go func() {
ch <- fmt.Errorf("dummy error")
ch <- dsts.Write(ctx, t)
}()
<-ch
Convey("Then the write should be blocked", func() {
Reset(func() {
dsts.resume()
<-ch
})
blocked := true
select {
case <-ch:
blocked = false
default:
}
So(blocked, ShouldBeTrue)
})
Convey("Then resume method unblocks the write", func() {
dsts.resume()
So(<-ch, ShouldBeNil)
})
})
})
} | explode_data.jsonl/38417 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2150
} | [
2830,
93200,
34830,
12634,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
1532,
1972,
27907,
692,
93070,
5617,
445,
22043,
458,
4287,
821,
9106,
497,
259,
11,
2915,
368,
341,
197,
2698,
36279,
1669,
51523,
34830,
12634,
7,
6408,
1611,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestErrors(t *testing.T) {
// None of these tests should make it to calling the service.
res := &Resolver{Client: fakeClient{}}
tests := []struct {
req *linkpb.LinkRequest
want codes.Code
}{
// Various missing parameters should report INVALID_ARGUMENT.
{new(linkpb.LinkRequest), codes.InvalidArgument},
{&linkpb.LinkRequest{
Identifier: "foo",
Include: []*linkpb.LinkRequest_Location{{
Path: "(", // bogus regexp
}},
}, codes.InvalidArgument},
{&linkpb.LinkRequest{
Identifier: "foo",
Exclude: []*linkpb.LinkRequest_Location{{
Root: "?", // bogus regexp
}},
}, codes.InvalidArgument},
}
ctx := context.Background()
for _, test := range tests {
_, err := res.Resolve(ctx, test.req)
got := status.Code(err)
if got != test.want {
t.Errorf("Resolve %+v: got code %v, want %v [%v]", test.req, got, test.want, err)
}
}
} | explode_data.jsonl/8537 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 348
} | [
2830,
3393,
13877,
1155,
353,
8840,
836,
8,
341,
197,
322,
2240,
315,
1493,
7032,
1265,
1281,
432,
311,
8098,
279,
2473,
624,
10202,
1669,
609,
18190,
90,
2959,
25,
12418,
2959,
6257,
532,
78216,
1669,
3056,
1235,
341,
197,
24395,
220... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestGetHostname(t *testing.T) {
// Reset memory counters
helpers.ResetMemoryStats()
code := fmt.Sprintf(`
with open(r'%s', 'w') as f:
name = datadog_agent.get_hostname()
if sys.version_info.major == 2:
assert type(name) == type(b"")
else:
assert type(name) == type(u"")
f.write(name)
`, tmpfile.Name())
out, err := run(code)
if err != nil {
t.Fatal(err)
}
if out != "localfoobar" {
t.Errorf("Unexpected printed value: '%s'", out)
}
// Check for leaks
helpers.AssertMemoryUsage(t)
} | explode_data.jsonl/24542 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 222
} | [
2830,
3393,
1949,
88839,
1155,
353,
8840,
836,
8,
341,
197,
322,
16932,
4938,
31532,
198,
197,
21723,
36660,
10642,
16635,
2822,
43343,
1669,
8879,
17305,
61528,
46948,
1787,
2601,
36165,
82,
516,
364,
86,
863,
438,
282,
510,
197,
11609... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestVTGateExecuteBatchKeyspaceIds(t *testing.T) {
ks := "TestVTGateExecuteBatchKeyspaceIds"
shard1 := "-20"
shard2 := "20-40"
createSandbox(ks)
hcVTGateTest.Reset()
sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil)
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil)
kid10 := []byte{0x10}
kid30 := []byte{0x30}
qrl, err := rpcVTGate.ExecuteBatchKeyspaceIds(context.Background(),
[]*vtgatepb.BoundKeyspaceIdQuery{{
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
KeyspaceIds: [][]byte{kid10, kid30},
}, {
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
KeyspaceIds: [][]byte{kid10, kid30},
}},
topodatapb.TabletType_MASTER,
false,
nil,
executeOptions)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
if len(qrl) != 2 {
t.Errorf("want 2, got %v", len(qrl))
}
if qrl[0].RowsAffected != 2 {
t.Errorf("want 2, got %v", qrl[0].RowsAffected)
}
if !proto.Equal(sbc1.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc1.Options[0], executeOptions)
}
session, err := rpcVTGate.Begin(context.Background(), false)
rpcVTGate.ExecuteBatchKeyspaceIds(context.Background(),
[]*vtgatepb.BoundKeyspaceIdQuery{{
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
KeyspaceIds: [][]byte{kid10, kid30},
}, {
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
KeyspaceIds: [][]byte{kid10, kid30},
}},
topodatapb.TabletType_MASTER,
false,
session,
nil)
if len(session.ShardSessions) != 2 {
t.Errorf("want 2, got %d", len(session.ShardSessions))
}
} | explode_data.jsonl/7836 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 930
} | [
2830,
3393,
20457,
42318,
17174,
21074,
8850,
1306,
12701,
1155,
353,
8840,
836,
8,
341,
197,
2787,
1669,
330,
2271,
20457,
42318,
17174,
21074,
8850,
1306,
12701,
698,
36196,
567,
16,
1669,
6523,
17,
15,
698,
36196,
567,
17,
1669,
330,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.