text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestRequiresRegenerationServiceUIDMismatchBetaAnnotation(t *testing.T) {
tests := []struct {
name string
primeServices func(cache.Indexer)
secret *v1.Secret
expected bool
}{
{
name: "no service annotation",
primeServices: func(serviceCache cache.Indexer) {},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1", Name: "mysecret",
Annotations: map[string]string{},
},
},
expected: false,
},
{
name: "missing service",
primeServices: func(serviceCache cache.Indexer) {},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1", Name: "mysecret",
Annotations: map[string]string{
api.ServiceNameAnnotation: "foo",
},
},
},
expected: false,
},
{
name: "service-uid-mismatch",
primeServices: func(serviceCache cache.Indexer) {
serviceCache.Add(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "foo", UID: types.UID("uid-2"), Annotations: map[string]string{api.ServingCertSecretAnnotation: "mysecret"}},
})
},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1", Name: "mysecret",
Annotations: map[string]string{
api.ServiceNameAnnotation: "foo",
api.ServiceUIDAnnotation: "uid-1",
},
OwnerReferences: []metav1.OwnerReference{ownerRef(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("uid-2")}})},
},
},
expected: false,
},
{
name: "service secret name mismatch",
primeServices: func(serviceCache cache.Indexer) {
serviceCache.Add(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "foo", UID: types.UID("uid-1"), Annotations: map[string]string{api.ServingCertSecretAnnotation: "mysecret2"}},
})
},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1", Name: "mysecret",
Annotations: map[string]string{
api.ServiceNameAnnotation: "foo",
api.ServiceUIDAnnotation: "uid-1",
},
OwnerReferences: []metav1.OwnerReference{ownerRef(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("uid-1")}})},
},
},
expected: false,
},
{
name: "no expiry",
primeServices: func(serviceCache cache.Indexer) {
serviceCache.Add(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "foo", UID: types.UID("uid-1"), Annotations: map[string]string{api.ServingCertSecretAnnotation: "mysecret"}},
})
},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1", Name: "mysecret",
Annotations: map[string]string{
api.ServiceNameAnnotation: "foo",
api.ServiceUIDAnnotation: "uid-1",
},
OwnerReferences: []metav1.OwnerReference{ownerRef(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("uid-1")}})},
},
},
expected: true,
},
{
name: "bad expiry",
primeServices: func(serviceCache cache.Indexer) {
serviceCache.Add(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "foo", UID: types.UID("uid-1"), Annotations: map[string]string{api.ServingCertSecretAnnotation: "mysecret"}},
})
},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1", Name: "mysecret",
Annotations: map[string]string{
api.ServiceNameAnnotation: "foo",
api.ServiceUIDAnnotation: "uid-1",
api.ServingCertExpiryAnnotation: "bad-format",
},
OwnerReferences: []metav1.OwnerReference{ownerRef(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("uid-1")}})},
},
},
expected: true,
},
{
name: "expired expiry",
primeServices: func(serviceCache cache.Indexer) {
serviceCache.Add(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "foo", UID: types.UID("uid-1"), Annotations: map[string]string{api.ServingCertSecretAnnotation: "mysecret"}},
})
},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1", Name: "mysecret",
Annotations: map[string]string{
api.ServiceNameAnnotation: "foo",
api.ServiceUIDAnnotation: "uid-1",
api.ServingCertExpiryAnnotation: time.Now().Add(-30 * time.Minute).Format(time.RFC3339),
},
OwnerReferences: []metav1.OwnerReference{ownerRef(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("uid-1")}})},
},
},
expected: true,
},
{
name: "distant expiry",
primeServices: func(serviceCache cache.Indexer) {
serviceCache.Add(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "foo", UID: types.UID("uid-1"), Annotations: map[string]string{api.ServingCertSecretAnnotation: "mysecret"}},
})
},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1", Name: "mysecret",
Annotations: map[string]string{
api.ServiceNameAnnotation: "foo",
api.ServiceUIDAnnotation: "uid-1",
api.ServingCertExpiryAnnotation: time.Now().Add(10 * time.Minute).Format(time.RFC3339),
},
OwnerReferences: []metav1.OwnerReference{ownerRef(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("uid-1")}})},
},
},
expected: false,
},
{
name: "missing ownerref",
primeServices: func(serviceCache cache.Indexer) {
serviceCache.Add(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "foo", UID: types.UID("uid-1"), Annotations: map[string]string{api.ServingCertSecretAnnotation: "mysecret"}},
})
},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1", Name: "mysecret",
Annotations: map[string]string{
api.ServiceNameAnnotation: "foo",
api.ServiceUIDAnnotation: "uid-1",
api.ServingCertExpiryAnnotation: time.Now().Add(10 * time.Minute).Format(time.RFC3339),
},
OwnerReferences: []metav1.OwnerReference{ownerRef(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("uid-2")}})},
},
},
expected: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
index := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
c := &serviceServingCertUpdateController{
serviceLister: listers.NewServiceLister(index),
}
tc.primeServices(index)
actual, service := c.requiresRegeneration(tc.secret)
if tc.expected != actual {
t.Errorf("%s: expected %v, got %v", tc.name, tc.expected, actual)
}
if service == nil && tc.expected {
t.Errorf("%s: should have returned service", tc.name)
}
})
}
} | explode_data.jsonl/76916 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2892
} | [
2830,
3393,
46961,
3477,
17252,
1860,
6463,
82572,
64811,
19711,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
688,
914,
198,
197,
25653,
545,
11025,
2915,
31933,
18338,
261,
340,
197,
197,
20474,
286,
353,
85,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLogs_nologs_v1beta1(t *testing.T) {
var (
pipelineName = "nologs-pipeline"
prName = "nologs-run"
ns = "namespace"
taskName = "nologs-task"
)
nsList := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
},
},
}
prs := []*v1beta1.PipelineRun{
{
ObjectMeta: metav1.ObjectMeta{
Name: prName,
Namespace: ns,
Labels: map[string]string{"tekton.dev/pipeline": prName},
},
Spec: v1beta1.PipelineRunSpec{
PipelineRef: &v1beta1.PipelineRef{
Name: pipelineName,
},
},
Status: v1beta1.PipelineRunStatus{
Status: duckv1beta1.Status{
Conditions: duckv1beta1.Conditions{
{
Status: corev1.ConditionUnknown,
Message: "Running",
},
},
},
},
},
}
ps := []*v1beta1.Pipeline{
{
ObjectMeta: metav1.ObjectMeta{
Name: pipelineName,
Namespace: ns,
},
Spec: v1beta1.PipelineSpec{
Tasks: []v1beta1.PipelineTask{
{
Name: taskName,
TaskRef: &v1beta1.TaskRef{
Name: taskName,
},
},
},
},
},
}
cs, _ := test.SeedV1beta1TestData(t, pipelinev1beta1test.Data{PipelineRuns: prs, Pipelines: ps, Namespaces: nsList})
cs.Pipeline.Resources = cb.APIResourceList(versionB1, []string{"pipeline", "pipelinerun"})
tdc := testDynamic.Options{}
dc, err := tdc.Client(
cb.UnstructuredV1beta1P(ps[0], versionB1),
cb.UnstructuredV1beta1PR(prs[0], versionB1),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
prlo := logOptsv1beta1(prName, ns, cs, dc, fake.Streamer([]fake.Log{}), false, false)
output, err := fetchLogs(prlo)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
test.AssertOutput(t, "PipelineRun is still running: Running\n", output)
} | explode_data.jsonl/14871 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 899
} | [
2830,
3393,
51053,
1089,
1609,
82,
2273,
16,
19127,
16,
1155,
353,
8840,
836,
8,
341,
2405,
2399,
197,
3223,
8790,
675,
284,
330,
77,
1609,
82,
2268,
8790,
698,
197,
25653,
675,
981,
284,
330,
77,
1609,
82,
22973,
698,
197,
84041,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestNodeStatusWithCloudProviderNodeIP(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.hostname = testKubeletHostname
existingNode := v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)},
Spec: v1.NodeSpec{},
}
// TODO : is it possible to mock validateNodeIP() to avoid relying on the host interface addresses ?
addrs, err := net.InterfaceAddrs()
assert.NoError(t, err)
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip != nil && !ip.IsLoopback() && ip.To4() != nil {
kubelet.nodeIP = ip
break
}
}
assert.NotNil(t, kubelet.nodeIP)
fakeCloud := &fakecloud.FakeCloud{
Addresses: []v1.NodeAddress{
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
{
Type: v1.NodeExternalIP,
Address: kubelet.nodeIP.String(),
},
{
Type: v1.NodeInternalIP,
Address: "132.143.154.164",
},
{
Type: v1.NodeInternalIP,
Address: kubelet.nodeIP.String(),
},
{
Type: v1.NodeInternalIP,
Address: "132.143.154.165",
},
{
Type: v1.NodeHostName,
Address: testKubeletHostname,
},
},
Err: nil,
}
kubelet.cloud = fakeCloud
kubelet.setNodeAddress(&existingNode)
expectedAddresses := []v1.NodeAddress{
{
Type: v1.NodeExternalIP,
Address: kubelet.nodeIP.String(),
},
{
Type: v1.NodeInternalIP,
Address: kubelet.nodeIP.String(),
},
{
Type: v1.NodeHostName,
Address: testKubeletHostname,
},
}
assert.True(t, apiequality.Semantic.DeepEqual(expectedAddresses, existingNode.Status.Addresses), "%s", diff.ObjectDiff(expectedAddresses, existingNode.Status.Addresses))
} | explode_data.jsonl/69714 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 854
} | [
2830,
3393,
1955,
2522,
2354,
16055,
5179,
1955,
3298,
1155,
353,
8840,
836,
8,
341,
18185,
42,
3760,
1149,
1669,
501,
2271,
42,
3760,
1149,
1155,
11,
895,
1391,
6461,
30485,
89306,
5462,
639,
340,
16867,
1273,
42,
3760,
1149,
727,
60... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestStandardizeReferenceLink(t *testing.T) {
tests := []struct {
name string
storageRef string
want string
}{
{
name: "Only ID",
storageRef: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
want: "storage:aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
},
{
name: "storage:ID",
storageRef: "storage:aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
want: "storage:aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
},
{
name: "storage://ID",
storageRef: "storage://aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
want: "storage:aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
},
{
name: "storage:filename=dummy",
storageRef: "storage:filename=dummy",
want: "storage:filename=dummy",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := StandardizeReferenceLink(tt.storageRef); got != tt.want {
t.Errorf("StandardizeReferenceLink() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/79922 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 382
} | [
2830,
3393,
19781,
551,
8856,
3939,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
197,
16172,
3945,
914,
198,
197,
50780,
914,
198,
197,
59403,
197,
197,
515,
298,
11609,
25,
330,
7308,
3034,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGrant(t *testing.T) {
s := memStore.NewStore()
a := NewAuth(auth.Store(s))
res := &auth.Resource{Type: "service", Name: "Test", Endpoint: "Foo.Bar"}
if err := a.Grant("users.*", res); err != nil {
t.Fatalf("Grant returned an error: %v, expected nil", err)
}
recs, err := s.List()
if err != nil {
t.Fatalf("Could not read from the store: %v", err)
}
if len(recs) != 1 {
t.Errorf("Expected Grant to write 1 record, actually wrote %v", len(recs))
}
} | explode_data.jsonl/80450 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 187
} | [
2830,
3393,
67971,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
1833,
6093,
7121,
6093,
741,
11323,
1669,
1532,
5087,
27435,
38047,
1141,
4390,
10202,
1669,
609,
3242,
20766,
90,
929,
25,
330,
7936,
497,
3988,
25,
330,
2271,
497,
47269,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestLoadMultipleSecretsConfig(t *testing.T) {
portsCases := []struct {
name string
secretBase map[string]interface{}
secretOverride map[string]interface{}
expected []types.ServiceSecretConfig
}{
{
name: "no_override",
secretBase: map[string]interface{}{
"secrets": []interface{}{
"my_secret",
},
},
secretOverride: map[string]interface{}{},
expected: []types.ServiceSecretConfig{
{
Source: "my_secret",
},
},
},
{
name: "override_simple",
secretBase: map[string]interface{}{
"secrets": []interface{}{
"foo_secret",
},
},
secretOverride: map[string]interface{}{
"secrets": []interface{}{
"bar_secret",
},
},
expected: []types.ServiceSecretConfig{
{
Source: "bar_secret",
},
{
Source: "foo_secret",
},
},
},
{
name: "override_same_source",
secretBase: map[string]interface{}{
"secrets": []interface{}{
"foo_secret",
map[string]interface{}{
"source": "bar_secret",
"target": "waw_secret",
},
},
},
secretOverride: map[string]interface{}{
"secrets": []interface{}{
map[string]interface{}{
"source": "bar_secret",
"target": "bof_secret",
},
map[string]interface{}{
"source": "baz_secret",
"target": "waw_secret",
},
},
},
expected: []types.ServiceSecretConfig{
{
Source: "bar_secret",
Target: "bof_secret",
},
{
Source: "baz_secret",
Target: "waw_secret",
},
{
Source: "foo_secret",
},
},
},
}
for _, tc := range portsCases {
t.Run(tc.name, func(t *testing.T) {
configDetails := types.ConfigDetails{
ConfigFiles: []types.ConfigFile{
{
Filename: "base.yml",
Config: map[string]interface{}{
"services": map[string]interface{}{
"foo": tc.secretBase,
},
},
},
{
Filename: "override.yml",
Config: map[string]interface{}{
"services": map[string]interface{}{
"foo": tc.secretOverride,
},
},
},
},
}
config, err := loadTestProject(configDetails)
assert.NilError(t, err)
assert.DeepEqual(t, &types.Project{
Name: "",
WorkingDir: "",
Services: []types.ServiceConfig{
{
Name: "foo",
Secrets: tc.expected,
Environment: types.MappingWithEquals{},
Scale: 1,
},
},
Networks: types.Networks{},
Volumes: types.Volumes{},
Secrets: types.Secrets{},
Configs: types.Configs{},
Extensions: types.Extensions{},
}, config)
})
}
} | explode_data.jsonl/59631 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1324
} | [
2830,
3393,
5879,
32089,
19773,
82,
2648,
1155,
353,
8840,
836,
8,
341,
197,
3394,
37302,
1669,
3056,
1235,
341,
197,
11609,
1843,
914,
198,
197,
197,
20474,
3978,
257,
2415,
14032,
31344,
16094,
197,
197,
20474,
2177,
2415,
14032,
3134... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestParse_CreditInvoiceNotification(t *testing.T) {
result := MustParseFile("testdata/credit_invoice_notification.xml")
if n, ok := result.(*webhooks.CreditInvoiceNotification); !ok {
t.Fatalf("unexpected type: %T, result", n)
} else if diff := cmp.Diff(n, &webhooks.CreditInvoiceNotification{
Type: webhooks.NewCreditInvoice,
Account: webhooks.Account{
XMLName: xml.Name{Local: "account"},
Code: "1234",
},
Invoice: webhooks.CreditInvoice{
XMLName: xml.Name{Local: "invoice"},
UUID: "42fb74de65e9395eb004614144a7b91f",
State: recurly.CreditInvoiceStateClosed,
Origin: recurly.CreditInvoiceOriginWriteOff,
SubscriptionUUIDs: []string{"42fb74ba9efe4c6981c2064436a4e9cd"},
InvoiceNumber: 2404,
Currency: "USD",
BalanceInCents: 0,
TotalInCents: -4882,
TaxInCents: -382,
SubtotalInCents: -4500,
SubTotalBeforeDiscountInCents: -5000,
DiscountInCents: -500,
CreatedAt: recurly.NewTime(MustParseTime("2018-02-13T00:56:22Z")),
UpdatedAt: recurly.NewTime(MustParseTime("2018-02-13T00:56:22Z")),
ClosedAt: recurly.NewTime(MustParseTime("2018-02-13T00:56:22Z")),
},
}); diff != "" {
t.Fatal(diff)
}
} | explode_data.jsonl/76100 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 801
} | [
2830,
3393,
14463,
920,
10827,
34674,
11196,
1155,
353,
8840,
836,
8,
341,
9559,
1669,
15465,
14463,
1703,
445,
92425,
14,
23311,
39225,
34296,
9028,
1138,
743,
308,
11,
5394,
1669,
1102,
41399,
2911,
38560,
727,
10827,
34674,
11196,
1215... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestOSAP(t *testing.T) {
rwc := openTPMOrSkip(t)
defer rwc.Close()
// Try to run OSAP for the SRK.
osapc := &osapCommand{
EntityType: etSRK,
EntityValue: khSRK,
}
if _, err := rand.Read(osapc.OddOSAP[:]); err != nil {
t.Fatal("Couldn't get a random odd OSAP nonce")
}
_, err := osap(rwc, osapc)
if err != nil {
t.Fatal("Couldn't run OSAP:", err)
}
} | explode_data.jsonl/75346 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 178
} | [
2830,
3393,
3126,
2537,
1155,
353,
8840,
836,
8,
341,
7000,
24028,
1669,
1787,
4239,
44,
2195,
35134,
1155,
340,
16867,
435,
24028,
10421,
2822,
197,
322,
9735,
311,
1598,
10085,
2537,
369,
279,
20880,
42,
624,
25078,
391,
66,
1669,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestActivityService_DeleteRepositorySubscription(t *testing.T) {
client, mux, _, teardown := setup()
defer teardown()
mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, "DELETE")
w.WriteHeader(http.StatusNoContent)
})
ctx := context.Background()
_, err := client.Activity.DeleteRepositorySubscription(ctx, "o", "r")
if err != nil {
t.Errorf("Activity.DeleteRepositorySubscription returned error: %v", err)
}
const methodName = "DeleteRepositorySubscription"
testBadOptions(t, methodName, func() (err error) {
_, err = client.Activity.DeleteRepositorySubscription(ctx, "\n", "\n")
return err
})
testNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {
return client.Activity.DeleteRepositorySubscription(ctx, "o", "r")
})
} | explode_data.jsonl/50054 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 288
} | [
2830,
3393,
4052,
1860,
57418,
4624,
33402,
1155,
353,
8840,
836,
8,
341,
25291,
11,
59807,
11,
8358,
49304,
1669,
6505,
741,
16867,
49304,
2822,
2109,
2200,
63623,
4283,
68354,
20271,
7382,
14,
34404,
497,
2915,
3622,
1758,
37508,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestProxy(t *testing.T) {
dynatraceServer, _ := createTestDynatraceClient(t, http.NotFoundHandler())
defer dynatraceServer.Close()
dtc := dynatraceClient{
url: dynatraceServer.URL,
apiToken: apiToken,
paasToken: paasToken,
httpClient: dynatraceServer.Client(),
hostCache: nil,
logger: log.Log.WithName("dtc"),
}
transport := dtc.httpClient.Transport.(*http.Transport)
rawURL := "working.url"
options := Proxy(rawURL)
assert.NotNil(t, options)
options(&dtc)
url, err := transport.Proxy(&http.Request{})
assert.NoError(t, err)
assert.NotNil(t, url)
assert.Equal(t, rawURL, url.Path)
options = Proxy("{!.*&%")
assert.NotNil(t, options)
options(&dtc)
} | explode_data.jsonl/3775 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 290
} | [
2830,
3393,
16219,
1155,
353,
8840,
836,
8,
341,
2698,
1872,
266,
41183,
5475,
11,
716,
1669,
1855,
2271,
95709,
266,
41183,
2959,
1155,
11,
1758,
67255,
3050,
2398,
16867,
31070,
266,
41183,
5475,
10421,
2822,
2698,
10413,
1669,
31070,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestConvertMapOfString(t *testing.T) {
lines, err := convertTypes(
"Foo", "Bar",
`struct Foo {
1: optional map<string, string> one
2: required map<string, string> two
}
struct Bar {
1: optional map<string, string> one
2: required map<string, string> two
}`,
nil,
nil,
)
assert.NoError(t, err)
assertPrettyEqual(t, trim(`
out.One = make(map[string]string, len(in.One))
for key1, value2 := range in.One {
out.One[key1] = string(value2)
}
out.Two = make(map[string]string, len(in.Two))
for key3, value4 := range in.Two {
out.Two[key3] = string(value4)
}
`), lines)
} | explode_data.jsonl/62051 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 270
} | [
2830,
3393,
12012,
2227,
66952,
1155,
353,
8840,
836,
8,
341,
78390,
11,
1848,
1669,
5508,
4173,
1006,
197,
197,
1,
40923,
497,
330,
3428,
756,
197,
197,
63,
1235,
33428,
341,
298,
197,
16,
25,
10101,
2415,
4947,
11,
914,
29,
825,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFetchMetricset(t *testing.T) {
config := test.GetKubeletConfig(t, "volume")
metricSet := mbtest.NewFetcher(t, config)
events, errs := metricSet.FetchEvents()
if len(errs) > 0 {
t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs)
}
assert.NotEmpty(t, events)
} | explode_data.jsonl/55869 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 122
} | [
2830,
3393,
20714,
54310,
746,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
1273,
2234,
42,
3760,
1149,
2648,
1155,
11,
330,
25060,
1138,
2109,
16340,
1649,
1669,
10016,
1944,
7121,
97492,
1155,
11,
2193,
340,
90873,
11,
70817,
1669,
1826... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestVersion(t *testing.T) {
expectedArgs := []string{"version", "--short"}
expectedOutput := "1.0.0"
helm, runner := createHelm(t, nil, expectedOutput)
output, err := helm.Version(false)
assert.NoError(t, err, "should get the version without any error")
verifyArgs(t, helm, runner, expectedArgs...)
assert.Equal(t, expectedOutput, output)
} | explode_data.jsonl/4654 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 117
} | [
2830,
3393,
5637,
1155,
353,
8840,
836,
8,
341,
42400,
4117,
1669,
3056,
917,
4913,
4366,
497,
14482,
8676,
16707,
42400,
5097,
1669,
330,
16,
13,
15,
13,
15,
698,
9598,
23162,
11,
22259,
1669,
1855,
39,
23162,
1155,
11,
2092,
11,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTransformBundleStdin(t *testing.T) {
t.Parallel()
output := runSyslWithOutput(t, ".sysl", bytes.NewReader(createBundle(t, transformScript)),
"transform", "../../tests/simple.sysl", "--script=-")
assert.Equal(t, transformOutput, output)
} | explode_data.jsonl/54907 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 89
} | [
2830,
3393,
8963,
8409,
22748,
258,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
21170,
1669,
1598,
32792,
75,
2354,
5097,
1155,
11,
5933,
7791,
75,
497,
5820,
68587,
32602,
8409,
1155,
11,
5165,
5910,
6965,
197,
197,
1,
470... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestMongo_Info(t *testing.T) {
m, skip := prepMongo(t, true) // adds two comments
if skip {
return
}
ts := func(min int) time.Time { return time.Date(2017, 12, 20, 15, 18, min, 0, time.Local).In(time.UTC) }
// add one more for https://radio-t.com/2
comment := store.Comment{
ID: "12345",
Text: `some text, <a href="http://radio-t.com">link</a>`,
Timestamp: time.Date(2017, 12, 20, 15, 18, 24, 0, time.Local),
Locator: store.Locator{URL: "https://radio-t.com/2", SiteID: "radio-t"},
User: store.User{ID: "user1", Name: "user name"},
}
_, err := m.Create(comment)
assert.Nil(t, err)
r, err := m.Info(store.Locator{URL: "https://radio-t.com/2", SiteID: "radio-t"}, 0)
require.Nil(t, err)
assert.Equal(t, store.PostInfo{URL: "https://radio-t.com/2", Count: 1, FirstTS: ts(24), LastTS: ts(24)}, r)
r, err = m.Info(store.Locator{URL: "https://radio-t.com/2", SiteID: "radio-t"}, 10)
require.Nil(t, err)
assert.Equal(t, store.PostInfo{URL: "https://radio-t.com/2", Count: 1, FirstTS: ts(24), LastTS: ts(24), ReadOnly: true}, r)
r, err = m.Info(store.Locator{URL: "https://radio-t.com", SiteID: "radio-t"}, 0)
require.Nil(t, err)
assert.Equal(t, store.PostInfo{URL: "https://radio-t.com", Count: 2, FirstTS: ts(22), LastTS: ts(23)}, r)
_, err = m.Info(store.Locator{URL: "https://radio-t.com/error", SiteID: "radio-t"}, 0)
require.NotNil(t, err)
_, err = m.Info(store.Locator{URL: "https://radio-t.com", SiteID: "radio-t-error"}, 0)
require.NotNil(t, err)
err = m.SetReadOnly(store.Locator{URL: "https://radio-t.com/2", SiteID: "radio-t"}, true)
require.Nil(t, err)
r, err = m.Info(store.Locator{URL: "https://radio-t.com/2", SiteID: "radio-t"}, 0)
require.Nil(t, err)
assert.Equal(t, store.PostInfo{URL: "https://radio-t.com/2", Count: 1, FirstTS: ts(24), LastTS: ts(24), ReadOnly: true}, r)
} | explode_data.jsonl/54200 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 815
} | [
2830,
3393,
54998,
39624,
1155,
353,
8840,
836,
8,
341,
2109,
11,
10706,
1669,
21327,
54998,
1155,
11,
830,
8,
442,
11367,
1378,
6042,
198,
743,
10706,
341,
197,
853,
198,
197,
532,
57441,
1669,
2915,
14146,
526,
8,
882,
16299,
314,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGrpc_GetBlockByHashes(t *testing.T) {
qapi.On("GetBlockByHashes", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
_, err := g.GetBlockByHashes(getOkCtx(), &types.ReqHashes{})
assert.NoError(t, err)
} | explode_data.jsonl/334 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 90
} | [
2830,
3393,
6464,
3992,
13614,
4713,
1359,
6370,
288,
1155,
353,
8840,
836,
8,
341,
18534,
2068,
8071,
445,
1949,
4713,
1359,
6370,
288,
497,
7860,
13311,
1596,
11,
7860,
13311,
1596,
11,
7860,
13311,
1596,
568,
5598,
27907,
11,
2092,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEffectsRequest(t *testing.T) {
hmock := httptest.NewClient()
client := &Client{
HorizonURL: "https://localhost/",
HTTP: hmock,
}
effectRequest := EffectRequest{}
// all effects
hmock.On(
"GET",
"https://localhost/effects",
).ReturnString(200, effectsResponse)
effects, err := client.Effects(effectRequest)
if assert.NoError(t, err) {
assert.IsType(t, effects, hProtocol.EffectsPage{})
}
effectRequest = EffectRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"}
hmock.On(
"GET",
"https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/effects",
).ReturnString(200, effectsResponse)
effects, err = client.Effects(effectRequest)
if assert.NoError(t, err) {
assert.IsType(t, effects, hProtocol.EffectsPage{})
}
// too many parameters
effectRequest = EffectRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", ForLedger: "123"}
hmock.On(
"GET",
"https://localhost/effects",
).ReturnString(200, effectsResponse)
_, err = client.Effects(effectRequest)
// error case
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "Too many parameters")
}
} | explode_data.jsonl/34853 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 488
} | [
2830,
3393,
39782,
1900,
1155,
353,
8840,
836,
8,
341,
9598,
16712,
1669,
54320,
70334,
7121,
2959,
741,
25291,
1669,
609,
2959,
515,
197,
13292,
269,
16973,
3144,
25,
330,
2428,
1110,
8301,
35075,
197,
197,
9230,
25,
981,
305,
16712,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestResetCtx(t *testing.T) {
store, clean := realtikvtest.CreateMockStoreAndSetup(t)
defer clean()
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk1 := testkit.NewTestKit(t, store)
tk1.MustExec("use test")
tk.MustExec("create table t (i int auto_increment not null key);")
tk.MustExec("insert into t values (1);")
tk.MustExec("set @@tidb_disable_txn_auto_retry = 0")
tk.MustExec("begin;")
tk.MustExec("insert into t values (10);")
tk.MustExec("update t set i = i + row_count();")
tk.MustQuery("select * from t;").Check(testkit.Rows("2", "11"))
tk1.MustExec("update t set i = 0 where i = 1;")
tk1.MustQuery("select * from t;").Check(testkit.Rows("0"))
tk.MustExec("commit;")
tk.MustQuery("select * from t;").Check(testkit.Rows("1", "11"))
tk.MustExec("delete from t where i = 11;")
tk.MustExec("begin;")
tk.MustExec("insert into t values ();")
tk.MustExec("update t set i = i + last_insert_id() + 1;")
tk.MustQuery("select * from t;").Check(testkit.Rows("14", "25"))
tk1.MustExec("update t set i = 0 where i = 1;")
tk1.MustQuery("select * from t;").Check(testkit.Rows("0"))
tk.MustExec("commit;")
tk.MustQuery("select * from t;").Check(testkit.Rows("13", "25"))
} | explode_data.jsonl/5790 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 476
} | [
2830,
3393,
14828,
23684,
1155,
353,
8840,
836,
8,
341,
57279,
11,
4240,
1669,
1931,
83,
1579,
85,
1944,
7251,
11571,
6093,
3036,
21821,
1155,
340,
16867,
4240,
2822,
3244,
74,
1669,
1273,
8226,
7121,
2271,
7695,
1155,
11,
3553,
340,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestHistoryLog(t *testing.T) {
instance := flux.InstanceID("instance")
db := newSQL(t)
defer db.Close()
bailIfErr(t, db.LogEvent(instance, flux.Event{
ServiceIDs: []flux.ServiceID{flux.ServiceID("namespace/service")},
Type: "test",
Message: "event 1",
}))
bailIfErr(t, db.LogEvent(instance, flux.Event{
ServiceIDs: []flux.ServiceID{flux.ServiceID("namespace/other")},
Type: "test",
Message: "event 3",
}))
bailIfErr(t, db.LogEvent(instance, flux.Event{
ServiceIDs: []flux.ServiceID{flux.ServiceID("namespace/service")},
Type: "test",
Message: "event 2",
}))
es, err := db.EventsForService(instance, flux.ServiceID("namespace/service"))
if err != nil {
t.Fatal(err)
}
if len(es) != 2 {
t.Fatalf("Expected 2 events, got %d\n", len(es))
}
checkInDescOrder(t, es)
es, err = db.AllEvents(instance)
if err != nil {
t.Fatal(err)
}
if len(es) != 3 {
t.Fatalf("Expected 3 events, got %#v\n", es)
}
checkInDescOrder(t, es)
} | explode_data.jsonl/77666 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 440
} | [
2830,
3393,
13424,
2201,
1155,
353,
8840,
836,
8,
341,
56256,
1669,
30305,
12688,
915,
445,
4851,
1138,
20939,
1669,
501,
6688,
1155,
340,
16867,
2927,
10421,
2822,
2233,
604,
2679,
7747,
1155,
11,
2927,
5247,
1556,
21650,
11,
30305,
69... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestModifyEncryptionConfigs(t *testing.T) {
conf, cleanup := testutil.InitConfig(t)
defer cleanup(t)
eco := testutil.DummyEncryptionConfigOptions()
encryptionConfig := conf.AddEncryptionConfig(eco)
require.NotNil(t, encryptionConfig)
eco.KeySecretName += stringDelta
conf.ModifyEncryptionConfig(encryptionConfig, eco)
modifiedConfig := conf.EncryptionConfigs[eco.Name]
assert.Equal(t, eco.KeySecretName, modifiedConfig.KeySecretName)
eco.KeySecretNamespace += stringDelta
conf.ModifyEncryptionConfig(encryptionConfig, eco)
assert.Equal(t, eco.KeySecretNamespace, modifiedConfig.KeySecretNamespace)
eco.EncryptionKeyPath += stringDelta
conf.ModifyEncryptionConfig(encryptionConfig, eco)
assert.Equal(t, eco.EncryptionKeyPath, modifiedConfig.EncryptionKeyPath)
eco.DecryptionKeyPath += stringDelta
conf.ModifyEncryptionConfig(encryptionConfig, eco)
assert.Equal(t, eco.DecryptionKeyPath, modifiedConfig.DecryptionKeyPath)
} | explode_data.jsonl/57918 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 299
} | [
2830,
3393,
44427,
79239,
84905,
1155,
353,
8840,
836,
8,
341,
67850,
11,
21290,
1669,
1273,
1314,
26849,
2648,
1155,
340,
16867,
21290,
1155,
692,
197,
54297,
1669,
1273,
1314,
909,
8574,
79239,
2648,
3798,
741,
197,
79937,
2648,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCheckUpdate(t *testing.T) {
mocks, checkService := newCheckSvcStack()
ch := mocks.pipingCoordinator.taskUpdatedChan()
mocks.checkSvc.UpdateCheckFn = func(_ context.Context, _ platform.ID, c influxdb.CheckCreate) (influxdb.Check, error) {
c.SetTaskID(10)
return c, nil
}
deadman := &check.Deadman{}
deadman.SetTaskID(4)
cc := influxdb.CheckCreate{
Check: deadman,
Status: influxdb.Active,
}
check, err := checkService.UpdateCheck(context.Background(), 1, cc)
if err != nil {
t.Fatal(err)
}
select {
case task := <-ch:
if task.ID != check.GetTaskID() {
t.Fatalf("task sent to coordinator doesn't match expected")
}
default:
t.Fatal("didn't receive task")
}
} | explode_data.jsonl/72195 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 272
} | [
2830,
3393,
3973,
4289,
1155,
353,
8840,
836,
8,
341,
2109,
25183,
11,
1779,
1860,
1669,
501,
3973,
92766,
4336,
741,
23049,
1669,
68909,
556,
46095,
64304,
15034,
16196,
46019,
2822,
2109,
25183,
9093,
92766,
16689,
3973,
24911,
284,
291... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStreamReaderDialRequest(t *testing.T) {
for i, tt := range []streamType{streamTypeMessage, streamTypeMsgAppV2} {
tr := &roundTripperRecorder{}
sr := &streamReader{
tr: &Transport{streamRt: tr},
picker: mustNewURLPicker(t, []string{"http://localhost:2380"}),
local: types.ID(1),
remote: types.ID(2),
cid: types.ID(1),
}
sr.dial(tt)
req := tr.Request()
wurl := fmt.Sprintf("http://localhost:2380" + tt.endpoint() + "/1")
if req.URL.String() != wurl {
t.Errorf("#%d: url = %s, want %s", i, req.URL.String(), wurl)
}
if w := "GET"; req.Method != w {
t.Errorf("#%d: method = %s, want %s", i, req.Method, w)
}
if g := req.Header.Get("X-Etcd-Cluster-ID"); g != "1" {
t.Errorf("#%d: header X-Etcd-Cluster-ID = %s, want 1", i, g)
}
if g := req.Header.Get("X-Raft-To"); g != "2" {
t.Errorf("#%d: header X-Raft-To = %s, want 2", i, g)
}
}
} | explode_data.jsonl/68639 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 432
} | [
2830,
3393,
70525,
35,
530,
1900,
1155,
353,
8840,
836,
8,
341,
2023,
600,
11,
17853,
1669,
2088,
3056,
4027,
929,
90,
4027,
929,
2052,
11,
4269,
929,
6611,
2164,
53,
17,
92,
341,
197,
25583,
1669,
609,
1049,
21884,
6922,
47023,
160... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestTags_Clone(t *testing.T) {
tags := models.NewTags(map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"})
clone := tags.Clone()
for i := range tags {
tag := tags[i]
c := clone[i]
if &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) {
t.Fatalf("key %s should have been a clone of %s", c.Key, tag.Key)
}
if &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) {
t.Fatalf("value %s should have been a clone of %s", c.Value, tag.Value)
}
}
} | explode_data.jsonl/16895 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 217
} | [
2830,
3393,
15930,
85110,
603,
1155,
353,
8840,
836,
8,
341,
3244,
2032,
1669,
4119,
7121,
15930,
9147,
14032,
30953,
4913,
74,
16,
788,
330,
85,
16,
497,
330,
74,
17,
788,
330,
85,
17,
497,
330,
74,
18,
788,
330,
85,
18,
1,
882... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestWaitForNetworkReady(t *testing.T) {
testCases := map[string]struct {
IPs []net.IP
Setup func(t *testing.T) (context.Context, func())
AssertErr assert.ErrorAssertionFunc
}{
"no IPs": {
IPs: nil,
Setup: func(_ *testing.T) (context.Context, func()) {
return context.Background(), func() {}
},
AssertErr: assert.NoError,
},
"IPs not found time out": {
IPs: []net.IP{net.ParseIP("192.0.2.42")},
Setup: func(_ *testing.T) (context.Context, func()) {
ctx, cancelF := context.WithTimeout(context.Background(), time.Millisecond*200)
return ctx, cancelF
},
AssertErr: assert.Error,
},
"localhost": {
IPs: []net.IP{net.ParseIP("127.0.0.1")},
Setup: func(_ *testing.T) (context.Context, func()) {
ctx, cancelF := context.WithTimeout(context.Background(), time.Millisecond*500)
return ctx, cancelF
},
AssertErr: assert.NoError,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
ctx, cleanup := tc.Setup(t)
defer cleanup()
tc.AssertErr(t, launcher.WaitForNetworkReady(ctx, tc.IPs))
})
}
} | explode_data.jsonl/9329 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 484
} | [
2830,
3393,
14190,
2461,
12320,
19202,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
2415,
14032,
60,
1235,
341,
197,
197,
3298,
82,
981,
3056,
4711,
46917,
198,
197,
197,
21821,
257,
2915,
1155,
353,
8840,
836,
8,
320,
2147,
9328... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSetDefaultProfileImage(t *testing.T) {
th := Setup(t).InitBasic()
defer th.TearDown()
user := th.BasicUser
_, err := th.Client.SetDefaultProfileImage(user.Id)
require.NoError(t, err)
resp, err := th.Client.SetDefaultProfileImage(model.NewId())
require.Error(t, err)
CheckForbiddenStatus(t, resp)
// status code returns either forbidden or unauthorized
// note: forbidden is set as default at Client4.SetDefaultProfileImage when request is terminated early by server
th.Client.Logout()
resp, err = th.Client.SetDefaultProfileImage(user.Id)
require.Error(t, err)
if resp.StatusCode == http.StatusForbidden {
CheckForbiddenStatus(t, resp)
} else if resp.StatusCode == http.StatusUnauthorized {
CheckUnauthorizedStatus(t, resp)
} else {
require.Fail(t, "Should have failed either forbidden or unauthorized")
}
_, err = th.SystemAdminClient.SetDefaultProfileImage(user.Id)
require.NoError(t, err)
ruser, appErr := th.App.GetUser(user.Id)
require.Nil(t, appErr)
assert.Equal(t, int64(0), ruser.LastPictureUpdate, "Picture should have resetted to default")
info := &model.FileInfo{Path: "users/" + user.Id + "/profile.png"}
err = th.cleanupTestFile(info)
require.NoError(t, err)
} | explode_data.jsonl/47539 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 417
} | [
2830,
3393,
1649,
3675,
8526,
1906,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1155,
568,
3803,
15944,
741,
16867,
270,
836,
682,
4454,
741,
19060,
1669,
270,
48868,
1474,
271,
197,
6878,
1848,
1669,
270,
11716,
4202,
3675,
8526,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestDeleteHeadersFooters(t *testing.T) {
localFilePath := GetLocalPath(filepath.Join("DocumentElements", "HeaderFooters"), "HeadersFooters.doc")
remoteFolder := path.Join(remoteBaseTestDataFolder, "DocumentElements", "HeaderFooters")
remoteName := "TestDeleteHeadersFooters.docx"
sectionPath := "sections/0"
options := map[string]interface{}{
"folder": remoteFolder,
}
client, ctx := UploadFileToStorage(t, localFilePath, path.Join(remoteFolder, remoteName))
_, err := client.WordsApi.DeleteHeadersFooters(ctx, remoteName, sectionPath, options)
if err != nil {
t.Error(err)
}
} | explode_data.jsonl/23188 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 198
} | [
2830,
3393,
6435,
10574,
41820,
388,
1155,
353,
8840,
836,
8,
1476,
8854,
19090,
1669,
2126,
7319,
1820,
34793,
22363,
445,
7524,
11868,
497,
330,
4047,
41820,
388,
3975,
330,
10574,
41820,
388,
23671,
1138,
197,
18147,
13682,
1669,
1815,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestEncodeDecodeCurveSpecificationV1(t *testing.T) {
v := CurveSpecificationV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeCurveSpecificationV1 Msgsize() is inaccurate")
}
vn := CurveSpecificationV1{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
} | explode_data.jsonl/14248 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 202
} | [
2830,
3393,
32535,
32564,
31325,
56139,
53,
16,
1155,
353,
8840,
836,
8,
341,
5195,
1669,
53677,
56139,
53,
16,
16094,
2405,
6607,
5820,
22622,
198,
21169,
79,
50217,
2099,
5909,
11,
609,
85,
692,
2109,
1669,
348,
30365,
2141,
741,
74... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestServerMigration(t *testing.T) {
dir := "migrationTest"
os.RemoveAll(dir)
defer os.RemoveAll(dir)
os.Mkdir(dir, 0777)
db, err := dbutil.NewUserRegistrySQLLite3(filepath.Join(dir, "zigledger-ca-server.db"))
util.FatalError(t, err, "Failed to create db")
_, err = db.Exec("INSERT INTO users (id, token, type, affiliation, attributes, state, max_enrollments, level) VALUES ('registrar', '', 'user', 'org2', '[{\"name\":\"hf.Registrar.Roles\",\"value\":\"user,peer,client\"}]', '0', '-1', '0')")
assert.NoError(t, err, "Failed to insert user 'registrar' into database")
_, err = db.Exec("INSERT INTO users (id, token, type, affiliation, attributes, state, max_enrollments, level) VALUES ('notregistrar', '', 'user', 'org2', '[{\"name\":\"hf.Revoker\",\"value\":\"true\"}]', '0', '-1', '0')")
assert.NoError(t, err, "Failed to insert user 'notregistrar' into database")
server := TestGetServer2(false, rootPort, dir, "", -1, t)
if server == nil {
return
}
err = server.Start()
util.FatalError(t, err, "Server start failed")
defer func() {
err = server.Stop()
if err != nil {
t.Errorf("Failed to stop server: %s", err)
}
}()
registrar, err := server.CA.registry.GetUser("registrar", nil)
assert.NoError(t, err, "Failed to get user")
registrarAttr, err := registrar.GetAttribute("hf.Registrar.Attributes")
assert.NoError(t, err, "Failed to get attribute")
t.Logf("registrarAttr: '%+v'", registrarAttr)
if registrarAttr.Value == "" {
t.Error("Failed to correctly migrate user 'registrar'")
}
notregistrar, err := server.CA.registry.GetUser("notregistrar", nil)
assert.NoError(t, err, "Failed to get user")
_, err = notregistrar.GetAttribute("hf.Registrar.Attributes")
assert.Error(t, err, "Non-registrar user should not have this attribute, failed to correctly migrate user")
} | explode_data.jsonl/82709 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 693
} | [
2830,
3393,
5475,
20168,
1155,
353,
8840,
836,
8,
341,
48532,
1669,
330,
80227,
2271,
698,
25078,
84427,
14161,
340,
16867,
2643,
84427,
14161,
340,
25078,
1321,
12438,
14161,
11,
220,
15,
22,
22,
22,
340,
20939,
11,
1848,
1669,
2927,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestPatchOptionReturnsBadRequest(t *testing.T) {
t.Parallel()
Convey("Given a Dataset API instance with a mocked datastore GetInstance", t, func() {
w := httptest.NewRecorder()
mockedDataStore, isLocked := storeMockWithLock(false)
datasetAPI := getAPIWithCMDMocks(testContext, mockedDataStore, &mocks.DownloadsGeneratorMock{})
bodies := map[string]io.Reader{
"Then patch dimension option with an invalid body returns bad request": strings.NewReader(`wrong`),
"Then patch dimension option with a patch containing an unsupported method returns bad request": strings.NewReader(`[{"op": "remove", "path": "/node_id"}]`),
"Then patch dimension option with an unexpected path returns bad request": strings.NewReader(`[{"op": "add", "path": "unexpected", "value": "11"}]`),
"Then patch dimension option with an unexpected value type for /node_id path returns bad request": strings.NewReader(`[{"op": "add", "path": "/node_id", "value": 123.321}]`),
"Then patch dimension option with an unexpected value type for /order path returns bad request": strings.NewReader(`[{"op": "add", "path": "/order", "value": "notAnOrder"}]`),
}
for msg, body := range bodies {
Convey(msg, func() {
r, err := createRequestWithToken(http.MethodPatch, "http://localhost:21800/instances/123/dimensions/age/options/55", body)
So(err, ShouldBeNil)
datasetAPI.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(mockedDataStore.GetInstanceCalls(), ShouldHaveLength, 1)
So(*isLocked, ShouldBeFalse)
})
}
})
} | explode_data.jsonl/20827 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 586
} | [
2830,
3393,
43622,
5341,
16446,
46015,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
93070,
5617,
445,
22043,
264,
39183,
5333,
2867,
448,
264,
46149,
64986,
2126,
2523,
497,
259,
11,
2915,
368,
341,
197,
6692,
1669,
54320,
703... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNSERetryClient_Find(t *testing.T) {
t.Cleanup(func() { goleak.VerifyNone(t) })
var callCounter = &count.CallCounter{}
var counter = count.NewNetworkServiceEndpointRegistryClient(callCounter)
var client = chain.NewNetworkServiceEndpointRegistryClient(
retry.NewNetworkServiceEndpointRegistryClient(
context.Background(),
retry.WithInterval(time.Millisecond*10),
retry.WithTryTimeout(time.Second/30)),
counter,
injecterror.NewNetworkServiceEndpointRegistryClient(injecterror.WithFindErrorTimes(0, 1, 2, 3, 4)),
)
var _, err = client.Find(context.Background(), nil)
require.NoError(t, err)
require.Equal(t, 6, callCounter.Finds())
} | explode_data.jsonl/68615 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 234
} | [
2830,
3393,
2448,
640,
15149,
2959,
95245,
1155,
353,
8840,
836,
8,
341,
3244,
727,
60639,
18552,
368,
314,
728,
273,
585,
54853,
4064,
1155,
8,
9568,
2405,
1618,
14099,
284,
609,
1830,
27017,
14099,
16094,
2405,
5546,
284,
1760,
7121,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestChapter(t *testing.T) {
tt := []struct {
id int
title string
pages int
err bool
}{
{0, "", 0, true},
{517244, "Cool Day", 1, false},
}
for _, tc := range tt {
ctx := context.Background()
c, err := md.Chapter(ctx, tc.id, nil)
if !tc.err && err != nil {
t.Fatalf("expected chapter to exist, got %q", err)
}
if tc.err {
continue
}
if c.String() != tc.title {
t.Fatalf("expected title to be %s, got %s", tc.title, c.String())
}
if len(c.Pages) != tc.pages {
t.Fatalf("expected chapter to have %d pages, not %d", tc.pages, len(c.Pages))
}
if len(c.Images()) != tc.pages {
t.Fatalf("expected chapter to have %d images, not %d", tc.pages, len(c.Pages))
}
}
} | explode_data.jsonl/29417 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 319
} | [
2830,
3393,
25020,
1155,
353,
8840,
836,
8,
341,
3244,
83,
1669,
3056,
1235,
341,
197,
15710,
262,
526,
198,
197,
24751,
914,
198,
197,
3223,
1134,
526,
198,
197,
9859,
256,
1807,
198,
197,
59403,
197,
197,
90,
15,
11,
7342,
220,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestDaoAddReportLogs(t *testing.T) {
var (
c = context.TODO()
sqls = []string{}
)
convey.Convey("AddReportLogs", t, func(ctx convey.C) {
id, err := d.AddReportLogs(c, sqls)
ctx.Convey("Then err should be nil.id should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldNotBeNil)
ctx.So(id, convey.ShouldBeGreaterThanOrEqualTo, 0)
})
})
} | explode_data.jsonl/51299 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 169
} | [
2830,
3393,
12197,
2212,
10361,
51053,
1155,
353,
8840,
836,
8,
341,
2405,
2399,
197,
1444,
262,
284,
2266,
90988,
741,
197,
30633,
82,
284,
3056,
917,
16094,
197,
340,
37203,
5617,
4801,
5617,
445,
2212,
10361,
51053,
497,
259,
11,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestConfigEmptySiteName(t *testing.T) {
c1 := Config{
TeamSettings: TeamSettings{
SiteName: NewString(""),
},
}
c1.SetDefaults()
if *c1.TeamSettings.SiteName != TEAM_SETTINGS_DEFAULT_SITE_NAME {
t.Fatal("TeamSettings.SiteName should default to " + TEAM_SETTINGS_DEFAULT_SITE_NAME)
}
} | explode_data.jsonl/50667 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 121
} | [
2830,
3393,
2648,
3522,
17597,
675,
1155,
353,
8840,
836,
8,
341,
1444,
16,
1669,
5532,
515,
197,
197,
14597,
6086,
25,
7909,
6086,
515,
298,
7568,
632,
675,
25,
1532,
703,
445,
4461,
197,
197,
1583,
197,
532,
1444,
16,
4202,
16273,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFairEvaluationForNodes(t *testing.T) {
numAllNodes := 500
nodeNames := make([]string, 0, numAllNodes)
for i := 0; i < numAllNodes; i++ {
nodeNames = append(nodeNames, strconv.Itoa(i))
}
nodes := makeNodeList(nodeNames)
g := makeScheduler(
nodes,
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
)
// To make numAllNodes % nodesToFind != 0
g.percentageOfNodesToScore = 30
nodesToFind := int(g.numFeasibleNodesToFind(int32(numAllNodes)))
// Iterating over all nodes more than twice
for i := 0; i < 2*(numAllNodes/nodesToFind+1); i++ {
nodesThatFit, _, err := g.findNodesThatFitPod(context.Background(), framework.NewCycleState(), &v1.Pod{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(nodesThatFit) != nodesToFind {
t.Errorf("got %d nodes filtered, want %d", len(nodesThatFit), nodesToFind)
}
if g.nextStartNodeIndex != (i+1)*nodesToFind%numAllNodes {
t.Errorf("got %d lastProcessedNodeIndex, want %d", g.nextStartNodeIndex, (i+1)*nodesToFind%numAllNodes)
}
}
} | explode_data.jsonl/2399 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 462
} | [
2830,
3393,
60795,
82363,
2461,
12288,
1155,
353,
8840,
836,
8,
341,
22431,
2403,
12288,
1669,
220,
20,
15,
15,
198,
20831,
7980,
1669,
1281,
10556,
917,
11,
220,
15,
11,
1629,
2403,
12288,
340,
2023,
600,
1669,
220,
15,
26,
600,
36... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestServiceDiscoveryServices(t *testing.T) {
store, sd, _, stopFn := initServiceDiscovery()
defer stopFn()
expectedServices := []*model.Service{
makeService("*.google.com", "httpDNS", constants.UnspecifiedIP, map[string]int{"http-port": 80, "http-alt-port": 8080}, true, model.DNSLB),
makeService("tcpstatic.com", "tcpStatic", "172.217.0.1", map[string]int{"tcp-444": 444}, true, model.ClientSideLB),
}
createConfigs([]*model.Config{httpDNS, tcpStatic}, store, t)
services, err := sd.Services()
if err != nil {
t.Errorf("Services() encountered unexpected error: %v", err)
}
sortServices(services)
sortServices(expectedServices)
if err := compare(t, services, expectedServices); err != nil {
t.Error(err)
}
} | explode_data.jsonl/12833 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 261
} | [
2830,
3393,
1860,
67400,
11025,
1155,
353,
8840,
836,
8,
341,
57279,
11,
20585,
11,
8358,
2936,
24911,
1669,
2930,
1860,
67400,
741,
16867,
2936,
24911,
2822,
42400,
11025,
1669,
29838,
2528,
13860,
515,
197,
77438,
1860,
445,
19922,
1748... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestHashPartitionKeyMatchesKinesisHashing(t *testing.T) {
// The hash key range of the first of 2 kinesis shards.
var shard0Range, err = parseKinesisShardRange("0", "170141183460469231731687303715884105727")
require.NoError(t, err)
var testCases = []struct {
// The partition key that was provided in put-record
partitionKey string
// Whether the record was put into shard 0. False means it was put into shard 1
included bool
}{
{"canary", true},
{"fooo", false},
{"barticus", false},
{"snapple", false},
{"joseph", false},
{"jessica", false},
{"jeebus", true},
{"daffy", true},
{"fartition", true},
{"pancakes", false},
{"waffles", false},
{"crepes", false},
}
for _, tc := range testCases {
var keyHash = hashPartitionKey(tc.partitionKey)
require.Equalf(t, tc.included, shard0Range.Includes(keyHash), "TC: %#v", tc)
}
} | explode_data.jsonl/36859 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 336
} | [
2830,
3393,
6370,
49978,
1592,
42470,
42,
82789,
6370,
287,
1155,
353,
8840,
836,
8,
341,
197,
322,
576,
5175,
1376,
2088,
315,
279,
1156,
315,
220,
17,
595,
82789,
74110,
624,
2405,
52069,
15,
6046,
11,
1848,
284,
4715,
42,
82789,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestLoadToml(t *testing.T) {
provider := newTestProvider(t, "/etc/test.toml", `
[Test]
foo = "foobar"
`)
settings, err := provider.Load("test", []string{"/etc"})
fatal(t, err)
if got := getString(settings.Get("Test.foo")); got != "foobar" {
t.Fatalf("got %#v; want %#v", got, "foobar")
}
} | explode_data.jsonl/74506 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 128
} | [
2830,
3393,
5879,
24732,
75,
1155,
353,
8840,
836,
8,
341,
197,
19979,
1669,
501,
2271,
5179,
1155,
11,
3521,
12107,
12697,
73494,
75,
497,
22074,
57585,
921,
7975,
284,
330,
50267,
698,
24183,
62930,
11,
1848,
1669,
9109,
13969,
445,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestShouldBeNil(t *testing.T) {
assert := NewAssertion(t)
assert.ExpectedCountInvalid("actual", should.BeNil, "EXTRA")
assert.Pass(nil, should.BeNil)
assert.Pass([]string(nil), should.BeNil)
assert.Pass((*string)(nil), should.BeNil)
assert.Fail(notNil, should.BeNil)
} | explode_data.jsonl/22493 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 112
} | [
2830,
3393,
14996,
3430,
19064,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
1532,
68639,
1155,
692,
6948,
5121,
4046,
2507,
7928,
445,
11944,
497,
1265,
70923,
19064,
11,
330,
3257,
20252,
5130,
6948,
87768,
27907,
11,
1265,
70923,
19064,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNewIndexDatabase_err(t *testing.T) {
ctrl := gomock.NewController(t)
defer func() {
createBackend = newIDMappingBackend
createSeriesWAL = wal.NewSeriesWAL
_ = fileutil.RemoveDir(testPath)
ctrl.Finish()
}()
mockMetadata := metadb.NewMockMetadata(ctrl)
mockMetadata.EXPECT().DatabaseName().Return("test").AnyTimes()
backend := NewMockIDMappingBackend(ctrl)
createBackend = func(parent string) (IDMappingBackend, error) {
return backend, nil
}
// case 1: create series wal err
backend.EXPECT().Close().Return(fmt.Errorf("err"))
createSeriesWAL = func(path string) (wal.SeriesWAL, error) {
return nil, fmt.Errorf("err")
}
db, err := NewIndexDatabase(context.TODO(), testPath, mockMetadata, nil, nil)
assert.Error(t, err)
assert.Nil(t, db)
// case 2: series wal recovery err
mockSeriesWAl := wal.NewMockSeriesWAL(ctrl)
createSeriesWAL = func(path string) (wal.SeriesWAL, error) {
return mockSeriesWAl, nil
}
backend.EXPECT().Close().Return(fmt.Errorf("err"))
mockSeriesWAl.EXPECT().Recovery(gomock.Any(), gomock.Any())
mockSeriesWAl.EXPECT().NeedRecovery().Return(true)
db, err = NewIndexDatabase(context.TODO(), testPath, mockMetadata, nil, nil)
assert.Error(t, err)
assert.Nil(t, db)
} | explode_data.jsonl/33821 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 467
} | [
2830,
3393,
3564,
1552,
5988,
9266,
1155,
353,
8840,
836,
8,
341,
84381,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
16867,
2915,
368,
341,
197,
39263,
29699,
284,
501,
915,
6807,
29699,
198,
197,
39263,
25544,
54,
969,
284,
40826,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestSqueezing verifies that squeezing a SHAKE digest one byte per Read
// produces exactly the same output as a single 32-byte Read, for every
// registered SHAKE function and for both implementations exercised by
// testUnalignedAndGeneric.
func TestSqueezing(t *testing.T) {
	testUnalignedAndGeneric(t, func(impl string) {
		for functionName, newShakeHash := range testShakes {
			// Reference output: squeeze 32 bytes in one call.
			d0 := newShakeHash()
			d0.Write([]byte(testString))
			ref := make([]byte, 32)
			d0.Read(ref)

			// Same input, but squeeze one byte at a time.
			d1 := newShakeHash()
			d1.Write([]byte(testString))
			var multiple []byte
			// Idiomatic form: "for range" when the loop variable is unused
			// (go vet flags "for _ = range").
			for range ref {
				one := make([]byte, 1)
				d1.Read(one)
				multiple = append(multiple, one...)
			}
			if !bytes.Equal(ref, multiple) {
				t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref))
			}
		}
	})
} | explode_data.jsonl/67466 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 250
} | [
2830,
3393,
50,
591,
10125,
287,
1155,
353,
8840,
836,
8,
341,
18185,
1806,
47142,
3036,
19964,
1155,
11,
2915,
7,
6383,
914,
8,
341,
197,
2023,
90519,
11,
501,
2016,
726,
6370,
1669,
2088,
1273,
2016,
2050,
341,
298,
2698,
15,
1669... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestGetErrorRates checks that the query service delegates GetErrorRates
// to the underlying metrics reader exactly once and returns its result
// unchanged.
func TestGetErrorRates(t *testing.T) {
	svc := initializeTestMetricsQueryService()
	wantRates := &protometrics.MetricFamily{}
	params := &metricsstore.ErrorRateQueryParameters{}
	svc.metricsReader.On("GetErrorRates", mock.Anything, params).
		Return(wantRates, nil).
		Times(1)

	gotRates, err := svc.queryService.GetErrorRates(context.Background(), params)

	assert.NoError(t, err)
	assert.Equal(t, wantRates, gotRates)
} | explode_data.jsonl/12527 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 157
} | [
2830,
3393,
1949,
1454,
82623,
1155,
353,
8840,
836,
8,
341,
3244,
26358,
1669,
9468,
2271,
27328,
2859,
1860,
741,
42400,
1454,
82623,
1669,
609,
4391,
91791,
1321,
16340,
15192,
16094,
18534,
4870,
1669,
609,
43262,
4314,
6141,
11564,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestTargetQueue_Simple exercises the happy path: a single image target
// with a single build state results in exactly one build-handler call.
func TestTargetQueue_Simple(t *testing.T) {
	fixture := newTargetQueueFixture(t)

	target := model.NewImageTarget(container.MustParseSelector("vigoda"))
	state := store.BuildState{}
	stateSet := store.BuildStateSet{target.ID(): state}

	fixture.run([]model.ImageTarget{target}, stateSet)

	wantCalls := map[model.TargetID]fakeBuildHandlerCall{
		target.ID(): newFakeBuildHandlerCall(target, state, 1, []store.BuildResult{}),
	}
	assert.Equal(t, wantCalls, fixture.handler.calls)
} | explode_data.jsonl/2246 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 188
} | [
2830,
3393,
6397,
7554,
1098,
6456,
1155,
353,
8840,
836,
8,
341,
1166,
1669,
501,
6397,
7554,
18930,
1155,
692,
3244,
16,
1669,
1614,
7121,
1906,
6397,
28168,
50463,
14463,
5877,
445,
91247,
13993,
5455,
1903,
16,
1669,
3553,
25212,
13... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestClaimONG exercises the ONG claim flow against a live Ontology node.
// The NEP2 passphrase and key are blank here, so NEP2Decrypt yields an
// empty WIF and the test returns early — presumably a deliberate guard so
// the test is a no-op unless a real key is pasted in; TODO confirm.
func TestClaimONG(t *testing.T) {
	// Public Ontology node used for the integration call.
	endpoint := "http://dappnode2.ont.io:20336"
	// Decrypt error is intentionally discarded; the empty-WIF check below
	// acts as the guard instead.
	wif, _ := neoutils.NEP2Decrypt("", "")
	if wif == "" {
		log.Printf("No wif")
		return
	}
	gasPrice := int(500)
	gasLimit := int(20000)
	txid, err := neoutils.ClaimONG(endpoint, gasPrice, gasLimit, wif)
	if err != nil {
		// Failures are logged, not asserted — this test never fails the run.
		log.Printf("err %v", err)
		return
	}
	log.Printf("tx id =%v", txid)
} | explode_data.jsonl/20397 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 170
} | [
2830,
3393,
45544,
7539,
1155,
353,
8840,
836,
8,
341,
6246,
2768,
1669,
330,
1254,
1110,
67,
676,
3509,
17,
13,
544,
4245,
25,
17,
15,
18,
18,
21,
698,
6692,
333,
11,
716,
1669,
834,
411,
8669,
2067,
9197,
17,
89660,
19814,
14676... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestNewRequestSender verifies the defaults of a freshly constructed
// RequestSender and that WithHeader, WithMethod and WithTracer each
// mutate the sender as expected.
func TestNewRequestSender(t *testing.T) {
	its := assert.New(t)

	sender := NewRequestSender(MustParseURL("https://foo.com/bar"))

	// Construction defaults.
	its.NotNil(sender.Transport())
	its.NotNil(sender.Headers())
	its.Equal(DefaultRequestTimeout, sender.Client().Timeout)
	its.NotEqual("bar", sender.Headers().Get("foo"))

	// WithHeader makes the header visible through Headers().
	sender.WithHeader("foo", "bar")
	its.Equal("bar", sender.Headers().Get("foo"))

	// WithMethod overrides the default method.
	its.Equal(DefaultRequestMethod, sender.Method())
	sender.WithMethod("GET")
	its.Equal("GET", sender.Method())

	// WithTracer installs a tracer where there was none.
	its.Nil(sender.Tracer())
	sender.WithTracer(mockRequestTracer{})
	its.NotNil(sender.Tracer())
} | explode_data.jsonl/8492 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 217
} | [
2830,
3393,
3564,
1900,
20381,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
41231,
1669,
1532,
1900,
20381,
3189,
590,
14463,
3144,
445,
2428,
1110,
7975,
905,
49513,
5455,
6948,
93882,
17027,
87669,
2398,
6948,
93882,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestEndpointAddressNodeNameUpdateRestrictions ensures NodeName may be
// changed on an endpoints update; this accommodates a nodeIP or PodCIDR
// being reused, where the same IP later belongs to a different node.
func TestEndpointAddressNodeNameUpdateRestrictions(t *testing.T) {
	before := newNodeNameEndpoint("kubernetes-node-setup-by-backend")
	after := newNodeNameEndpoint("kubernetes-changed-nodename")

	// An update that only changes NodeName must validate cleanly, both as
	// a standalone object and as an update against the old one.
	errs := ValidateEndpoints(after)
	errs = append(errs, ValidateEndpointsUpdate(after, before)...)
	if len(errs) != 0 {
		t.Error("Endpoint should allow changing of Subset.Addresses.NodeName on update")
	}
} | explode_data.jsonl/1064 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 191
} | [
2830,
3393,
27380,
4286,
1955,
675,
4289,
50360,
21439,
1155,
353,
8840,
836,
8,
341,
61828,
27380,
1669,
33560,
675,
27380,
445,
74,
29827,
39054,
78097,
14319,
89987,
1138,
197,
11768,
27380,
1669,
33560,
675,
27380,
445,
74,
29827,
115... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestToDomainPrefix round-trips a byte string through ToDomainPrefix and
// FromDomainPrefix and checks the decoded bytes match the original input.
func TestToDomainPrefix(t *testing.T) {
	original := []byte("tonsnandtonsofbytesallinarowwhenwilltheystopnobodyknowsoktheymayaswellstopnow")

	encoded, err := ToDomainPrefix(original)
	if err != nil {
		t.Error("Unexpected error: ", err)
	}

	roundTripped, err := FromDomainPrefix(encoded)
	if err != nil {
		t.Error("Unexpected error: ", err)
	}

	if !bytes.Equal(original, roundTripped) {
		t.Error("Expected decode to input")
	}
} | explode_data.jsonl/47699 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 159
} | [
2830,
3393,
1249,
13636,
14335,
1155,
353,
8840,
836,
8,
341,
22427,
1669,
3056,
3782,
445,
777,
9613,
437,
777,
704,
69,
9651,
541,
13762,
363,
9309,
14387,
1782,
597,
453,
77,
42340,
32034,
82,
562,
20069,
18358,
300,
9157,
9495,
33... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestX12EncodeChar checks that a character outside the X12 set is
// rejected and spot-checks the code assigned to representative in-set
// characters.
func TestX12EncodeChar(t *testing.T) {
	// '^' is not an X12 character, so encoding must fail.
	if _, err := x12EncodeChar('^', []byte{}); err == nil {
		t.Fatalf("x12EncodeChar must be error")
	}
	// Expected codes for each representative character.
	testX12EncodeChar(t, '\r', 0)
	testX12EncodeChar(t, '*', 1)
	testX12EncodeChar(t, '>', 2)
	testX12EncodeChar(t, ' ', 3)
	testX12EncodeChar(t, '0', 4)
	testX12EncodeChar(t, '9', 13)
	testX12EncodeChar(t, 'A', 14)
	testX12EncodeChar(t, 'Z', 39)
} | explode_data.jsonl/49804 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 190
} | [
2830,
3393,
55,
16,
17,
32535,
4768,
1155,
353,
8840,
836,
8,
341,
197,
6878,
384,
1669,
856,
16,
17,
32535,
4768,
86085,
516,
3056,
3782,
37790,
743,
384,
621,
2092,
341,
197,
3244,
30762,
445,
87,
16,
17,
32535,
4768,
1969,
387,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestClock_DelJob verifies that cancelling a pending job removes it from
// the clock, leaving jobsNum-1 jobs waiting.
func TestClock_DelJob(t *testing.T) {
	// Approach:
	// Schedule a fixed number of jobs that start executing after a 1-second delay.
	// Within that second, delete a job.
	// If the execution count is 0, one second was not enough to complete the
	// corresponding number of add/delete operations.
	var (
		jobsNum = 20000
		randscope = 1 * 1000 * 1000 * 1000 // random jitter window: 1s expressed in nanoseconds
		jobs = make([]Job, jobsNum)
		delmod = r.Intn(jobsNum) // index of the job that will be cancelled
		myClock = Default().Reset()
	)
	fn := func() {
		// do nothing — the job body is irrelevant to this test
	}
	for i := 0; i < jobsNum; i++ {
		delay := time.Second + time.Duration(r.Intn(randscope)) // add one extra second of delay so no job is already being notified for execution while we delete, which would skew the later check
		job, _ := myClock.AddJobWithInterval(delay, fn)
		jobs[i] = job
	}
	readyCancelJob := jobs[delmod]
	readyCancelJob.Cancel()
	// Exactly one job was cancelled, so jobsNum-1 must remain pending.
	if myClock.WaitJobs() != uint64(jobsNum-1) {
		t.Errorf("任务删除后,应该只剩下%v条任务,实际还有%v条\n", myClock.Count(), jobsNum-1)
	}
} | explode_data.jsonl/2024 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 515
} | [
2830,
3393,
26104,
1557,
301,
12245,
1155,
353,
8840,
836,
8,
341,
197,
322,
104337,
28311,
197,
322,
93620,
99623,
81800,
108530,
3837,
99771,
13343,
16,
45918,
55286,
75117,
198,
197,
322,
18493,
114825,
31843,
3837,
28606,
104152,
88802,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// Test_stdio loads the stdio WASI module, feeds it "WASI\n" on stdin,
// runs its _start entry point, and verifies what it wrote to stdout and
// stderr.
func Test_stdio(t *testing.T) {
	source, err := os.ReadFile("wasm/stdio.wasm")
	require.NoError(t, err)
	module, err := wasm.DecodeModule(source)
	require.NoError(t, err)

	// Wire all three standard streams to in-memory buffers.
	in := bytes.NewBuffer([]byte("WASI\n"))
	out := bytes.NewBuffer(nil)
	errOut := bytes.NewBuffer(nil)
	env := wasi.NewEnvironment(
		wasi.Stdin(in),
		wasi.Stdout(out),
		wasi.Stderr(errOut),
	)

	store := wasm.NewStore(wazeroir.NewEngine())
	require.NoError(t, env.Register(store))
	require.NoError(t, store.Instantiate(module, "test"))

	_, _, err = store.CallFunction("test", "_start")
	require.NoError(t, err)

	require.Equal(t, "Hello, WASI!", strings.TrimSpace(out.String()))
	require.Equal(t, "Error Message", strings.TrimSpace(errOut.String()))
} | explode_data.jsonl/54732 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 336
} | [
2830,
3393,
37227,
1155,
353,
8840,
836,
8,
341,
26398,
11,
1848,
1669,
2643,
78976,
445,
86,
10530,
14,
10345,
1418,
10530,
1138,
17957,
35699,
1155,
11,
1848,
340,
42228,
11,
1848,
1669,
98263,
56372,
3332,
10731,
340,
17957,
35699,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestModBusSimulator opens a simulated device backed by the
// ComplexNonZeroSlab memory image and iterates the resulting devices.
func TestModBusSimulator(t *testing.T) {
	// Early-fatal guards replace the original nested if/else: t.Fatal
	// stops the test, so control flow is identical.
	sim, err := OpenSimulator(memory.ComplexNonZeroSlab, 40000)
	if err != nil {
		t.Fatal(err)
	}
	arr, err := Open(sim)
	if err != nil {
		t.Fatal(err)
	}
	arr.Do(func(d sunspec.Device) {})
} | explode_data.jsonl/11099 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 116
} | [
2830,
3393,
4459,
15073,
14027,
10511,
1155,
353,
8840,
836,
8,
341,
743,
1643,
11,
1848,
1669,
5264,
14027,
10511,
63230,
2961,
9111,
8121,
17999,
7442,
370,
11,
220,
19,
15,
15,
15,
15,
1215,
1848,
961,
2092,
341,
197,
3244,
26133,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestMapper_SliceFromTable decodes a header+rows string table into a
// slice of structs, covering embedded structs and `db` field tags.
func TestMapper_SliceFromTable(t *testing.T) {
	type Emb struct {
		B   string         `db:"b"`
		Map map[string]int `db:"m"`
	}
	type item struct {
		A int `db:"a"`
		Emb
	}

	// First row is the header; the remaining rows are values.
	table := [][]string{
		{"a", "b"},
		{"1", "b1"},
		{"2", "b2"},
	}

	mapper := &dbsteps.TableMapper{Decoder: form.NewDecoder()}
	mapper.Decoder.SetTagName("db")

	decoded, err := mapper.SliceFromTable(table, new(item))
	assert.NoError(t, err)

	items, ok := decoded.([]item)
	assert.True(t, ok)
	assert.Len(t, items, 2)
	assert.Equal(t, 1, items[0].A)
	assert.Equal(t, "b1", items[0].B)
	assert.Equal(t, 2, items[1].A)
	assert.Equal(t, "b2", items[1].B)
} | explode_data.jsonl/54744 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 304
} | [
2830,
3393,
10989,
1098,
4754,
3830,
2556,
1155,
353,
8840,
836,
8,
341,
13158,
29127,
2036,
341,
197,
12791,
256,
914,
260,
1565,
1999,
2974,
65,
8805,
197,
26873,
2415,
14032,
63025,
1565,
1999,
2974,
76,
8805,
197,
630,
13158,
1509,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestEntryPanicln verifies that Entry.Panicln panics with the *Entry
// itself as the panic value, carrying both the log message and the
// logged "err" data field.
func TestEntryPanicln(t *testing.T) {
	errBoom := fmt.Errorf("boom time")
	// Recover the panic raised by Panicln below and inspect its payload.
	defer func() {
		p := recover()
		assert.NotNil(t, p)
		switch pVal := p.(type) {
		case *Entry:
			// The message and the "err" field must survive the panic.
			assert.Equal(t, "kaboom", pVal.Message)
			assert.Equal(t, errBoom, pVal.Data["err"])
		default:
			t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
		}
	}()
	logger := New()
	// Discard output; only the panic value matters for this test.
	logger.Out = &bytes.Buffer{}
	entry := NewEntry(logger)
	entry.WithField("err", errBoom).Panicln("kaboom")
} | explode_data.jsonl/13865 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 213
} | [
2830,
3393,
5874,
47,
31270,
2261,
1155,
353,
8840,
836,
8,
341,
9859,
94234,
1669,
8879,
13080,
445,
95316,
882,
5130,
16867,
2915,
368,
341,
197,
3223,
1669,
11731,
741,
197,
6948,
93882,
1155,
11,
281,
692,
197,
8961,
281,
2208,
16... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestTriggerRepairOnIdle covers the idle-repair policy: an admin_repair
// task is created for a DUT with no recent task history or whose last
// task completed long ago, and is NOT created when the bot completed a
// task within the idle window or is currently running one.
// NOTE(review): mock expectations must be registered before the call
// under test — preserve the statement order.
func TestTriggerRepairOnIdle(t *testing.T) {
	// Scenario 1: no task history at all -> repair is triggered.
	Convey("with one known bot with no task history", t, func() {
		tf, validate := newTestFixture(t)
		defer validate()
		setKnownReadyBots(tf, []string{"dut_1"})
		expectListRecentTasks(tf, 0, "PENDING")
		expectListSortedRecentTasksForBot(tf, "dut_1")
		Convey("TriggerRepairOnIdle triggers a task for the dut", func() {
			expectTaskCreation(tf, "task_1", "dut_1", "", "admin_repair", 0)
			resp, err := tf.Tasker.TriggerRepairOnIdle(tf.C, &fleet.TriggerRepairOnIdleRequest{
				Selectors:    []*fleet.BotSelector{},
				IdleDuration: google.NewDuration(4),
				Priority:     20,
			})
			So(err, ShouldBeNil)
			assertBotsWithTaskCount(resp.BotTasks, map[string]int{"dut_1": 1})
		})
	})
	// Scenario 2: last task completed far in the past (fixed 2016
	// timestamp) -> still idle, repair is triggered.
	Convey("with one known bot with one task long ago", t, func() {
		tf, validate := newTestFixture(t)
		defer validate()
		setKnownReadyBots(tf, []string{"dut_1"})
		expectListRecentTasks(tf, 0, "PENDING")
		expectListSortedRecentTasksForBot(tf, "dut_1", &swarming.SwarmingRpcsTaskResult{
			State:       "COMPLETED",
			CompletedTs: "2016-01-02T10:04:05.999999999",
		})
		Convey("TriggerRepairOnIdle triggers a task for the dut", func() {
			expectTaskCreation(tf, "task_1", "dut_1", "", "admin_repair", 0)
			resp, err := tf.Tasker.TriggerRepairOnIdle(tf.C, &fleet.TriggerRepairOnIdleRequest{
				Selectors:    []*fleet.BotSelector{},
				IdleDuration: google.NewDuration(4),
				Priority:     20,
			})
			So(err, ShouldBeNil)
			assertBotsWithTaskCount(resp.BotTasks, map[string]int{"dut_1": 1})
		})
	})
	// Scenario 3: a task completed 5s ago, inside the 4-day idle window
	// -> bot is not idle, no repair task.
	Convey("with one known bot with one task in recent past", t, func() {
		tf, validate := newTestFixture(t)
		defer validate()
		setKnownReadyBots(tf, []string{"dut_1"})
		expectListRecentTasks(tf, 0, "PENDING")
		expectListSortedRecentTasksForBot(tf, "dut_1", &swarming.SwarmingRpcsTaskResult{
			State:       "COMPLETED",
			CompletedTs: timeOffsetFromNowInSwarmingFormat(-5 * time.Second),
		})
		Convey("TriggerRepairOnIdle does not trigger a task", func() {
			resp, err := tf.Tasker.TriggerRepairOnIdle(tf.C, &fleet.TriggerRepairOnIdleRequest{
				Selectors:    []*fleet.BotSelector{},
				IdleDuration: google.NewDuration(4 * 24 * time.Hour),
				Priority:     20,
			})
			So(err, ShouldBeNil)
			assertBotsWithTaskCount(resp.BotTasks, map[string]int{"dut_1": 0})
		})
	})
	// Scenario 4: a task is currently RUNNING -> bot is busy, no repair.
	Convey("with one known bot with one running task", t, func() {
		tf, validate := newTestFixture(t)
		defer validate()
		setKnownReadyBots(tf, []string{"dut_1"})
		expectListRecentTasks(tf, 0, "PENDING")
		expectListSortedRecentTasksForBot(tf, "dut_1", &swarming.SwarmingRpcsTaskResult{State: "RUNNING"})
		Convey("TriggerRepairOnIdle does not trigger a task", func() {
			resp, err := tf.Tasker.TriggerRepairOnIdle(tf.C, &fleet.TriggerRepairOnIdleRequest{
				Selectors:    []*fleet.BotSelector{},
				IdleDuration: google.NewDuration(4 * 24 * time.Hour),
				Priority:     20,
			})
			So(err, ShouldBeNil)
			assertBotsWithTaskCount(resp.BotTasks, map[string]int{"dut_1": 0})
		})
	})
} | explode_data.jsonl/60462 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1279
} | [
2830,
3393,
17939,
98386,
1925,
41370,
1155,
353,
8840,
836,
8,
341,
93070,
5617,
445,
4197,
825,
3881,
10924,
448,
902,
3383,
3840,
497,
259,
11,
2915,
368,
341,
197,
3244,
69,
11,
9593,
1669,
501,
69356,
1155,
340,
197,
16867,
9593,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestComputeToy integrates the toy fixture's ODE and checks three things:
// the solver evaluates dydx at exactly the expected sequence of x values,
// the solution matches the fixture's expected output, and the run stats
// (61 evaluations, 0 rejections, 10 steps) are reproduced.
func TestComputeToy(t *testing.T) {
	fixture := &fixtureToy
	input, output := &fixture.input, &fixture.output
	// Expected x values, in order, at which the integrator must call dydx.
	evaluations := []float64{
		0.0000000000000000e+00, 2.0000000000000004e-02, 2.9999999999999999e-02,
		8.0000000000000016e-02, 8.8888888888888892e-02, 1.0000000000000001e-01,
		1.0000000000000001e-01, 1.2000000000000001e-01, 1.3000000000000000e-01,
		1.8000000000000002e-01, 1.8888888888888888e-01, 2.0000000000000001e-01,
		2.0000000000000001e-01, 2.2000000000000003e-01, 2.3000000000000001e-01,
		2.8000000000000003e-01, 2.8888888888888892e-01, 3.0000000000000004e-01,
		3.0000000000000004e-01, 3.2000000000000006e-01, 3.3000000000000007e-01,
		3.8000000000000006e-01, 3.8888888888888895e-01, 4.0000000000000002e-01,
		4.0000000000000002e-01, 4.2000000000000004e-01, 4.3000000000000005e-01,
		4.8000000000000004e-01, 4.8888888888888893e-01, 5.0000000000000000e-01,
		5.0000000000000000e-01, 5.2000000000000002e-01, 5.3000000000000003e-01,
		5.8000000000000007e-01, 5.8888888888888891e-01, 5.9999999999999998e-01,
		5.9999999999999998e-01, 6.2000000000000000e-01, 6.3000000000000000e-01,
		6.7999999999999994e-01, 6.8888888888888888e-01, 6.9999999999999996e-01,
		6.9999999999999996e-01, 7.1999999999999997e-01, 7.2999999999999998e-01,
		7.8000000000000003e-01, 7.8888888888888886e-01, 7.9999999999999993e-01,
		7.9999999999999993e-01, 8.1999999999999995e-01, 8.2999999999999996e-01,
		8.7999999999999989e-01, 8.8888888888888884e-01, 8.9999999999999991e-01,
		8.9999999999999991e-01, 9.1999999999999993e-01, 9.2999999999999994e-01,
		9.7999999999999998e-01, 9.8888888888888893e-01, 1.0000000000000000e+00,
		1.0000000000000000e+00,
	}
	// Wrap the fixture's dydx: assert each call's x matches the next
	// expected evaluation point, then delegate to the real derivative.
	dydx := func(x float64, y, f []float64) {
		assert.Equal(x, evaluations[0], t)
		evaluations = evaluations[1:]
		input.dydx(x, y, f)
	}
	integrator, _ := New(fixture.configure())
	ys, _, stats, _ := integrator.ComputeWithStats(dydx, input.y0, input.xs)
	// Solution agrees with the fixture to 1e-15 absolute tolerance.
	assert.Close(ys, output.ys, 1e-15, t)
	assert.Equal(*stats, Stats{Evaluations: 61, Rejections: 0, Steps: 10}, t)
} | explode_data.jsonl/80694 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 913
} | [
2830,
3393,
46254,
67970,
1155,
353,
8840,
836,
8,
341,
1166,
12735,
1669,
609,
59612,
67970,
198,
22427,
11,
2550,
1669,
609,
59612,
10046,
11,
609,
59612,
13413,
271,
7727,
25510,
804,
1669,
3056,
3649,
21,
19,
515,
197,
197,
15,
13... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestIf1 builds a small bytecode program with a conditional jump.
// true is pushed, so JmpIf with JcNil does NOT branch: the fall-through
// arm computes 50 / 6 (integer quotient 8) and jumps over the 5 % 2 arm.
// The final check asserts the popped result is 8.
func TestIf1(t *testing.T) {
	label1 := defaultImpl.NewLabel("a").(*Label)
	label2 := NewLabel("b")
	// Program layout: the builder chain order IS the instruction order.
	code := newBuilder().
		Push(true).
		JmpIf(exec.JcNil, label1).
		Push(50).
		Push(6).
		BuiltinOp(Int, OpQuo).
		Jmp(label2).
		Label(label1).
		Push(5).
		Push(2).
		BuiltinOp(Int, OpMod).
		Label(label2).
		Resolve()
	ctx := NewContext(code)
	ctx.Exec(0, code.Len())
	if v := checkPop(ctx); v != 8 {
		t.Fatal("50 6 div != 8, ret =", v)
	}
	// Exercise the Name accessor; the value itself is not asserted.
	_ = label1.Name()
} | explode_data.jsonl/74958 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 221
} | [
2830,
3393,
2679,
16,
1155,
353,
8840,
836,
8,
341,
29277,
16,
1669,
1638,
9673,
7121,
2476,
445,
64,
1827,
4071,
2476,
340,
29277,
17,
1669,
1532,
2476,
445,
65,
1138,
43343,
1669,
501,
3297,
25829,
197,
10025,
1116,
3715,
4292,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestClient_reattachGRPC verifies the reattach flow over gRPC: start a
// plugin with one client, capture its ReattachConfig, connect a second
// client via that config, dispense and call the plugin, then confirm the
// process exits cleanly after Kill.
func TestClient_reattachGRPC(t *testing.T) {
	process := helperProcess("test-grpc")
	c := NewClient(&ClientConfig{
		Cmd:              process,
		HandshakeConfig:  testHandshake,
		Plugins:          testGRPCPluginMap,
		AllowedProtocols: []Protocol{ProtocolGRPC},
	})
	defer c.Kill()
	// Grab the RPC client
	_, err := c.Client()
	if err != nil {
		t.Fatalf("err should be nil, got %s", err)
	}
	// Get the reattach configuration
	reattach := c.ReattachConfig()
	// Create a new client that attaches to the already-running process
	// instead of launching a new one.
	c = NewClient(&ClientConfig{
		Reattach:         reattach,
		HandshakeConfig:  testHandshake,
		Plugins:          testGRPCPluginMap,
		AllowedProtocols: []Protocol{ProtocolGRPC},
	})
	// Grab the RPC client
	client, err := c.Client()
	if err != nil {
		t.Fatalf("err should be nil, got %s", err)
	}
	// Grab the impl
	raw, err := client.Dispense("test")
	if err != nil {
		t.Fatalf("err should be nil, got %s", err)
	}
	impl, ok := raw.(testInterface)
	if !ok {
		t.Fatalf("bad: %#v", raw)
	}
	// A real round-trip through the reattached plugin: Double(21) == 42.
	result := impl.Double(21)
	if result != 42 {
		t.Fatalf("bad: %#v", result)
	}
	// Kill it
	c.Kill()
	// Test that it knows it is exited
	if !c.Exited() {
		t.Fatal("should say client has exited")
	}
	// killed() true would mean the process had to be force-killed rather
	// than shutting down on its own.
	if c.killed() {
		t.Fatal("process failed to exit gracefully")
	}
} | explode_data.jsonl/57842 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 518
} | [
2830,
3393,
2959,
62,
1222,
83,
610,
8626,
4872,
1155,
353,
8840,
836,
8,
341,
53314,
1669,
13137,
7423,
445,
1944,
24321,
3992,
1138,
1444,
1669,
1532,
2959,
2099,
2959,
2648,
515,
197,
6258,
2277,
25,
1060,
1882,
345,
197,
197,
2314... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
// TestParse_ValidLines feeds one well-formed statsd line of each
// supported metric type (counter, set, gauge, timer ms/h) to the parser
// and expects every one to parse without error.
func TestParse_ValidLines(t *testing.T) {
	plugin := NewTestStatsd()
	lines := []string{
		"valid:45|c",
		"valid:45|s",
		"valid:45|g",
		"valid.timer:45|ms",
		"valid.timer:45|h",
	}
	for _, line := range lines {
		if err := plugin.parseStatsdLine(line); err != nil {
			t.Errorf("Parsing line %s should not have resulted in an error\n", line)
		}
	}
} | explode_data.jsonl/14362 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 161
} | [
2830,
3393,
14463,
97279,
16794,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
1532,
2271,
16635,
67,
741,
56322,
16794,
1669,
3056,
917,
515,
197,
197,
1,
1891,
25,
19,
20,
91,
66,
756,
197,
197,
1,
1891,
25,
19,
20,
91,
82,
756,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestContainerWait exercises ContainerWait against real containers:
// timeout behavior, healthy detection, nonexistent containers, early
// exit, missing healthchecks, and eventually-healthy containers.
// NOTE(review): integration test — requires a working Docker environment
// and the ddev-webserver image.
func TestContainerWait(t *testing.T) {
	assert := asrt.New(t)

	labels := map[string]string{
		"com.ddev.site-name": testContainerName,
	}

	// Try a zero-wait, should show timed-out
	_, err := ContainerWait(0, labels)
	assert.Error(err)
	if err != nil {
		assert.Contains(err.Error(), "health check timed out")
	}

	// Try 15-second wait for "healthy", should show OK
	healthDetail, err := ContainerWait(30, labels)
	assert.NoError(err)

	assert.Contains(healthDetail, "phpstatus: OK")

	// Try a nonexistent container, should get error
	labels = map[string]string{"com.ddev.site-name": "nothing-there"}
	_, err = ContainerWait(1, labels)
	require.Error(t, err)
	assert.Contains(err.Error(), "failed to query container")

	// If we just run a quick container and it immediately exits, ContainerWait should find it not there
	// and note that it exited.
	labels = map[string]string{"test": "quickexit"}
	_ = RemoveContainersByLabels(labels)
	cID, _, err := RunSimpleContainer("busybox:latest", t.Name()+util.RandString(5), []string{"ls"}, nil, nil, nil, "0", false, true, labels)
	t.Cleanup(func() {
		_ = RemoveContainer(cID, 0)
	})
	require.NoError(t, err)
	_, err = ContainerWait(5, labels)
	assert.Error(err)
	assert.Contains(err.Error(), "container exited")
	_ = RemoveContainer(cID, 0)

	// If we run a container that does not have a healthcheck
	// it should be found as good immediately
	labels = map[string]string{"test": "nohealthcheck"}
	_ = RemoveContainersByLabels(labels)
	cID, _, err = RunSimpleContainer("busybox:latest", t.Name()+util.RandString(5), []string{"sleep", "60"}, nil, nil, nil, "0", false, true, labels)
	t.Cleanup(func() {
		_ = RemoveContainer(cID, 0)
	})
	require.NoError(t, err)
	_, err = ContainerWait(5, labels)
	assert.NoError(err)
	_ = RemoveContainer(cID, 0)

	ddevWebserver := version.WebImg + ":" + version.WebTag
	// If we run a container that *does* have a healthcheck but it's unhealthy
	// then ContainerWait shouldn't return until specified wait, and should fail
	// Use ddev-webserver for this; it won't have good health on normal run
	labels = map[string]string{"test": "hashealthcheckbutbad"}
	_ = RemoveContainersByLabels(labels)
	cID, _, err = RunSimpleContainer(ddevWebserver, t.Name()+util.RandString(5), []string{"sleep", "5"}, nil, nil, nil, "0", false, true, labels)
	t.Cleanup(func() {
		_ = RemoveContainer(cID, 0)
	})
	require.NoError(t, err)
	_, err = ContainerWait(3, labels)
	assert.Error(err)
	assert.Contains(err.Error(), "timed out without becoming healthy")
	_ = RemoveContainer(cID, 0)

	// If we run a container that *does* have a healthcheck but it's not healthy for a while
	// then ContainerWait should detect failure early, but should succeed later
	labels = map[string]string{"test": "hashealthcheckbutbad"}
	_ = RemoveContainersByLabels(labels)
	cID, _, err = RunSimpleContainer(ddevWebserver, t.Name()+util.RandString(5), []string{"bash", "-c", "sleep 5 && /start.sh"}, nil, nil, nil, "0", false, true, labels)
	t.Cleanup(func() {
		_ = RemoveContainer(cID, 0)
	})
	require.NoError(t, err)
	_, err = ContainerWait(3, labels)
	assert.Error(err)
	assert.Contains(err.Error(), "timed out without becoming healthy")
	// Try it again, wait 10s for health; on macOS it usually takes about 2s for ddev-webserver to become healthy
	_, err = ContainerWait(20, labels)
	assert.NoError(err)
	_ = RemoveContainer(cID, 0)
} | explode_data.jsonl/41373 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1168
} | [
2830,
3393,
4502,
14190,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
438,
3342,
7121,
1155,
692,
95143,
1669,
2415,
14032,
30953,
515,
197,
197,
1,
874,
950,
3583,
22115,
11494,
788,
1273,
4502,
675,
345,
197,
630,
197,
322,
9735,
264,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSlotWrapCallable(t *testing.T) {
// fun returns a tuple: (ret, args, kwargs) where ret is the return
// value of the slot, args and kwargs are the positional and keyword
// parameters passed to it.
fun := wrapFuncForTest(func(f *Frame, s slot, ret *Object, args ...*Object) (*Object, *BaseException) {
gotArgs := None
gotKWArgs := None
wrapped := newBuiltinFunction("wrapped", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
if ret.isInstance(TypeType) && toTypeUnsafe(ret).isSubclass(BaseExceptionType) {
return nil, f.Raise(ret, nil, nil)
}
gotArgs = NewTuple(args.makeCopy()...).ToObject()
gotKWArgs = kwargs.makeDict().ToObject()
return ret, nil
}).ToObject()
s.wrapCallable(wrapped)
fnField := reflect.ValueOf(s).Elem().Field(0)
if fnField.IsNil() {
// Return None to denote the slot was empty.
return None, nil
}
// Wrap the underlying slot function (s.Fn) and call it. This
// is more convenient than using reflection to call it.
fn, raised := WrapNative(f, fnField)
if raised != nil {
return nil, raised
}
result, raised := fn.Call(f, append(Args{f.ToObject()}, args...), nil)
if raised != nil {
return nil, raised
}
return NewTuple(result, gotArgs, gotKWArgs).ToObject(), nil
})
o := newObject(ObjectType)
cases := []invokeTestCase{
{args: wrapArgs(&basisSlot{}, "no"), want: None},
{args: wrapArgs(&binaryOpSlot{}, "ret", "foo", "bar"), want: newTestTuple("ret", newTestTuple("foo", "bar"), NewDict()).ToObject()},
{args: wrapArgs(&binaryOpSlot{}, RuntimeErrorType, "foo", "bar"), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&callSlot{}, "ret", true, wrapArgs(1, 2), None), want: newTestTuple("ret", newTestTuple(true, 1, 2), NewDict()).ToObject()},
{args: wrapArgs(&callSlot{}, "ret", "foo", None, wrapKWArgs("a", "b")), want: newTestTuple("ret", newTestTuple("foo"), newTestDict("a", "b")).ToObject()},
{args: wrapArgs(&callSlot{}, "ret", 3.14, wrapArgs(false), wrapKWArgs("foo", 42)), want: newTestTuple("ret", newTestTuple(3.14, false), newTestDict("foo", 42)).ToObject()},
{args: wrapArgs(&callSlot{}, RuntimeErrorType, true, wrapArgs(1, 2), None), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&delAttrSlot{}, "ret", o, "foo"), want: newTestTuple(None, newTestTuple(o, "foo"), NewDict()).ToObject()},
{args: wrapArgs(&delAttrSlot{}, RuntimeErrorType, o, "foo"), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&deleteSlot{}, "ret", o, 3.14), want: newTestTuple(None, newTestTuple(o, 3.14), NewDict()).ToObject()},
{args: wrapArgs(&deleteSlot{}, RuntimeErrorType, o, 3.14), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&delItemSlot{}, "ret", o, false), want: newTestTuple(None, newTestTuple(o, false), NewDict()).ToObject()},
{args: wrapArgs(&delItemSlot{}, RuntimeErrorType, o, false), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&getAttributeSlot{}, "ret", o, "foo"), want: newTestTuple("ret", newTestTuple(o, "foo"), NewDict()).ToObject()},
{args: wrapArgs(&getAttributeSlot{}, RuntimeErrorType, o, "foo"), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&getSlot{}, "ret", o, "foo", SetType), want: newTestTuple("ret", newTestTuple(o, "foo", SetType), NewDict()).ToObject()},
{args: wrapArgs(&getSlot{}, RuntimeErrorType, o, "foo", SetType), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&initSlot{}, "ret", true, wrapArgs(1, 2), None), want: newTestTuple("ret", newTestTuple(true, 1, 2), NewDict()).ToObject()},
{args: wrapArgs(&initSlot{}, "ret", "foo", None, wrapKWArgs("a", "b")), want: newTestTuple("ret", newTestTuple("foo"), newTestDict("a", "b")).ToObject()},
{args: wrapArgs(&initSlot{}, "ret", 3.14, wrapArgs(false), wrapKWArgs("foo", 42)), want: newTestTuple("ret", newTestTuple(3.14, false), newTestDict("foo", 42)).ToObject()},
{args: wrapArgs(&initSlot{}, RuntimeErrorType, true, wrapArgs(1, 2), None), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&nativeSlot{}, "no"), want: None},
{args: wrapArgs(&newSlot{}, "ret", StrType, wrapArgs(1, 2), None), want: newTestTuple("ret", newTestTuple(StrType, 1, 2), NewDict()).ToObject()},
{args: wrapArgs(&newSlot{}, "ret", ObjectType, None, wrapKWArgs("a", "b")), want: newTestTuple("ret", newTestTuple(ObjectType), newTestDict("a", "b")).ToObject()},
{args: wrapArgs(&newSlot{}, "ret", ListType, wrapArgs(false), wrapKWArgs("foo", 42)), want: newTestTuple("ret", newTestTuple(ListType, false), newTestDict("foo", 42)).ToObject()},
{args: wrapArgs(&newSlot{}, RuntimeErrorType, IntType, wrapArgs(1, 2), None), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&setAttrSlot{}, "ret", o, "foo", 42), want: newTestTuple(None, newTestTuple(o, "foo", 42), NewDict()).ToObject()},
{args: wrapArgs(&setAttrSlot{}, RuntimeErrorType, o, "foo", 42), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&setItemSlot{}, "ret", o, "foo", 42), want: newTestTuple(None, newTestTuple(o, "foo", 42), NewDict()).ToObject()},
{args: wrapArgs(&setItemSlot{}, RuntimeErrorType, o, "foo", 42), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&setSlot{}, "ret", o, "foo", 42), want: newTestTuple(None, newTestTuple(o, "foo", 42), NewDict()).ToObject()},
{args: wrapArgs(&setSlot{}, RuntimeErrorType, o, "foo", 42), wantExc: mustCreateException(RuntimeErrorType, "")},
{args: wrapArgs(&unaryOpSlot{}, "ret", "foo"), want: newTestTuple("ret", newTestTuple("foo"), NewDict()).ToObject()},
{args: wrapArgs(&unaryOpSlot{}, RuntimeErrorType, "foo"), wantExc: mustCreateException(RuntimeErrorType, "")},
}
for _, cas := range cases {
if err := runInvokeTestCase(fun, &cas); err != "" {
t.Error(err)
}
}
} | explode_data.jsonl/3223 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2169
} | [
2830,
3393,
19877,
26787,
40410,
1155,
353,
8840,
836,
8,
341,
197,
322,
2464,
4675,
264,
14405,
25,
320,
2122,
11,
2827,
11,
16494,
8,
1380,
2112,
374,
279,
470,
198,
197,
322,
897,
315,
279,
9446,
11,
2827,
323,
16494,
525,
279,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestAddEmptyTx(t *testing.T) {
q, mem := initEnv(0)
defer q.Close()
defer mem.Close()
msg := mem.client.NewMessage("mempool", types.EventTx, nil)
mem.client.Send(msg, true)
resp, err := mem.client.Wait(msg)
if err != nil {
t.Error(err)
return
}
if string(resp.GetData().(*types.Reply).GetMsg()) != types.ErrEmptyTx.Error() {
t.Error("TestAddEmptyTx failed")
}
} | explode_data.jsonl/16816 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 158
} | [
2830,
3393,
2212,
3522,
31584,
1155,
353,
8840,
836,
8,
341,
18534,
11,
1833,
1669,
2930,
14359,
7,
15,
340,
16867,
2804,
10421,
741,
16867,
1833,
10421,
2822,
21169,
1669,
1833,
6581,
7121,
2052,
445,
76,
3262,
1749,
497,
4494,
6904,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestUpdateNotExistingInstance(t *testing.T) {
// given
suite := NewBrokerSuiteTest(t)
defer suite.TearDown()
iid := uuid.New().String()
resp := suite.CallAPI("PUT", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true&plan_id=7d55d31d-35ae-4438-bf13-6ffdfa107d9f&service_id=47c9dcbf-ff30-448e-ab36-d3bad66ba281", iid),
`{
"service_id": "47c9dcbf-ff30-448e-ab36-d3bad66ba281",
"plan_id": "7d55d31d-35ae-4438-bf13-6ffdfa107d9f",
"context": {
"sm_platform_credentials": {
"url": "https://sm.url",
"credentials": {}
},
"globalaccount_id": "g-account-id",
"subaccount_id": "sub-id",
"user_id": "john.smith@email.com"
},
"parameters": {
"name": "testing-cluster",
"oidc": {
"clientID": "id-ooo",
"signingAlgs": ["RSA256"],
"issuerURL": "https://issuer.url.com"
}
}
}`)
opID := suite.DecodeOperationID(resp)
suite.processProvisioningByOperationID(opID)
// provisioning done, let's start an update
// when
resp = suite.CallAPI("PATCH", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/not-existing"),
`{
"service_id": "47c9dcbf-ff30-448e-ab36-d3bad66ba281",
"plan_id": "4deee563-e5ec-4731-b9b1-53b42d855f0c",
"context": {
"globalaccount_id": "g-account-id",
"user_id": "john.smith@email.com"
}
}`)
assert.Equal(t, http.StatusNotFound, resp.StatusCode)
} | explode_data.jsonl/8120 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 772
} | [
2830,
3393,
4289,
2623,
53067,
2523,
1155,
353,
8840,
836,
8,
341,
197,
322,
2661,
198,
96572,
1669,
1532,
65545,
28000,
2271,
1155,
340,
16867,
16182,
836,
682,
4454,
741,
8230,
307,
1669,
16040,
7121,
1005,
703,
2822,
34653,
1669,
161... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestApplicationRouterFailure(t *testing.T) {
router := createApplicationRouter(ctx, testSettings)
req, err := http.NewRequest(http.MethodGet, "/trouble", nil)
require.NoError(t, err)
resp := httptest.NewRecorder()
router.ServeHTTP(resp, req)
require.Equal(t, http.StatusNotFound, resp.Code)
} | explode_data.jsonl/20252 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 109
} | [
2830,
3393,
4988,
9523,
17507,
1155,
353,
8840,
836,
8,
341,
67009,
1669,
1855,
4988,
9523,
7502,
11,
1273,
6086,
692,
24395,
11,
1848,
1669,
1758,
75274,
19886,
20798,
1949,
11,
3521,
376,
1201,
497,
2092,
340,
17957,
35699,
1155,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBlobClientDeleteFrom(t *testing.T) {
localNodeID := roachpb.NodeID(1)
remoteNodeID := roachpb.NodeID(2)
localExternalDir, remoteExternalDir, stopper, cleanUpFn := createTestResources(t)
defer cleanUpFn()
clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
rpcContext := rpc.NewInsecureTestingContext(clock, stopper)
rpcContext.TestingAllowNamedRPCToAnonymousServer = true
blobClientFactory := setUpService(t, rpcContext, localNodeID, remoteNodeID, localExternalDir, remoteExternalDir)
localFileContent := []byte("local_file")
remoteFileContent := []byte("remote_file")
writeTestFile(t, filepath.Join(localExternalDir, "test/local.csv"), localFileContent)
writeTestFile(t, filepath.Join(remoteExternalDir, "test/remote.csv"), remoteFileContent)
writeTestFile(t, filepath.Join(remoteExternalDir, "test/remote2.csv"), remoteFileContent)
for _, tc := range []struct {
name string
nodeID roachpb.NodeID
filename string
err string
}{
{
"delete-remote-file",
remoteNodeID,
"test/remote.csv",
"",
},
{
"delete-local-file",
localNodeID,
"test/local.csv",
"",
},
{
"delete-remote-file-does-not-exist",
remoteNodeID,
"test/doesnotexist",
"no such file",
},
{
"delete-directory-not-empty",
remoteNodeID,
"test",
"directory not empty",
},
{
"delete-directory-empty", // this should work
localNodeID,
"test",
"",
},
} {
t.Run(tc.name, func(t *testing.T) {
ctx := context.TODO()
blobClient, err := blobClientFactory(ctx, tc.nodeID)
if err != nil {
t.Fatal(err)
}
err = blobClient.Delete(ctx, tc.filename)
if err != nil {
if tc.err != "" && testutils.IsError(err, tc.err) {
// the correct error was returned
return
}
t.Fatal(err)
}
_, err = ioutil.ReadFile(filepath.Join(localExternalDir, tc.filename))
if err == nil {
t.Fatal(err, "file should have been deleted")
}
})
}
} | explode_data.jsonl/82493 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 810
} | [
2830,
3393,
37985,
2959,
6435,
3830,
1155,
353,
8840,
836,
8,
341,
8854,
1955,
915,
1669,
926,
610,
16650,
21714,
915,
7,
16,
340,
197,
18147,
1955,
915,
1669,
926,
610,
16650,
21714,
915,
7,
17,
340,
8854,
25913,
6184,
11,
8699,
25... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestGetErrorRatesNilReader(t *testing.T) {
qs := NewMetricsQueryService(nil)
qParams := &metricsstore.ErrorRateQueryParameters{}
r, err := qs.GetErrorRates(context.Background(), qParams)
assert.Zero(t, r)
assert.EqualError(t, err, errNilReader.Error())
} | explode_data.jsonl/12528 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
1949,
1454,
82623,
19064,
5062,
1155,
353,
8840,
836,
8,
341,
18534,
82,
1669,
1532,
27328,
2859,
1860,
27907,
340,
18534,
4870,
1669,
609,
43262,
4314,
6141,
11564,
2859,
9706,
16094,
7000,
11,
1848,
1669,
32421,
2234,
1454,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_templates_001(t *testing.T) {
tool.Test(t, nil, new(TApp), func(app *TApp) {
if app.TemplateCache == nil {
t.Error("nil Templates unit")
} else {
t.Log(app.Templates)
}
})
} | explode_data.jsonl/43645 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 90
} | [
2830,
3393,
49526,
62,
15,
15,
16,
1155,
353,
8840,
836,
8,
341,
197,
14172,
8787,
1155,
11,
2092,
11,
501,
4140,
2164,
701,
2915,
11462,
353,
51,
2164,
8,
341,
197,
743,
906,
52530,
8233,
621,
2092,
341,
298,
3244,
6141,
445,
838... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFloat64SetIsSorted(t *testing.T) {
const N = 1000
z := NewFloat64()
rand.Seed(time.Now().Unix())
// Test whether the ramdom inserted values sorted
for i := 0; i < N; i++ {
z.Add(fastrand.Float64(), fmt.Sprint(i))
}
testIsSorted(t, z)
testInternalSpan(t, z)
// Randomly update score
for i := 0; i < N; i++ {
// 1/2
if rand.Float64() > 0.5 {
continue
}
z.Add(float64(i), fmt.Sprint(i))
}
testIsSorted(t, z)
testInternalSpan(t, z)
// Randomly add or delete value
for i := 0; i < N; i++ {
// 1/2
if rand.Float64() > 0.5 {
continue
}
z.Remove(fmt.Sprint(i))
}
testIsSorted(t, z)
testInternalSpan(t, z)
} | explode_data.jsonl/24994 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 303
} | [
2830,
3393,
5442,
21,
19,
1649,
3872,
51051,
1155,
353,
8840,
836,
8,
341,
4777,
451,
284,
220,
16,
15,
15,
15,
198,
20832,
1669,
1532,
5442,
21,
19,
741,
7000,
437,
5732,
291,
9730,
13244,
1005,
55832,
12367,
197,
322,
3393,
3425,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestCreateDir(t *testing.T) {
dir, err := ioutil.TempDir("", "wlb-checkpoint-test")
if err != nil {
t.Fatal(err)
}
defer func() {
err := os.RemoveAll(dir)
if err != nil {
t.Fatal(err)
}
}()
stateDir := filepath.Join(dir, "state", "dir", "does", "not", "exists")
file := filepath.Join(stateDir, ".winlogbeat.yml")
cp := &Checkpoint{file: file}
if !assert.False(t, fileExists(file), "%s should not exist", file) {
return
}
if err = cp.createDir(); err != nil {
t.Fatal("createDir", err)
}
if !assert.True(t, fileExists(stateDir), "%s should exist", file) {
return
}
// mkdir on Windows does not pass the POSIX mode to the CreateDirectory
// syscall so doesn't test the mode.
if runtime.GOOS != "windows" {
fileInfo, err := os.Stat(stateDir)
if assert.NoError(t, err) {
assert.Equal(t, true, fileInfo.IsDir())
assert.Equal(t, os.FileMode(0750), fileInfo.Mode().Perm())
}
}
} | explode_data.jsonl/17784 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 382
} | [
2830,
3393,
4021,
6184,
1155,
353,
8840,
836,
8,
341,
48532,
11,
1848,
1669,
43144,
65009,
6184,
19814,
330,
86,
21123,
15934,
2768,
16839,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
532,
16867,
2915,
368,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestStripRepoURLCredentials(t *testing.T) {
for _, testCase := range []struct {
url string
expected string
}{
{"https://github.com/org/repo.git", "https://github.com/org/repo.git"},
{"https://USER:PASS@github.com/org/repo.git", "https://github.com/org/repo.git"},
{"https://PASS:@github.com/org/repo.git", "https://github.com/org/repo.git"},
{"https://:PASS@gitlab.com/org/repo", "https://gitlab.com/org/repo"},
{"http://USER:PASS@github.com/org/repo.git", "http://github.com/org/repo.git"},
{"ssh://USER:PASS@github.com/org/repo.git", "ssh://github.com/org/repo.git"},
{"git@github.com:org/repo.git", "ssh://github.com/org/repo.git"},
{"github.com/org/repo.git", "https://github.com/org/repo.git"},
{"bitbucket.org/org/repo", "https://bitbucket.org/org/repo"},
{"gitlab.com/org/repo", "https://gitlab.com/org/repo"},
{"10.3.1.5/foo/bar.git", "https://10.3.1.5/foo/bar.git"},
{"localhost:8888/foo/bar.git", "http://localhost:8888/foo/bar.git"},
{"/home/user/local-repo", "file:///home/user/local-repo"},
{"unknown", "https://unknown"},
} {
str := gitutil.StripRepoURLCredentials(testCase.url)
assert.Equal(t, testCase.expected, str)
}
} | explode_data.jsonl/67878 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 516
} | [
2830,
3393,
5901,
25243,
3144,
27025,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
54452,
1669,
2088,
3056,
1235,
341,
197,
19320,
414,
914,
198,
197,
42400,
914,
198,
197,
59403,
197,
197,
4913,
2428,
1110,
5204,
905,
41361,
10758,
5368,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestSlowCreateFail(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
p := NewResourcePool(SlowFailFactory, 2, 2, time.Second)
defer p.Close()
ch := make(chan bool)
// The third Get should not wait indefinitely
for i := 0; i < 3; i++ {
go func() {
p.Get(ctx)
ch <- true
}()
}
for i := 0; i < 3; i++ {
<-ch
}
if p.Available() != 2 {
t.Errorf("Expecting 2, received %d", p.Available())
}
} | explode_data.jsonl/663 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 189
} | [
2830,
3393,
58289,
4021,
19524,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
19047,
741,
33096,
915,
4202,
7,
15,
340,
18032,
4202,
7,
15,
340,
3223,
1669,
1532,
4783,
10551,
3759,
10303,
19524,
4153,
11,
220,
17,
11,
220,
17,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestContext2Plan_targetedModuleOrphan(t *testing.T) {
m := testModule(t, "plan-targeted-module-orphan")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[string]providers.Factory{
"aws": testProviderFuncFixed(p),
},
),
State: MustShimLegacyState(&State{
Modules: []*ModuleState{
&ModuleState{
Path: []string{"root", "child"},
Resources: map[string]*ResourceState{
"aws_instance.orphan": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "i-789xyz",
},
Provider: "provider.aws",
},
"aws_instance.nottargeted": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "i-abc123",
},
Provider: "provider.aws",
},
},
},
},
}),
Destroy: true,
Targets: []addrs.Targetable{
addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource(
addrs.ManagedResourceMode, "aws_instance", "orphan",
),
},
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
ty := schema.ImpliedType()
if len(plan.Changes.Resources) != 1 {
t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
}
res := plan.Changes.Resources[0]
ric, err := res.Decode(ty)
if err != nil {
t.Fatal(err)
}
if ric.Addr.String() != "module.child.aws_instance.orphan" {
t.Fatalf("unexpected resource :%s", ric.Addr)
}
if res.Action != plans.Delete {
t.Fatalf("resource %s should be deleted", ric.Addr)
}
} | explode_data.jsonl/28707 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 743
} | [
2830,
3393,
1972,
17,
20485,
11123,
291,
3332,
2195,
9943,
1155,
353,
8840,
836,
8,
341,
2109,
1669,
1273,
3332,
1155,
11,
330,
10393,
18489,
291,
46718,
26511,
9943,
1138,
3223,
1669,
1273,
5179,
445,
8635,
1138,
3223,
98063,
24911,
28... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestUserAgentWithDefault(t *testing.T) {
txp := httptransport.UserAgentTransport{
RoundTripper: httptransport.FakeTransport{
Resp: &http.Response{StatusCode: 200},
},
}
req := &http.Request{URL: &url.URL{
Scheme: "https",
Host: "www.google.com",
Path: "/",
}}
req.Header = http.Header{}
resp, err := txp.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
if resp.Request.Header.Get("User-Agent") != "miniooni/0.1.0-dev" {
t.Fatal("not the User-Agent we expected")
}
} | explode_data.jsonl/65210 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 216
} | [
2830,
3393,
1474,
16810,
2354,
3675,
1155,
353,
8840,
836,
8,
341,
3244,
35725,
1669,
1758,
26445,
7344,
16810,
27560,
515,
197,
11143,
795,
21884,
6922,
25,
1758,
26445,
991,
726,
27560,
515,
298,
197,
36555,
25,
609,
1254,
12574,
90,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestExpr_map_default_values(t *testing.T) {
env := map[string]interface{}{
"foo": map[string]string{},
"bar": map[string]*string{},
}
input := `foo['missing'] == '' && bar['missing'] == nil`
program, err := expr.Compile(input, expr.Env(env))
require.NoError(t, err)
output, err := expr.Run(program, env)
require.NoError(t, err)
require.Equal(t, true, output)
} | explode_data.jsonl/36905 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 149
} | [
2830,
3393,
16041,
5376,
9993,
9146,
1155,
353,
8840,
836,
8,
341,
57538,
1669,
2415,
14032,
31344,
67066,
197,
197,
1,
7975,
788,
2415,
14032,
30953,
38837,
197,
197,
1,
2257,
788,
2415,
14032,
8465,
917,
38837,
197,
630,
22427,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestHeadTracker_Start_NewHeads(t *testing.T) {
t.Parallel()
db := pgtest.NewGormDB(t)
config := newCfg(t)
orm := headtracker.NewORM(db, cltest.FixtureChainID)
ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t)
sub.On("Err").Return(nil)
sub.On("Unsubscribe").Return(nil)
chStarted := make(chan struct{})
ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil)
ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).
Run(func(mock.Arguments) { close(chStarted) }).
Return(sub, nil)
ht := createHeadTracker(ethClient, config, orm)
assert.NoError(t, ht.Start())
<-chStarted
ht.Stop()
ethClient.AssertExpectations(t)
} | explode_data.jsonl/63748 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 274
} | [
2830,
3393,
12346,
31133,
38056,
39582,
1519,
7664,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
20939,
1669,
17495,
1944,
7121,
38,
493,
3506,
1155,
340,
25873,
1669,
501,
42467,
1155,
340,
197,
493,
1669,
1968,
50395,
7121,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCall_GetTransactionReceipt(t *testing.T) {
mockJSONRPC := &mocks.JSONRPC{}
mockGraphQL := &mocks.GraphQL{}
c := &Client{
c: mockJSONRPC,
g: mockGraphQL,
traceSemaphore: semaphore.NewWeighted(100),
}
ctx := context.Background()
mockJSONRPC.On(
"CallContext",
ctx,
mock.Anything,
"eth_getTransactionReceipt",
common.HexToHash("0x5e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a"),
).Return(
nil,
).Run(
func(args mock.Arguments) {
r := args.Get(1).(**types.Receipt)
file, err := ioutil.ReadFile(
"testdata/tx_receipt_1.json",
)
assert.NoError(t, err)
*r = new(types.Receipt)
assert.NoError(t, (*r).UnmarshalJSON(file))
},
).Once()
resp, err := c.Call(
ctx,
&RosettaTypes.CallRequest{
Method: "eth_getTransactionReceipt",
Parameters: map[string]interface{}{
"tx_hash": "0x5e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a",
},
},
)
assert.NoError(t, err)
file, err := ioutil.ReadFile("testdata/tx_receipt_1.json")
assert.NoError(t, err)
var receiptMap map[string]interface{}
assert.NoError(t, json.Unmarshal(file, &receiptMap))
// set null fields
receiptMap["root"] = "0x"
receiptMap["contractAddress"] = "0x0000000000000000000000000000000000000000"
delete(receiptMap, "from")
delete(receiptMap, "to")
assert.Equal(t, &RosettaTypes.CallResponse{
Result: receiptMap,
Idempotent: false,
}, resp)
assert.NoError(t, err)
mockJSONRPC.AssertExpectations(t)
mockGraphQL.AssertExpectations(t)
} | explode_data.jsonl/55496 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 724
} | [
2830,
3393,
7220,
13614,
8070,
67461,
1155,
353,
8840,
836,
8,
341,
77333,
5370,
29528,
1669,
609,
16712,
82,
18009,
29528,
16094,
77333,
88637,
1669,
609,
16712,
82,
40237,
3588,
31483,
1444,
1669,
609,
2959,
515,
197,
1444,
25,
1060,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
testHookCollectGarbageChan := make(chan uint64)
defer setTestHookCollectGarbage(func(collectedCount uint64) {
// don't trigger if we haven't collected anything - this may
// result in a race condition when we inspect the gcsize below,
// causing the database to shut down while the cleanup to happen
// before the correct signal has been communicated here.
if collectedCount == 0 {
return
}
testHookCollectGarbageChan <- collectedCount
})()
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
db := newTestDB(t, &Options{
Capacity: 100,
})
addrs := make([]swarm.Address, 0)
// upload random chunks just up to the capacity
for i := 0; i < int(db.cacheCapacity)-1; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
addrs = append(addrs, ch.Address())
}
// set update gc test hook to signal when
// update gc goroutine is done by closing
// testHookUpdateGCChan channel
testHookUpdateGCChan := make(chan struct{})
resetTestHookUpdateGC := setTestHookUpdateGC(func() {
close(testHookUpdateGCChan)
})
// request the oldest synced chunk
// to prioritize it in the gc index
// not to be collected
_, err := db.Get(context.Background(), storage.ModeGetRequest, addrs[0])
if err != nil {
t.Fatal(err)
}
// wait for update gc goroutine to finish for garbage
// collector to be correctly triggered after the last upload
select {
case <-testHookUpdateGCChan:
case <-time.After(10 * time.Second):
t.Fatal("updateGC was not called after getting chunk with ModeGetRequest")
}
// no need to wait for update gc hook anymore
resetTestHookUpdateGC()
// upload and sync another chunk to trigger
// garbage collection
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err = db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
addrs = append(addrs, ch.Address())
// wait for garbage collection
gcTarget := db.gcTarget()
var totalCollectedCount uint64
for {
select {
case c := <-testHookCollectGarbageChan:
totalCollectedCount += c
case <-time.After(10 * time.Second):
t.Error("collect garbage timeout")
}
gcSize, err := db.gcSize.Get()
if err != nil {
t.Fatal(err)
}
if gcSize == gcTarget {
break
}
}
wantTotalCollectedCount := uint64(len(addrs)) - gcTarget
if totalCollectedCount != wantTotalCollectedCount {
t.Errorf("total collected chunks %v, want %v", totalCollectedCount, wantTotalCollectedCount)
}
t.Run("pull index count", newItemsCountTest(db.pullIndex, int(gcTarget)))
t.Run("gc index count", newItemsCountTest(db.gcIndex, int(gcTarget)))
t.Run("gc size", newIndexGCSizeTest(db))
// requested chunk should not be removed
t.Run("get requested chunk", func(t *testing.T) {
_, err := db.Get(context.Background(), storage.ModeGetRequest, addrs[0])
if err != nil {
t.Fatal(err)
}
})
// the second synced chunk should be removed
t.Run("get gc-ed chunk", func(t *testing.T) {
_, err := db.Get(context.Background(), storage.ModeGetRequest, addrs[1])
if !errors.Is(err, storage.ErrNotFound) {
t.Errorf("got error %v, want %v", err, storage.ErrNotFound)
}
})
// last synced chunk should not be removed
t.Run("get most recent synced chunk", func(t *testing.T) {
_, err := db.Get(context.Background(), storage.ModeGetRequest, addrs[len(addrs)-1])
if err != nil {
t.Fatal(err)
}
})
} | explode_data.jsonl/11632 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1440
} | [
2830,
3393,
3506,
68140,
43930,
20652,
21936,
6615,
35295,
1155,
353,
8840,
836,
8,
341,
18185,
31679,
47504,
43930,
20652,
46019,
1669,
1281,
35190,
2622,
21,
19,
340,
16867,
738,
2271,
31679,
47504,
43930,
20652,
18552,
19611,
2209,
2507,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_SQLite_001(t *testing.T) {
version, number, id := sqlite3.Version()
t.Logf("Version: %v", version)
t.Logf("Number: %v", number)
t.Logf("ID: %v", id)
} | explode_data.jsonl/48725 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 73
} | [
2830,
3393,
46625,
632,
62,
15,
15,
16,
1155,
353,
8840,
836,
8,
341,
74954,
11,
1372,
11,
877,
1669,
22003,
18,
35842,
741,
3244,
98954,
445,
5637,
25,
1018,
85,
497,
2319,
340,
3244,
98954,
445,
2833,
25,
1018,
85,
497,
1372,
34... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestForceNewCluster(t *testing.T) {
c := NewCluster(t, 3)
c.Launch(t)
cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
resp, err := kapi.Create(ctx, "/foo", "bar")
if err != nil {
t.Fatalf("unexpected create error: %v", err)
}
cancel()
// ensure create has been applied in this machine
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
cancel()
c.Members[0].Stop(t)
c.Members[1].Terminate(t)
c.Members[2].Terminate(t)
c.Members[0].ForceNewCluster = true
err = c.Members[0].Restart(t)
if err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
defer c.Members[0].Terminate(t)
c.waitLeader(t, c.Members[:1])
// use new http client to init new connection
cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
kapi = client.NewKeysAPI(cc)
// ensure force restart keep the old data, and new cluster can make progress
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
cancel()
clusterMustProgress(t, c.Members[:1])
} | explode_data.jsonl/16297 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 543
} | [
2830,
3393,
18573,
3564,
28678,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
1532,
28678,
1155,
11,
220,
18,
340,
1444,
1214,
18423,
1155,
340,
63517,
1669,
15465,
3564,
9230,
2959,
1155,
11,
3056,
917,
90,
66,
91758,
58,
15,
936,
3144,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestBadSeparator(t *testing.T) {
type config struct {
WontWork []int `env:"WONTWORK" envSeparator:":"`
}
cfg := &config{}
os.Setenv("WONTWORK", "1,2,3,4")
defer os.Clearenv()
assert.Error(t, env.Parse(cfg))
} | explode_data.jsonl/7492 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 100
} | [
2830,
3393,
17082,
16409,
1155,
353,
8840,
836,
8,
341,
13158,
2193,
2036,
341,
197,
17300,
544,
6776,
3056,
396,
1565,
3160,
2974,
54,
10232,
18470,
1,
6105,
16409,
25,
3252,
3989,
197,
630,
50286,
1669,
609,
1676,
16094,
25078,
4202,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestHasNaN(t *testing.T) {
t.Parallel()
for i, test := range []struct {
s []float64
ans bool
}{
{},
{
s: []float64{1, 2, 3, 4},
},
{
s: []float64{1, math.NaN(), 3, 4},
ans: true,
},
{
s: []float64{1, 2, 3, math.NaN()},
ans: true,
},
} {
b := HasNaN(test.s)
if b != test.ans {
t.Errorf("HasNaN mismatch case %d. Expected %v, Found %v", i, test.ans, b)
}
}
} | explode_data.jsonl/1225 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 228
} | [
2830,
3393,
10281,
22831,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
2023,
600,
11,
1273,
1669,
2088,
3056,
1235,
341,
197,
1903,
256,
3056,
3649,
21,
19,
198,
197,
43579,
1807,
198,
197,
59403,
197,
197,
38837,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSimpleProducer_WithTLS(t *testing.T) {
_, err := SimpleProducer([]string{"127.0.0.1:9092"}, 0, 0, makeTLSConfiguration(t))
// without a running kafka this should result in an error
if err == nil {
t.Error("SimpleProducer() expected error got nil")
} else {
t.Logf("SimpleProducer() error = %v", err)
}
} | explode_data.jsonl/68626 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 119
} | [
2830,
3393,
16374,
45008,
62,
2354,
45439,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1848,
1669,
8993,
45008,
10556,
917,
4913,
16,
17,
22,
13,
15,
13,
15,
13,
16,
25,
24,
15,
24,
17,
14345,
220,
15,
11,
220,
15,
11,
1281,
45439,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFeatureCollection(t *testing.T) {
for _, tc := range []struct {
fc *FeatureCollection
s string
}{
{
fc: &FeatureCollection{
Features: []*Feature{
{
Geometry: geom.NewPoint(geom.XY).MustSetCoords([]float64{125.6, 10.1}),
Properties: map[string]interface{}{
"name": "Dinagat Islands",
},
},
},
},
s: `{"type":"FeatureCollection","features":[{"type":"Feature","geometry":{"type":"Point","coordinates":[125.6,10.1]},"properties":{"name":"Dinagat Islands"}}]}`,
},
{
fc: &FeatureCollection{
Features: []*Feature{
{
Geometry: geom.NewPoint(geom.XY).MustSetCoords([]float64{125.6, 10.1}),
Properties: map[string]interface{}{
"name": "Dinagat Islands",
},
},
{
Geometry: geom.NewLineString(geom.XY).MustSetCoords([]geom.Coord{{102, 0}, {103, 1}, {104, 0}, {105, 1}}),
Properties: map[string]interface{}{
"prop0": "value0",
"prop1": 0.0,
},
},
{
Geometry: geom.NewPolygon(geom.XY).MustSetCoords([][]geom.Coord{{{100, 0}, {101, 0}, {101, 1}, {100, 1}, {100, 0}}}),
Properties: map[string]interface{}{
"prop0": "value0",
"prop1": map[string]interface{}{
"this": "that",
},
},
},
},
},
s: `{"type":"FeatureCollection","features":[{"type":"Feature","geometry":{"type":"Point","coordinates":[125.6,10.1]},"properties":{"name":"Dinagat Islands"}},{"type":"Feature","geometry":{"type":"LineString","coordinates":[[102,0],[103,1],[104,0],[105,1]]},"properties":{"prop0":"value0","prop1":0}},{"type":"Feature","geometry":{"type":"Polygon","coordinates":[[[100,0],[101,0],[101,1],[100,1],[100,0]]]},"properties":{"prop0":"value0","prop1":{"this":"that"}}}]}`,
},
} {
if got, err := json.Marshal(tc.fc); err != nil || string(got) != tc.s {
t.Errorf("json.Marshal(%+v) == %v, %v, want %v, nil", tc.fc, string(got), err, tc.s)
}
fc := &FeatureCollection{}
if err := json.Unmarshal([]byte(tc.s), fc); err != nil {
t.Errorf("json.Unmarshal(%v, ...) == %v, want nil", tc.s, err)
}
if diff, equal := messagediff.PrettyDiff(tc.fc, fc); !equal {
t.Errorf("json.Unmarshal(%v, ...), diff\n%s", tc.s, diff)
}
}
} | explode_data.jsonl/73813 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1023
} | [
2830,
3393,
13859,
6482,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
17130,
1669,
2088,
3056,
1235,
341,
197,
1166,
66,
353,
13859,
6482,
198,
197,
1903,
220,
914,
198,
197,
59403,
197,
197,
515,
298,
1166,
66,
25,
609,
13859,
6482,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestCreateControllerPublishVolumeResponse(t *testing.T) {
testCases := []struct {
testCaseName string
requestVolAttResponse provider.VolumeAttachmentResponse
extraPublishInfo map[string]string
expectedCtlPubVolResponse *csi.ControllerPublishVolumeResponse
expectedStatus bool
}{
{
testCaseName: "Valid controller volume response",
requestVolAttResponse: provider.VolumeAttachmentResponse{
Status: "available",
VolumeAttachmentRequest: provider.VolumeAttachmentRequest{
VolumeID: "volumeID",
InstanceID: "instanceID",
VPCVolumeAttachment: &provider.VolumeAttachment{DevicePath: "/dev/xbv"},
},
},
extraPublishInfo: map[string]string{},
expectedCtlPubVolResponse: &csi.ControllerPublishVolumeResponse{
PublishContext: map[string]string{
PublishInfoVolumeID: "volumeID",
PublishInfoNodeID: "instanceID",
PublishInfoStatus: "available",
PublishInfoDevicePath: "/dev/xbv",
},
},
expectedStatus: true,
},
}
for _, testcase := range testCases {
t.Run(testcase.testCaseName, func(t *testing.T) {
actualCtlPubVol := createControllerPublishVolumeResponse(testcase.requestVolAttResponse, testcase.extraPublishInfo)
assert.Equal(t, testcase.expectedStatus, isControllerPublishVolume(testcase.expectedCtlPubVolResponse, actualCtlPubVol))
})
}
} | explode_data.jsonl/51261 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 538
} | [
2830,
3393,
4021,
2051,
50145,
18902,
2582,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
18185,
4207,
675,
1060,
914,
198,
197,
23555,
36361,
10456,
2582,
257,
9109,
79106,
33569,
2582,
198,
197,
8122,
2172,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSDNotify(t *testing.T) {
l, err := listen()
if err != nil {
t.Fatal(err)
}
defer func() { _ = l.close() }()
ch := make(chan error)
go func() {
ch <- l.wait()
}()
if err := notify(l.Path, readyMsg); err != nil {
t.Fatal(err)
}
if err := <-ch; err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/75719 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 142
} | [
2830,
3393,
5491,
28962,
1155,
353,
8840,
836,
8,
341,
8810,
11,
1848,
1669,
8844,
741,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
532,
16867,
2915,
368,
314,
716,
284,
326,
4653,
368,
335,
2822,
23049,
1669,
1281,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestApixu_Forecast(t *testing.T) {
data := loadData(t, "forecast")
a := &apixu{
config: Config{},
httpClient: &httpClientMock{
response: &http.Response{
StatusCode: http.StatusOK,
Body: &bodyMock{},
},
err: nil,
},
read: func(r io.Reader) ([]byte, error) {
return data, nil
},
}
expected := &response.Forecast{}
if err := json.Unmarshal(data, expected); err != nil {
assert.Fail(t, err.Error())
}
hour := 17
res, err := a.Forecast("query", 2, &hour)
assert.Equal(t, expected, res)
assert.NoError(t, err)
} | explode_data.jsonl/14941 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 248
} | [
2830,
3393,
10611,
941,
84,
1400,
460,
3829,
1155,
353,
8840,
836,
8,
341,
8924,
1669,
62432,
1155,
11,
330,
58984,
5130,
11323,
1669,
609,
391,
941,
84,
515,
197,
25873,
25,
5532,
38837,
197,
28080,
2959,
25,
609,
83417,
11571,
515,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReconciler_NewCanaries_FillNames(t *testing.T) {
job := mock.Job()
job.TaskGroups[0].Update = &structs.UpdateStrategy{
Canary: 4,
MaxParallel: 2,
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
MinHealthyTime: 10 * time.Second,
HealthyDeadline: 10 * time.Minute,
}
// Create an existing deployment that has placed some canaries
d := structs.NewDeployment(job)
s := &structs.DeploymentState{
Promoted: false,
DesiredTotal: 10,
DesiredCanaries: 4,
PlacedAllocs: 2,
}
d.TaskGroups[job.TaskGroups[0].Name] = s
// Create 10 allocations from the old job
var allocs []*structs.Allocation
for i := 0; i < 10; i++ {
alloc := mock.Alloc()
alloc.Job = job
alloc.JobID = job.ID
alloc.NodeID = uuid.Generate()
alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
alloc.TaskGroup = job.TaskGroups[0].Name
allocs = append(allocs, alloc)
}
// Create canaries but pick names at the ends
for i := 0; i < 4; i += 3 {
// Create one canary
canary := mock.Alloc()
canary.Job = job
canary.JobID = job.ID
canary.NodeID = uuid.Generate()
canary.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
canary.TaskGroup = job.TaskGroups[0].Name
s.PlacedCanaries = append(s.PlacedCanaries, canary.ID)
canary.DeploymentID = d.ID
allocs = append(allocs, canary)
}
reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, d, allocs, nil)
r := reconciler.Compute()
// Assert the correct results
assertResults(t, r, &resultExpectation{
createDeployment: nil,
deploymentUpdates: nil,
place: 2,
inplace: 0,
stop: 0,
desiredTGUpdates: map[string]*structs.DesiredUpdates{
job.TaskGroups[0].Name: {
Canary: 2,
Ignore: 12,
},
},
})
assertNamesHaveIndexes(t, intRange(1, 2), placeResultsToNames(r.place))
} | explode_data.jsonl/67265 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 827
} | [
2830,
3393,
693,
40446,
5769,
39582,
6713,
5431,
1400,
483,
7980,
1155,
353,
8840,
836,
8,
341,
68577,
1669,
7860,
45293,
741,
68577,
28258,
22173,
58,
15,
936,
4289,
284,
609,
1235,
82,
16689,
19816,
515,
197,
6258,
276,
658,
25,
688... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestConnectionStatementCacheExecute(t *testing.T) {
a := assert.New(t)
conn := NewFromEnv()
defer func() {
closeErr := conn.Close()
a.Nil(closeErr)
}()
conn.EnableStatementCache()
_, err := conn.Open()
a.Nil(err)
err = conn.Exec("select 'ok!'")
a.Nil(err)
err = conn.Exec("select 'ok!'")
a.Nil(err)
a.True(conn.StatementCache().HasStatement("select 'ok!'"))
} | explode_data.jsonl/33835 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 160
} | [
2830,
3393,
4526,
8636,
8233,
17174,
1155,
353,
8840,
836,
8,
341,
11323,
1669,
2060,
7121,
1155,
692,
32917,
1669,
1532,
3830,
14359,
741,
16867,
2915,
368,
341,
197,
27873,
7747,
1669,
4534,
10421,
741,
197,
11323,
59678,
68890,
7747,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestValidateCustomResourceDefinitionValidation(t *testing.T) {
tests := []struct {
name string
input apiextensions.CustomResourceValidation
statusEnabled bool
wantError bool
}{
{
name: "empty",
input: apiextensions.CustomResourceValidation{},
wantError: false,
},
{
name: "empty with status",
input: apiextensions.CustomResourceValidation{},
statusEnabled: true,
wantError: false,
},
{
name: "root type without status",
input: apiextensions.CustomResourceValidation{
OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
Type: "string",
},
},
statusEnabled: false,
wantError: false,
},
{
name: "root type having invalid value, with status",
input: apiextensions.CustomResourceValidation{
OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
Type: "string",
},
},
statusEnabled: true,
wantError: true,
},
{
name: "non-allowed root field with status",
input: apiextensions.CustomResourceValidation{
OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
AnyOf: []apiextensions.JSONSchemaProps{
{
Description: "First schema",
},
{
Description: "Second schema",
},
},
},
},
statusEnabled: true,
wantError: true,
},
{
name: "all allowed fields at the root of the schema with status",
input: apiextensions.CustomResourceValidation{
OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
Description: "This is a description",
Type: "object",
Format: "date-time",
Title: "This is a title",
Maximum: float64Ptr(10),
ExclusiveMaximum: true,
Minimum: float64Ptr(5),
ExclusiveMinimum: true,
MaxLength: int64Ptr(10),
MinLength: int64Ptr(5),
Pattern: "^[a-z]$",
MaxItems: int64Ptr(10),
MinItems: int64Ptr(5),
MultipleOf: float64Ptr(3),
Required: []string{"spec", "status"},
Items: &apiextensions.JSONSchemaPropsOrArray{
Schema: &apiextensions.JSONSchemaProps{
Description: "This is a schema nested under Items",
},
},
Properties: map[string]apiextensions.JSONSchemaProps{
"spec": {},
"status": {},
},
ExternalDocs: &apiextensions.ExternalDocumentation{
Description: "This is an external documentation description",
},
Example: &example,
},
},
statusEnabled: true,
wantError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := ValidateCustomResourceDefinitionValidation(&tt.input, tt.statusEnabled, field.NewPath("spec", "validation"))
if !tt.wantError && len(got) > 0 {
t.Errorf("Expected no error, but got: %v", got)
} else if tt.wantError && len(got) == 0 {
t.Error("Expected error, but got none")
}
})
}
} | explode_data.jsonl/46611 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1373
} | [
2830,
3393,
17926,
10268,
4783,
10398,
13799,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
688,
914,
198,
197,
22427,
260,
6330,
27609,
27649,
4783,
13799,
198,
197,
23847,
5462,
1807,
198,
197,
50780,
1454,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestValidArgsNotValidArgsFunc(t *testing.T) {
rootCmd := &Command{
Use: "root",
ValidArgs: []string{"one", "two"},
ValidArgsFunction: func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
return []string{"three", "four"}, ShellCompDirectiveNoFileComp
},
Run: emptyRun,
}
// Test that if both ValidArgs and ValidArgsFunction are present
// only ValidArgs is considered
output, err := executeCommand(rootCmd, ShellCompNoDescRequestCmd, "")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
expected := strings.Join([]string{
"one",
"two",
":4",
"Completion ended with directive: ShellCompDirectiveNoFileComp", ""}, "\n")
if output != expected {
t.Errorf("expected: %q, got: %q", expected, output)
}
// Check completing with a prefix
output, err = executeCommand(rootCmd, ShellCompNoDescRequestCmd, "t")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
expected = strings.Join([]string{
"two",
":4",
"Completion ended with directive: ShellCompDirectiveNoFileComp", ""}, "\n")
if output != expected {
t.Errorf("expected: %q, got: %q", expected, output)
}
} | explode_data.jsonl/43761 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 411
} | [
2830,
3393,
4088,
4117,
2623,
4088,
4117,
9626,
1155,
353,
8840,
836,
8,
341,
33698,
15613,
1669,
609,
4062,
515,
197,
95023,
25,
981,
330,
2888,
756,
197,
197,
4088,
4117,
25,
3056,
917,
4913,
603,
497,
330,
19789,
7115,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAsapKeyAdmitFunc(t *testing.T) {
t.Parallel()
tests := []struct {
name string
admissionReview admissionv1beta1.AdmissionReview
want *admissionv1beta1.AdmissionResponse
}{
{
"serviceName matching namespace",
buildAdmissionReview("foo", k8s.ServiceInstanceGVR, admissionv1beta1.Create, genASAPKeyRawSpec(t, "foo")),
buildAdmissionResponse(true, 0, metav1.StatusReasonUnknown, nil, "serviceName is prefixed by namespace"),
},
{
"serviceName prefixed by namespace",
buildAdmissionReview("foo", k8s.ServiceInstanceGVR, admissionv1beta1.Create, genASAPKeyRawSpec(t, "foo/bar")),
buildAdmissionResponse(true, 0, metav1.StatusReasonUnknown, nil, "serviceName is prefixed by namespace"),
},
{
"namespace with label",
buildAdmissionReview("foo--dev", k8s.ServiceInstanceGVR, admissionv1beta1.Create, genASAPKeyRawSpec(t, "foo")),
buildAdmissionResponse(true, 0, metav1.StatusReasonUnknown, nil, "serviceName is prefixed by namespace"),
},
{
"namespace with label and serviceName with ASAPKey resource name",
buildAdmissionReview("foo--dev", k8s.ServiceInstanceGVR, admissionv1beta1.Create, genASAPKeyRawSpec(t, "foo/bar")),
buildAdmissionResponse(true, 0, metav1.StatusReasonUnknown, nil, "serviceName is prefixed by namespace"),
},
{
"namespace with label and serviceName with ASAPKey resource name mismatch",
buildAdmissionReview("foo--dev", k8s.ServiceInstanceGVR, admissionv1beta1.Create, genASAPKeyRawSpec(t, "bar/foo")),
buildAdmissionResponse(false, http.StatusForbidden, metav1.StatusReasonForbidden, nil, `serviceName was set to "bar/foo", which is not prefixed by namespace "foo--dev"`),
},
{
"serviceName not prefixed by namespace",
buildAdmissionReview("foo", k8s.ServiceInstanceGVR, admissionv1beta1.Create, genASAPKeyRawSpec(t, "bar/foo")),
buildAdmissionResponse(false, http.StatusForbidden, metav1.StatusReasonForbidden, nil, `serviceName was set to "bar/foo", which is not prefixed by namespace "foo"`),
},
}
testsError := []struct {
name string
admissionReview admissionv1beta1.AdmissionReview
wantErr string
}{
{
"with no namespace",
buildAdmissionReview("", k8s.ServiceInstanceGVR, admissionv1beta1.Create, genASAPKeyRawSpec(t, "whatever")),
"no namespace in AdmissionReview request",
},
}
ctx := context.Background()
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got, err := AsapKeyAdmitFunc(ctx, tc.admissionReview)
require.NoError(t, err)
require.Equal(t, tc.want.Result.Message, got.Result.Message)
require.Equal(t, tc.want.Allowed, got.Allowed)
})
}
for _, tc := range testsError {
t.Run(tc.name, func(t *testing.T) {
_, err := AsapKeyAdmitFunc(ctx, tc.admissionReview)
require.EqualError(t, err, tc.wantErr)
})
}
} | explode_data.jsonl/78963 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1060
} | [
2830,
3393,
2121,
391,
1592,
2589,
1763,
9626,
1155,
353,
8840,
836,
8,
1476,
3244,
41288,
7957,
2822,
78216,
1669,
3056,
1235,
341,
197,
11609,
310,
914,
198,
197,
98780,
2728,
19432,
25293,
85,
16,
19127,
16,
17865,
2728,
19432,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBuildLocation(t *testing.T) {
invalidType := &ingress.Ingress{}
expected := "/"
actual := buildLocation(invalidType, true)
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Expected '%v' but returned '%v'", expected, actual)
}
for k, tc := range tmplFuncTestcases {
loc := &ingress.Location{
Path: tc.Path,
Rewrite: rewrite.Config{Target: tc.Target},
}
newLoc := buildLocation(loc, tc.enforceRegex)
if tc.Location != newLoc {
t.Errorf("%s: expected '%v' but returned %v", k, tc.Location, newLoc)
}
}
} | explode_data.jsonl/80579 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 218
} | [
2830,
3393,
11066,
4707,
1155,
353,
8840,
836,
8,
341,
197,
11808,
929,
1669,
609,
287,
673,
5337,
2483,
16094,
42400,
1669,
80455,
88814,
1669,
1936,
4707,
5900,
1891,
929,
11,
830,
692,
743,
753,
34913,
94750,
15253,
11,
5042,
8,
34... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestDNSProvider(t *testing.T) {
fakeAPIKey := "123412341234123412341234"
fakeKeyAuth := "XXXX"
provider, err := NewDNSProviderCredentials(fakeAPIKey)
require.NoError(t, err)
regexpDate, err := regexp.Compile(`\[ACME Challenge [^\]:]*:[^\]]*\]`)
require.NoError(t, err)
// start fake RPC server
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
require.Equal(t, "text/xml", r.Header.Get("Content-Type"), "invalid content type")
req, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
req = regexpDate.ReplaceAllLiteral(req, []byte(`[ACME Challenge 01 Jan 16 00:00 +0000]`))
resp, ok := serverResponses[string(req)]
require.True(t, ok, "Server response for request not found")
_, err = io.Copy(w, strings.NewReader(resp))
require.NoError(t, err)
}))
defer fakeServer.Close()
// define function to override findZoneByFqdn with
fakeFindZoneByFqdn := func(fqdn string, nameserver []string) (string, error) {
return "example.com.", nil
}
// override gandi endpoint and findZoneByFqdn function
savedEndpoint, savedFindZoneByFqdn := endpoint, findZoneByFqdn
defer func() {
endpoint, findZoneByFqdn = savedEndpoint, savedFindZoneByFqdn
}()
endpoint, findZoneByFqdn = fakeServer.URL+"/", fakeFindZoneByFqdn
// run Present
err = provider.Present("abc.def.example.com", "", fakeKeyAuth)
require.NoError(t, err)
// run CleanUp
err = provider.CleanUp("abc.def.example.com", "", fakeKeyAuth)
require.NoError(t, err)
} | explode_data.jsonl/43105 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 572
} | [
2830,
3393,
61088,
5179,
1155,
353,
8840,
836,
8,
341,
1166,
726,
7082,
1592,
1669,
330,
16,
17,
18,
19,
16,
17,
18,
19,
16,
17,
18,
19,
16,
17,
18,
19,
16,
17,
18,
19,
16,
17,
18,
19,
698,
1166,
726,
1592,
5087,
1669,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRecordSerializeSimple(t *testing.T) {
var r Record
{
d := r.Marshal()
assert.Equal(t, 0, len(d))
}
r.Write("key", "val")
d := r.Marshal()
r2, err := UnmarshalRecord(d, nil)
assert.NoError(t, err)
{
v, ok := r2.Get("key")
assert.True(t, ok)
assert.Equal(t, v, "val")
}
{
v, ok := r2.Get("Key")
assert.False(t, ok)
assert.Equal(t, v, "")
}
s := testRoundTrip(t, &r)
assert.Equal(t, "key: val\n", s)
} | explode_data.jsonl/44791 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 224
} | [
2830,
3393,
6471,
15680,
16374,
1155,
353,
8840,
836,
8,
341,
2405,
435,
13583,
271,
197,
515,
197,
2698,
1669,
435,
37271,
741,
197,
6948,
12808,
1155,
11,
220,
15,
11,
2422,
1500,
1171,
197,
630,
7000,
4073,
445,
792,
497,
330,
83... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCollectionVisit(t *testing.T) {
tests := []struct {
Input ParsedFileCollection
Expects []*ParsedFile
}{
{
[]*ParsedFile{modA, modG},
[]*ParsedFile{modA, modG},
},
{
[]*ParsedFile{modG, modA},
[]*ParsedFile{modA, modG},
},
{
[]*ParsedFile{modA, modB, modC},
[]*ParsedFile{modA, modB, modC},
},
{
[]*ParsedFile{modC, modB, modA},
[]*ParsedFile{modA, modB, modC},
},
{
[]*ParsedFile{modA, modD, modE},
[]*ParsedFile{modA, modD, modE},
},
{
[]*ParsedFile{modD, modE, modA},
[]*ParsedFile{modA, modD, modE},
},
{
[]*ParsedFile{modA, modG, modI},
[]*ParsedFile{modA, modG, modI},
},
{
[]*ParsedFile{modG, modA, modI},
[]*ParsedFile{modA, modG, modI},
},
{
[]*ParsedFile{modI, modG, modA},
[]*ParsedFile{modA, modG, modI},
},
{
[]*ParsedFile{modI, modG, modA, modK},
[]*ParsedFile{modA, modG, modI, modK},
},
{
[]*ParsedFile{modA, modG, modI, modK},
[]*ParsedFile{modA, modG, modI, modK},
},
{
[]*ParsedFile{modA, modK, modI, modG},
[]*ParsedFile{modA, modG, modI, modK},
},
{
[]*ParsedFile{modK, modI, modG, modA},
[]*ParsedFile{modA, modG, modI, modK},
},
{
[]*ParsedFile{modKV, modAKS, modGw, modReg, modSpoke},
[]*ParsedFile{modKV, modReg, modSpoke, modGw, modAKS},
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) {
visited := []*ParsedFile{}
err := test.Input.Walk(func(file *ParsedFile) error {
visited = append(visited, file)
return nil
})
assert.NoError(t, err)
assert.ElementsMatch(t, test.Expects, visited)
})
}
} | explode_data.jsonl/74662 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 894
} | [
2830,
3393,
6482,
26218,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
66588,
256,
393,
18112,
1703,
6482,
198,
197,
197,
840,
7973,
29838,
82959,
1703,
198,
197,
59403,
197,
197,
515,
298,
197,
1294,
9,
82959,
1703... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAccContainerInfraV1ClusterDataSource_basic(t *testing.T) {
resourceName := "openstack_containerinfra_cluster_v1.cluster_1"
clusterName := acctest.RandomWithPrefix("tf-acc-cluster")
imageName := acctest.RandomWithPrefix("tf-acc-image")
keypairName := acctest.RandomWithPrefix("tf-acc-keypair")
clusterTemplateName := acctest.RandomWithPrefix("tf-acc-clustertemplate")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheckContainerInfra(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerInfraV1ClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerInfraV1ClusterBasic(imageName, keypairName, clusterTemplateName, clusterName),
},
{
Config: testAccContainerInfraV1ClusterDataSource_basic(
testAccContainerInfraV1ClusterBasic(imageName, keypairName, clusterTemplateName, clusterName),
),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerInfraV1ClusterDataSourceID(resourceName),
resource.TestCheckResourceAttr(resourceName, "name", clusterName),
resource.TestCheckResourceAttr(resourceName, "master_count", "1"),
resource.TestCheckResourceAttr(resourceName, "node_count", "1"),
),
},
},
})
} | explode_data.jsonl/32844 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 454
} | [
2830,
3393,
14603,
4502,
19433,
956,
53,
16,
28678,
17173,
34729,
1155,
353,
8840,
836,
8,
1476,
50346,
675,
1669,
330,
2508,
7693,
15847,
92317,
28441,
2273,
16,
40501,
62,
16,
698,
197,
18855,
675,
1669,
1613,
67880,
26709,
2354,
1433... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRevelLevelOutput(t *testing.T) {
l, b := newBufferedRevelLog()
l.Info("This is a test")
expectedMatch := "INFO.*This is a test\n"
actual := b.String()
if ok, _ := regexp.Match(expectedMatch, []byte(actual)); !ok {
t.Errorf("Log output mismatch %s (actual) != %s (expected)", actual, expectedMatch)
}
} | explode_data.jsonl/3451 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 116
} | [
2830,
3393,
693,
889,
4449,
5097,
1155,
353,
8840,
836,
8,
341,
8810,
11,
293,
1669,
501,
4095,
291,
693,
889,
2201,
741,
8810,
20132,
445,
1986,
374,
264,
1273,
5130,
42400,
8331,
1669,
330,
6637,
4908,
1986,
374,
264,
1273,
1699,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestMSSQLQueryGetName(t *testing.T) {
MSSQLQuery, ok := NewSqlserverSQLQuery(sMSSQLQuery)
if ok != nil {
t.Fail()
}
if MSSQLQuery.GetName() != "default" {
t.Fail()
}
} | explode_data.jsonl/22808 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 83
} | [
2830,
3393,
44,
1220,
3588,
2859,
49403,
1155,
353,
8840,
836,
8,
341,
9209,
1220,
3588,
2859,
11,
5394,
1669,
1532,
8269,
4030,
6688,
2859,
1141,
44,
1220,
3588,
2859,
340,
743,
5394,
961,
2092,
341,
197,
3244,
57243,
741,
197,
630,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 3 |
func TestDb_CreateReopenDbOnFile(t *testing.T) {
dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid()))
if err := os.RemoveAll(dbpath); err != nil {
t.Fatal("cannot remove old db: ", err)
}
defer os.RemoveAll(dbpath)
for i := 0; i < 3; i++ {
stor, err := storage.OpenFile(dbpath)
if err != nil {
t.Fatalf("(%d) cannot open storage: %s", i, err)
}
db, err := Open(stor, nil)
if err != nil {
t.Fatalf("(%d) cannot open db: %s", i, err)
}
if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
t.Fatalf("(%d) cannot write to db: %s", i, err)
}
if err := db.Close(); err != nil {
t.Fatalf("(%d) cannot close db: %s", i, err)
}
if err := stor.Close(); err != nil {
t.Fatalf("(%d) cannot close storage: %s", i, err)
}
}
} | explode_data.jsonl/6040 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 370
} | [
2830,
3393,
7994,
34325,
693,
2508,
7994,
1925,
1703,
1155,
353,
8840,
836,
8,
341,
20939,
2343,
1669,
26054,
22363,
9638,
65009,
6184,
1507,
8879,
17305,
445,
70,
1263,
85,
783,
65,
1944,
4021,
693,
2508,
7994,
1925,
1703,
11069,
67,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestNodeGraphWithServiceInjection(t *testing.T) {
assert := assert.New(t)
q0 := `round(sum(rate(istio_requests_total{reporter="destination",destination_service_namespace="bookinfo",request_operation="Top",destination_service_name="reviews"}[60s])) by (source_cluster,source_workload_namespace,source_workload,source_canonical_service,source_canonical_revision,destination_cluster,destination_service_namespace,destination_service,destination_service_name,destination_workload_namespace,destination_workload,destination_canonical_service,destination_canonical_revision,request_protocol,response_code,grpc_response_status,response_flags,request_operation) > 0,0.001)`
q0m0 := model.Metric{
"source_workload_namespace": "bookinfo",
"source_workload": "productpage-v1",
"source_canonical_service": "productpage",
"source_canonical_revision": "v1",
"destination_service_namespace": "bookinfo",
"destination_service": "reviews.bookinfo.svc.cluster.local",
"destination_service_name": "reviews",
"destination_workload_namespace": "bookinfo",
"destination_workload": "reviews-v1",
"destination_canonical_service": "reviews",
"destination_canonical_revision": "v1",
"response_code": "200",
"response_flags": "",
"request_protocol": "http",
"request_operation": "Top"}
v0 := model.Vector{
&model.Sample{
Metric: q0m0,
Value: 70}}
client, api, err := setupMocked()
if err != nil {
t.Error(err)
return
}
mockQuery(api, q0, &v0)
trafficMap := aggregateNodeTestTraffic(true)
ppID, _ := graph.Id(graph.Unknown, "bookinfo", "productpage", "bookinfo", "productpage-v1", "productpage", "v1", graph.GraphTypeVersionedApp)
pp, ok := trafficMap[ppID]
assert.Equal(true, ok)
assert.Equal(1, len(pp.Edges))
assert.Equal(graph.NodeTypeService, pp.Edges[0].Dest.NodeType)
duration, _ := time.ParseDuration("60s")
appender := AggregateNodeAppender{
Aggregate: "request_operation",
AggregateValue: "Top",
GraphType: graph.GraphTypeVersionedApp,
InjectServiceNodes: true,
Namespaces: map[string]graph.NamespaceInfo{
"bookinfo": {
Name: "bookinfo",
Duration: duration,
},
},
QueryTime: time.Now().Unix(),
Service: "reviews",
}
appender.appendNodeGraph(trafficMap, "bookinfo", client)
pp, ok = trafficMap[ppID]
assert.Equal(true, ok)
assert.Equal(1, len(pp.Edges))
assert.Equal(graph.NodeTypeAggregate, pp.Edges[0].Dest.NodeType)
topReviews := pp.Edges[0].Dest
assert.Equal("Top", topReviews.Metadata[graph.AggregateValue])
assert.Equal("request_operation", topReviews.Metadata[graph.Aggregate])
assert.Equal("Top", topReviews.Metadata[graph.AggregateValue])
assert.Equal("reviews", topReviews.App)
assert.Equal(1, len(topReviews.Edges))
assert.Equal(graph.NodeTypeService, topReviews.Edges[0].Dest.NodeType)
reviewsService := topReviews.Edges[0].Dest
assert.Equal(graph.NodeTypeService, reviewsService.NodeType)
assert.Equal("reviews", reviewsService.Service)
assert.Equal(1, len(reviewsService.Edges))
reviews := reviewsService.Edges[0].Dest
assert.Equal("reviews", reviews.App)
assert.Equal("v1", reviews.Version)
} | explode_data.jsonl/44686 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1302
} | [
2830,
3393,
1955,
11212,
2354,
1860,
36653,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
18534,
15,
1669,
1565,
1049,
19713,
76723,
7,
380,
815,
37216,
10784,
90,
11736,
261,
428,
17997,
497,
17997,
12267,
41571,
428... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestDisplayTaskDelayedRestart(t *testing.T) {
require.NoError(t, db.ClearCollections(task.Collection, task.OldCollection, build.Collection), "error clearing collection")
assert := assert.New(t)
dt := task.Task{
Id: "dt",
DisplayOnly: true,
Status: evergreen.TaskStarted,
Activated: true,
BuildId: "b",
ExecutionTasks: []string{
"task1",
"task2",
},
}
assert.NoError(dt.Insert())
task1 := task.Task{
Id: "task1",
BuildId: "b",
Status: evergreen.TaskSucceeded,
}
assert.NoError(task1.Insert())
task2 := task.Task{
Id: "task2",
BuildId: "b",
Status: evergreen.TaskSucceeded,
}
assert.NoError(task2.Insert())
b := build.Build{
Id: "b",
Tasks: []build.TaskCache{
{Id: "dt", Status: evergreen.TaskStarted, Activated: true},
},
}
assert.NoError(b.Insert())
// request that the task restarts when it's done
assert.NoError(dt.SetResetWhenFinished())
dbTask, err := task.FindOne(task.ById(dt.Id))
assert.NoError(err)
assert.True(dbTask.ResetWhenFinished)
assert.Equal(evergreen.TaskStarted, dbTask.Status)
// end the final task so that it restarts
assert.NoError(checkResetDisplayTask(&dt))
dbTask, err = task.FindOne(task.ById(dt.Id))
assert.NoError(err)
assert.Equal(evergreen.TaskUndispatched, dbTask.Status)
dbTask2, err := task.FindOne(task.ById(task2.Id))
assert.NoError(err)
assert.Equal(evergreen.TaskUndispatched, dbTask2.Status)
oldTask, err := task.FindOneOld(task.ById("dt_0"))
assert.NoError(err)
assert.NotNil(oldTask)
} | explode_data.jsonl/60448 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 636
} | [
2830,
3393,
7020,
6262,
57361,
59354,
1155,
353,
8840,
836,
8,
341,
17957,
35699,
1155,
11,
2927,
13524,
52730,
17483,
28629,
11,
3383,
8382,
507,
6482,
11,
1936,
28629,
701,
330,
841,
32750,
4426,
1138,
6948,
1669,
2060,
7121,
1155,
34... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGlobals(t *testing.T) {
var (
a = assert.New(t)
)
gCorredor = NewService(zap.NewNop(), options.CorredorOpt{})
a.Equal(gCorredor, Service())
a.NoError(Setup(zap.NewNop(), options.CorredorOpt{}))
a.NotNil(gCorredor)
gCorredor = nil
a.NoError(Setup(zap.NewNop(), options.CorredorOpt{}))
a.Equal(gCorredor, Service())
a.NotNil(gCorredor)
} | explode_data.jsonl/81124 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 170
} | [
2830,
3393,
48592,
1155,
353,
8840,
836,
8,
341,
2405,
2399,
197,
11323,
284,
2060,
7121,
1155,
340,
197,
692,
3174,
10580,
1151,
269,
284,
1532,
1860,
13174,
391,
7121,
45,
453,
1507,
2606,
63560,
1151,
269,
21367,
37790,
11323,
12808,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.