text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestLinkedInProviderOverrides(t *testing.T) {
p := NewLinkedInProvider(
&ProviderData{
LoginURL: &url.URL{
Scheme: "https",
Host: "example.com",
Path: "/oauth/auth"},
RedeemURL: &url.URL{
Scheme: "https",
Host: "example.com",
Path: "/oauth/token"},
ProfileURL: &url.URL{
Scheme: "https",
Host: "example.com",
Path: "/oauth/profile"},
ValidateURL: &url.URL{
Scheme: "https",
Host: "example.com",
Path: "/oauth/tokeninfo"},
Scope: "profile"})
assert.NotEqual(t, nil, p)
assert.Equal(t, "LinkedIn", p.Data().ProviderName)
assert.Equal(t, "https://example.com/oauth/auth",
p.Data().LoginURL.String())
assert.Equal(t, "https://example.com/oauth/token",
p.Data().RedeemURL.String())
assert.Equal(t, "https://example.com/oauth/profile",
p.Data().ProfileURL.String())
assert.Equal(t, "https://example.com/oauth/tokeninfo",
p.Data().ValidateURL.String())
assert.Equal(t, "profile", p.Data().Scope)
} | explode_data.jsonl/39461 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 439
} | [
2830,
3393,
75203,
5179,
80010,
1155,
353,
8840,
836,
8,
341,
3223,
1669,
1532,
75203,
5179,
1006,
197,
197,
5,
5179,
1043,
515,
298,
197,
6231,
3144,
25,
609,
1085,
20893,
515,
571,
7568,
8058,
25,
330,
2428,
756,
571,
197,
9296,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDLL(t *testing.T) {
tests := []struct {
name string
source string
expected int
}{
{
name: "none",
expected: 0,
source: `
package main
func main() {
println("Hello!")
}
`,
},
{
name: "for",
expected: 1,
source: `
package main
func main() {
for i := 0; i < 5; i++ {
defer println("defer")
}
}
`,
},
{
name: "range",
expected: 1,
source: `
package main
func main() {
list := []int{1, 2, 3, 4, 5, 6, 7}
for _, x := range list {
defer println(x)
}
}
`,
},
{
name: "nested",
expected: 1,
source: `
package main
func main() {
list := []int{1, 2, 3, 4, 5, 6, 7}
for _, i := range list {
for j := 0; j < i; j++ {
defer println(j)
}
}
}
`,
},
{
name: "block",
expected: 1,
source: `
package main
func main() {
for i := 0; i < 5; i++ {
{
defer println("defer")
}
}
}
`,
},
{
name: "if",
expected: 1,
source: `
package main
func main() {
for i := 0; i < 5; i++ {
if true {
defer println("defer")
}
}
}
`,
},
{
name: "funclit",
expected: 0,
source: `
package main
func main() {
for i := 0; i < 5; i++ {
func() {
defer println("defer")
}()
}
}
`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
reports, err := gather(tt.source, false)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if len(reports) != tt.expected {
t.Fatalf("expected %d reports, got %d", tt.expected, len(reports))
}
})
}
} | explode_data.jsonl/503 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 996
} | [
2830,
3393,
64187,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
198,
197,
47418,
256,
914,
198,
197,
42400,
526,
198,
197,
59403,
197,
197,
515,
298,
11609,
25,
257,
330,
6697,
756,
298,
42400,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestGetConfig(t *testing.T) {
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
}
defer mockDB.Close()
db := sqlx.NewDb(mockDB, "sqlmock")
defer db.Close()
config := map[string]interface{}{
"name0": "val0",
"name1": "val1",
}
rows := sqlmock.NewRows([]string{"name", "value"})
for name, val := range config {
rows = rows.AddRow(name, val)
}
mock.ExpectQuery("SELECT").WillReturnRows(rows)
sqlConfig, err := getConfig(db)
if err != nil {
t.Errorf("getProfiles expected: nil error, actual: %v", err)
}
if !reflect.DeepEqual(config, sqlConfig) {
t.Errorf("getConfig expected: %+v actual: %+v", config, sqlConfig)
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expections: %s", err)
}
} | explode_data.jsonl/31539 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 335
} | [
2830,
3393,
1949,
2648,
1155,
353,
8840,
836,
8,
341,
77333,
3506,
11,
7860,
11,
1848,
1669,
5704,
16712,
7121,
741,
743,
1848,
961,
2092,
341,
197,
3244,
30762,
445,
276,
1465,
7677,
82,
6,
572,
537,
3601,
979,
8568,
264,
13633,
46... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestUserStoreGetByEmail(t *testing.T) {
Setup()
teamid := model.NewId()
u1 := &model.User{}
u1.Email = model.NewId()
Must(store.User().Save(u1))
Must(store.Team().SaveMember(&model.TeamMember{TeamId: teamid, UserId: u1.Id}))
if err := (<-store.User().GetByEmail(u1.Email)).Err; err != nil {
t.Fatal(err)
}
if err := (<-store.User().GetByEmail("")).Err; err == nil {
t.Fatal("Should have failed because of missing email")
}
} | explode_data.jsonl/5098 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 181
} | [
2830,
3393,
1474,
6093,
1949,
87197,
1155,
353,
8840,
836,
8,
341,
197,
21821,
2822,
197,
9196,
307,
1669,
1614,
7121,
764,
2822,
10676,
16,
1669,
609,
2528,
7344,
16094,
10676,
16,
24066,
284,
1614,
7121,
764,
741,
9209,
590,
31200,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestExtractErrorCounts(t *testing.T) {
require.Equal(
t,
[]statusErrorCount{
statusErrorCount{"tank", "UNAVAIL", 0, 0, 0},
statusErrorCount{"c1t0d0", "ONLINE", 0, 0, 0},
statusErrorCount{"c1t1d0", "UNAVAIL", 4, 1, 0},
},
extractErrorCounts(sampleStatusErrorOutput),
)
} | explode_data.jsonl/18993 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 132
} | [
2830,
3393,
28959,
1454,
63731,
1155,
353,
8840,
836,
8,
341,
17957,
12808,
1006,
197,
3244,
345,
197,
197,
1294,
2829,
1454,
2507,
515,
298,
23847,
1454,
2507,
4913,
85171,
497,
330,
1861,
8093,
5965,
497,
220,
15,
11,
220,
15,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestValidateServerHooks_ValidInput(t *testing.T) {
var tests = []struct {
name string
hooksInput map[string][]models.Hook
}{
{"One hook, one script",
map[string][]models.Hook{"ApplicationStop": {
{
Location: "script-location",
Timeout: "10",
Runas: "user-name",
},
}},
},
{"One hook, multiple scripts",
map[string][]models.Hook{"ApplicationStop": {
{
Location: "script-location",
Timeout: "10",
Runas: "user-name",
},
{
Location: "script-location",
Timeout: "10",
Runas: "user-name",
},
}},
},
{"Multiple hooks, multiple scripts",
map[string][]models.Hook{"ApplicationStop": {
{
Location: "script-location",
Timeout: "10",
Runas: "user-name",
},
{
Location: "script-location",
Timeout: "10",
Runas: "user-name",
},
},
"BeforeInstall": {
{
Location: "script-location",
Timeout: "10",
Runas: "user-name",
},
}},
},
}
for _, test := range tests {
output := validateServerHooks(test.hooksInput)
if output != true {
t.Errorf("The validateServerHooks function failed for: %v", test)
}
}
} | explode_data.jsonl/71210 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 583
} | [
2830,
3393,
17926,
5475,
67769,
97279,
2505,
1155,
353,
8840,
836,
8,
341,
2405,
7032,
284,
3056,
1235,
341,
197,
11609,
981,
914,
198,
197,
9598,
14685,
2505,
2415,
14032,
45725,
6507,
3839,
1941,
198,
197,
59403,
197,
197,
4913,
3966,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestParseImageStreamImageName(t *testing.T) {
tests := map[string]struct {
input string
expectedRepo string
expectedId string
expectError bool
}{
"empty string": {
input: "",
expectError: true,
},
"one part": {
input: "a",
expectError: true,
},
"more than 2 parts": {
input: "a@b@c",
expectError: true,
},
"empty name part": {
input: "@id",
expectError: true,
},
"empty id part": {
input: "name@",
expectError: true,
},
"valid input": {
input: "repo@id",
expectedRepo: "repo",
expectedId: "id",
expectError: false,
},
}
for name, test := range tests {
repo, id, err := ParseImageStreamImageName(test.input)
didError := err != nil
if e, a := test.expectError, didError; e != a {
t.Errorf("%s: expected error=%t, got=%t: %s", name, e, a, err)
continue
}
if test.expectError {
continue
}
if e, a := test.expectedRepo, repo; e != a {
t.Errorf("%s: repo: expected %q, got %q", name, e, a)
continue
}
if e, a := test.expectedId, id; e != a {
t.Errorf("%s: id: expected %q, got %q", name, e, a)
continue
}
}
} | explode_data.jsonl/40823 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 567
} | [
2830,
3393,
14463,
1906,
3027,
1906,
675,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
2415,
14032,
60,
1235,
341,
197,
22427,
286,
914,
198,
197,
42400,
25243,
914,
198,
197,
42400,
764,
256,
914,
198,
197,
24952,
1454,
220,
1807,
198,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestDefragOutputExhaustion(t *testing.T) {
if testing.Short() || !build.VLONG {
t.SkipNow()
}
t.Parallel()
wt, err := createWalletTester(t.Name(), modules.ProdDependencies)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := wt.closeWt(); err != nil {
t.Fatal(err)
}
}()
wt.wallet.mu.Lock()
var dest types.UnlockHash
for k := range wt.wallet.keys {
dest = k
break
}
wt.wallet.mu.Unlock()
_, err = wt.miner.AddBlock()
if err != nil {
t.Fatal(err)
}
// concurrently make a bunch of transactions with lots of outputs to keep the
// defragger running
closechan := make(chan struct{})
donechan := make(chan struct{})
go func() {
defer close(donechan)
for {
select {
case <-closechan:
return
case <-time.After(time.Millisecond * 100):
_, err := wt.miner.AddBlock()
if err != nil {
t.Error(err)
return
}
txnValue := types.SiacoinPrecision.Mul64(3000)
fee := types.SiacoinPrecision.Mul64(10)
numOutputs := defragThreshold + 1
tbuilder, err := wt.wallet.StartTransaction()
if err != nil {
t.Error(err)
return
}
tbuilder.FundSiacoins(txnValue.Mul64(uint64(numOutputs)).Add(fee))
for i := 0; i < numOutputs; i++ {
tbuilder.AddSiacoinOutput(types.SiacoinOutput{
Value: txnValue,
UnlockHash: dest,
})
}
tbuilder.AddMinerFee(fee)
txns, err := tbuilder.Sign(true)
if err != nil {
t.Error("Error signing fragmenting transaction:", err)
}
err = wt.tpool.AcceptTransactionSet(txns)
if err != nil {
t.Error("Error accepting fragmenting transaction:", err)
}
_, err = wt.miner.AddBlock()
if err != nil {
t.Error(err)
return
}
}
}
}()
time.Sleep(time.Second * 1)
// ensure we can still send transactions while receiving aggressively
// fragmented outputs
for i := 0; i < 30; i++ {
sendAmount := types.SiacoinPrecision.Mul64(2000)
_, err = wt.wallet.SendSiacoins(sendAmount, types.UnlockHash{})
if err != nil {
t.Errorf("%v: %v", i, err)
}
time.Sleep(time.Millisecond * 50)
}
close(closechan)
<-donechan
} | explode_data.jsonl/64329 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 957
} | [
2830,
3393,
2620,
4101,
5097,
840,
15074,
290,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
1369,
753,
5834,
5058,
51306,
341,
197,
3244,
57776,
7039,
741,
197,
532,
3244,
41288,
7957,
741,
6692,
83,
11,
1848,
1669,
1855,
3825... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_connectionDetails(t *testing.T) {
type args struct {
kube client.Client
connDetails []v1beta1.ConnectionDetail
relName string
relNamespace string
}
type want struct {
out managed.ConnectionDetails
err error
}
cases := map[string]struct {
args
want
}{
"Fail_NotPartOfRelease": {
args: args{
kube: &test.MockClient{
MockGet: func(ctx context.Context, key client.ObjectKey, obj runtime.Object) error {
if o, ok := obj.(*unstructured.Unstructured); o.GetKind() == "Secret" && ok && key.Name == testSecretName && key.Namespace == testNamespace {
*obj.(*unstructured.Unstructured) = unstructured.Unstructured{
Object: map[string]interface{}{
"data": map[string]interface{}{
"db-password": "MTIzNDU=",
},
},
}
}
return nil
},
},
connDetails: []v1beta1.ConnectionDetail{
{
ObjectReference: corev1.ObjectReference{
Kind: "Secret",
Namespace: testNamespace,
Name: testSecretName,
APIVersion: "v1",
FieldPath: "data.db-password",
},
ToConnectionSecretKey: "password",
},
},
relName: testReleaseName,
relNamespace: testNamespace,
},
want: want{
out: managed.ConnectionDetails{},
err: errors.Errorf(errObjectNotPartOfRelease, corev1.ObjectReference{
Kind: "Secret",
Namespace: testNamespace,
Name: testSecretName,
APIVersion: "v1",
FieldPath: "data.db-password",
}),
},
},
"Success_PartOfRelease": {
args: args{
kube: &test.MockClient{
MockGet: func(ctx context.Context, key client.ObjectKey, obj runtime.Object) error {
if o, ok := obj.(*unstructured.Unstructured); o.GetKind() == "Secret" && ok && key.Name == testSecretName && key.Namespace == testNamespace {
*obj.(*unstructured.Unstructured) = unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
helmReleaseNameAnnotation: testReleaseName,
helmReleaseNamespaceAnnotation: testNamespace,
},
},
"data": map[string]interface{}{
"db-password": "MTIzNDU=",
},
},
}
}
return nil
},
},
connDetails: []v1beta1.ConnectionDetail{
{
ObjectReference: corev1.ObjectReference{
Kind: "Secret",
Namespace: testNamespace,
Name: testSecretName,
APIVersion: "v1",
FieldPath: "data.db-password",
},
ToConnectionSecretKey: "password",
},
},
relName: testReleaseName,
relNamespace: testNamespace,
},
want: want{
out: managed.ConnectionDetails{
"password": []byte("12345"),
},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
got, gotErr := connectionDetails(context.Background(), tc.args.kube, tc.args.connDetails, tc.args.relName, tc.args.relNamespace)
if diff := cmp.Diff(tc.want.err, gotErr, test.EquateErrors()); diff != "" {
t.Fatalf("connectionDetails(...): -want error, +got error: %s", diff)
}
if diff := cmp.Diff(tc.want.out, got); diff != "" {
t.Errorf("connectionDetails(...): -want result, +got result: %s", diff)
}
})
}
} | explode_data.jsonl/21511 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1572
} | [
2830,
3393,
15866,
7799,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
16463,
3760,
260,
2943,
11716,
198,
197,
32917,
7799,
220,
3056,
85,
16,
19127,
16,
17463,
10649,
198,
197,
197,
3748,
675,
414,
914,
198,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestGetPodDetails(t *testing.T) {
// POD_NAME & POD_NAMESPACE not exist
os.Setenv("POD_NAME", "")
os.Setenv("POD_NAMESPACE", "")
_, err1 := GetPodDetails(testclient.NewSimpleClientset())
if err1 == nil {
t.Errorf("expected an error but returned nil")
}
// POD_NAME not exist
os.Setenv("POD_NAME", "")
os.Setenv("POD_NAMESPACE", apiv1.NamespaceDefault)
_, err2 := GetPodDetails(testclient.NewSimpleClientset())
if err2 == nil {
t.Errorf("expected an error but returned nil")
}
// POD_NAMESPACE not exist
os.Setenv("POD_NAME", "testpod")
os.Setenv("POD_NAMESPACE", "")
_, err3 := GetPodDetails(testclient.NewSimpleClientset())
if err3 == nil {
t.Errorf("expected an error but returned nil")
}
// POD not exist
os.Setenv("POD_NAME", "testpod")
os.Setenv("POD_NAMESPACE", apiv1.NamespaceDefault)
_, err4 := GetPodDetails(testclient.NewSimpleClientset())
if err4 == nil {
t.Errorf("expected an error but returned nil")
}
// success to get PodInfo
fkClient := testclient.NewSimpleClientset(
&apiv1.PodList{Items: []apiv1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: apiv1.NamespaceDefault,
Labels: map[string]string{
"first": "first_label",
"second": "second_label",
},
},
}}},
&apiv1.NodeList{Items: []apiv1.Node{{
ObjectMeta: metav1.ObjectMeta{
Name: "demo",
},
Status: apiv1.NodeStatus{
Addresses: []apiv1.NodeAddress{
{
Type: apiv1.NodeInternalIP,
Address: "10.0.0.1",
},
},
},
}}})
epi, err5 := GetPodDetails(fkClient)
if err5 != nil {
t.Errorf("expected a PodInfo but returned error")
return
}
if epi == nil {
t.Errorf("expected a PodInfo but returned nil")
}
} | explode_data.jsonl/5383 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 753
} | [
2830,
3393,
1949,
23527,
7799,
1155,
353,
8840,
836,
8,
341,
197,
322,
90501,
4708,
609,
90501,
34552,
537,
3000,
198,
25078,
4202,
3160,
445,
2045,
35,
4708,
497,
14676,
25078,
4202,
3160,
445,
2045,
35,
34552,
497,
14676,
197,
6878,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestCreateTables(t *testing.T) {
err := DB.CreateTables(UserProfile{}, Post{})
assert.Nil(t, err)
assert.True(t, DB.CheckIfTableExists("user_profiles"))
assert.True(t, DB.CheckIfTableExists("renamed_posts"))
} | explode_data.jsonl/59898 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 85
} | [
2830,
3393,
4021,
21670,
1155,
353,
8840,
836,
8,
341,
9859,
1669,
5952,
7251,
21670,
13087,
8526,
22655,
3877,
37790,
6948,
59678,
1155,
11,
1848,
340,
6948,
32443,
1155,
11,
5952,
10600,
2679,
2556,
15575,
445,
872,
64021,
5455,
6948,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestSet_Join(t *testing.T) {
gtest.C(t, func(t *gtest.T) {
s1 := gset.New(true)
s1.Add("a", "a1", "b", "c")
str1 := s1.Join(",")
t.Assert(strings.Contains(str1, "a1"), true)
})
gtest.C(t, func(t *gtest.T) {
s1 := gset.New(true)
s1.Add("a", `"b"`, `\c`)
str1 := s1.Join(",")
t.Assert(strings.Contains(str1, `"b"`), true)
t.Assert(strings.Contains(str1, `\c`), true)
t.Assert(strings.Contains(str1, `a`), true)
})
} | explode_data.jsonl/34390 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 242
} | [
2830,
3393,
1649,
10598,
1961,
1155,
353,
8840,
836,
8,
341,
3174,
1944,
727,
1155,
11,
2915,
1155,
353,
82038,
836,
8,
341,
197,
1903,
16,
1669,
342,
746,
7121,
3715,
340,
197,
1903,
16,
1904,
445,
64,
497,
330,
64,
16,
497,
330,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestValidateImageGCPolicy(t *testing.T) {
testCases := []struct {
name string
imageGCPolicy ImageGCPolicy
expectErr string
}{
{
name: "Test for LowThresholdPercent < HighThresholdPercent",
imageGCPolicy: ImageGCPolicy{
HighThresholdPercent: 2,
LowThresholdPercent: 1,
},
},
{
name: "Test for HighThresholdPercent < 0,",
imageGCPolicy: ImageGCPolicy{
HighThresholdPercent: -1,
},
expectErr: "invalid HighThresholdPercent -1, must be in range [0-100]",
},
{
name: "Test for HighThresholdPercent > 100",
imageGCPolicy: ImageGCPolicy{
HighThresholdPercent: 101,
},
expectErr: "invalid HighThresholdPercent 101, must be in range [0-100]",
},
{
name: "Test for LowThresholdPercent < 0",
imageGCPolicy: ImageGCPolicy{
LowThresholdPercent: -1,
},
expectErr: "invalid LowThresholdPercent -1, must be in range [0-100]",
},
{
name: "Test for LowThresholdPercent > 100",
imageGCPolicy: ImageGCPolicy{
LowThresholdPercent: 101,
},
expectErr: "invalid LowThresholdPercent 101, must be in range [0-100]",
},
{
name: "Test for LowThresholdPercent > HighThresholdPercent",
imageGCPolicy: ImageGCPolicy{
HighThresholdPercent: 1,
LowThresholdPercent: 2,
},
expectErr: "LowThresholdPercent 2 can not be higher than HighThresholdPercent 1",
},
}
for _, tc := range testCases {
if _, err := NewImageGCManager(nil, nil, nil, nil, tc.imageGCPolicy, ""); err != nil {
if err.Error() != tc.expectErr {
t.Errorf("[%s:]Expected err:%v, but got:%v", tc.name, tc.expectErr, err.Error())
}
}
}
} | explode_data.jsonl/48117 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 692
} | [
2830,
3393,
17926,
1906,
38,
7123,
8018,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
11609,
688,
914,
198,
197,
31426,
38,
7123,
8018,
4654,
38,
7123,
8018,
198,
197,
24952,
7747,
257,
914,
198,
197,
59403,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestMissingPatternDiagnostic(t *testing.T) {
testenv.NeedsGo1Point(t, 16)
const files = `
-- go.mod --
module example.com
-- x.go --
package x
import (
_ "embed"
)
// Issue 47436
func F() {}
//go:embed NONEXISTENT
var foo string
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("x.go")
env.Await(env.DiagnosticAtRegexpWithMessage("x.go", `NONEXISTENT`, "no matching files found"))
env.RegexpReplace("x.go", `NONEXISTENT`, "x.go")
env.Await(EmptyDiagnostics("x.go"))
})
} | explode_data.jsonl/1719 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 216
} | [
2830,
3393,
25080,
15760,
79388,
1155,
353,
8840,
836,
8,
341,
18185,
3160,
2067,
68,
6767,
10850,
16,
2609,
1155,
11,
220,
16,
21,
340,
4777,
3542,
284,
22074,
313,
728,
10929,
39514,
4352,
3110,
905,
198,
313,
856,
18002,
39514,
172... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLinkWithCreationModeError(t *testing.T) {
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
fakeNodePool := instances.NewNodePool(fakeIGs, defaultNamer)
fakeGCE := gce.FakeGCECloud(gce.DefaultTestClusterValues())
linker := newTestIGLinker(fakeGCE, fakeNodePool)
sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP}
modes := []BalancingMode{Rate, Utilization}
// block the update of Backends with the given balancingMode
// and verify that a backend with the other balancingMode is
// updated properly.
for i, bm := range modes {
(fakeGCE.Compute().(*cloud.MockGCE)).MockBackendServices.UpdateHook = func(ctx context.Context, key *meta.Key, be *compute.BackendService, m *cloud.MockBackendServices) error {
for _, b := range be.Backends {
if b.BalancingMode == string(bm) {
return &googleapi.Error{Code: http.StatusBadRequest}
}
}
return mock.UpdateBackendServiceHook(ctx, key, be, m)
}
// Mimic the instance group being created
if _, err := linker.instancePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}); err != nil {
t.Fatalf("Did not expect error when ensuring IG for ServicePort %+v: %v", sp, err)
}
// Mimic the syncer creating the backend.
linker.backendPool.Create(sp, "fake-health-check-link")
if err := linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil {
t.Fatalf("%v", err)
}
be, err := fakeGCE.GetGlobalBackendService(sp.BackendName(defaultNamer))
if err != nil {
t.Fatalf("%v", err)
}
if len(be.Backends) == 0 {
t.Fatalf("Expected Backends to be created")
}
for _, b := range be.Backends {
if b.BalancingMode != string(modes[(i+1)%len(modes)]) {
t.Fatalf("Wrong balancing mode, expected %v got %v", modes[(i+1)%len(modes)], b.BalancingMode)
}
}
linker.backendPool.Delete(sp.BackendName(defaultNamer))
}
} | explode_data.jsonl/81934 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 717
} | [
2830,
3393,
3939,
2354,
32701,
3636,
1454,
1155,
353,
8840,
836,
8,
341,
1166,
726,
1914,
82,
1669,
13121,
7121,
52317,
2523,
22173,
7,
4917,
7121,
703,
1507,
1638,
45,
15232,
340,
1166,
726,
1955,
10551,
1669,
13121,
7121,
1955,
10551,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestCopyBundleWithNonCollocatedReferencedImagesToRepo(t *testing.T) {
env := BuildEnv(t)
imgpkg := Imgpkg{t, Logger{}, env.ImgpkgPath}
assetsPath := filepath.Join("assets", "simple-app")
randFile, err := addRandomFile(assetsPath)
if err != nil {
t.Fatalf("failed to create unuique file: %v", err)
}
defer os.Remove(randFile)
image := env.Image + "-image-outside-repo"
out := imgpkg.Run([]string{"push", "--tty", "-i", image, "-f", assetsPath})
imageDigest := fmt.Sprintf("@%s", extractDigest(out, t))
// image intentionally does not exist in bundle repo
imageDigestRef := image + imageDigest
imgsYml := fmt.Sprintf(`---
apiVersion: imgpkg.carvel.dev/v1alpha1
kind: ImagesLock
spec:
images:
- image: %s
`, imageDigestRef)
imgpkgDir, err := createBundleDir(assetsPath, bundleYAML, imgsYml)
if err != nil {
t.Fatalf("failed to create bundle dir: %v", err)
}
defer os.RemoveAll(imgpkgDir)
out = imgpkg.Run([]string{"push", "--tty", "-b", env.Image, "-f", assetsPath})
bundleDigest := fmt.Sprintf("@%s", extractDigest(out, t))
bundleDigestRef := env.Image + bundleDigest
imgpkg.Run([]string{"copy", "--bundle", bundleDigestRef, "--to-repo", env.RelocationRepo})
refs := []string{env.RelocationRepo + imageDigest, env.RelocationRepo + bundleDigest}
if err := validateImagePresence(refs); err != nil {
t.Fatalf("could not validate image presence: %v", err)
}
} | explode_data.jsonl/23215 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 524
} | [
2830,
3393,
12106,
8409,
2354,
8121,
6127,
39463,
47447,
5767,
14228,
1249,
25243,
1155,
353,
8840,
836,
8,
341,
57538,
1669,
7854,
14359,
1155,
340,
39162,
30069,
1669,
2362,
21888,
7351,
90,
83,
11,
9514,
22655,
6105,
13,
13033,
30069,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestCookieStore_Get_decrypt_failure(t *testing.T) {
cipher := &fakeCipher{
fakeDecrypt: func(ciphertext []byte) ([]byte, error) {
return nil, errors.New("test decrypt failure")
},
}
opt := CookieOption{}
cookieman := New(cipher, opt)
w := httptest.NewRecorder()
store := cookieman.NewCookieStore("n", nil)
if err := store.Set(w, []byte("v")); err != nil {
t.Error(err)
}
req := GetRequestWithCookie(w)
if _, err := store.Get(req); err == nil {
t.Error("got nil, but want error")
}
} | explode_data.jsonl/48010 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 206
} | [
2830,
3393,
20616,
6093,
13614,
80764,
43618,
1155,
353,
8840,
836,
8,
341,
1444,
10558,
1669,
609,
30570,
79460,
515,
197,
1166,
726,
89660,
25,
2915,
1337,
45043,
3056,
3782,
8,
34923,
3782,
11,
1465,
8,
341,
298,
853,
2092,
11,
597... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestValidateOnceAWeekWindowFormat(t *testing.T) {
cases := []struct {
Value string
ErrCount int
}{
{
// once a day window format
Value: "04:00-05:00",
ErrCount: 1,
},
{
// invalid day of week
Value: "san:04:00-san:05:00",
ErrCount: 1,
},
{
// invalid hour
Value: "sun:24:00-san:25:00",
ErrCount: 1,
},
{
// invalid min
Value: "sun:04:00-sun:04:60",
ErrCount: 1,
},
{
// valid format
Value: "sun:04:00-sun:05:00",
ErrCount: 0,
},
{
// "Sun" can also be used
Value: "Sun:04:00-Sun:05:00",
ErrCount: 0,
},
{
// valid format
Value: "",
ErrCount: 0,
},
}
for _, tc := range cases {
_, errors := validateOnceAWeekWindowFormat(tc.Value, "maintenance_window")
if len(errors) != tc.ErrCount {
t.Fatalf("Expected %d validation errors, But got %d errors for \"%s\"", tc.ErrCount, len(errors), tc.Value)
}
}
} | explode_data.jsonl/78592 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 468
} | [
2830,
3393,
17926,
12522,
32,
17053,
4267,
4061,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
47399,
262,
914,
198,
197,
197,
7747,
2507,
526,
198,
197,
59403,
197,
197,
515,
298,
197,
322,
3055,
264,
1899,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestToleratingMissingFiles(t *testing.T) {
loadingRules := ClientConfigLoadingRules{
Precedence: []string{"bogus1", "bogus2", "bogus3"},
}
_, err := loadingRules.Load()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
} | explode_data.jsonl/67847 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 95
} | [
2830,
3393,
51,
22072,
1095,
25080,
10809,
1155,
353,
8840,
836,
8,
341,
197,
10628,
26008,
1669,
8423,
2648,
8578,
26008,
515,
197,
197,
4703,
1998,
763,
25,
3056,
917,
4913,
65,
538,
355,
16,
497,
330,
65,
538,
355,
17,
497,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGetSenderWithSameIDsReturnsSameSender(t *testing.T) {
resetAggregator()
InitAggregator(nil, "")
sender1, err := GetSender(checkID1)
assert.Nil(t, err)
assert.Len(t, aggregatorInstance.checkSamplers, 1)
assert.Len(t, senderPool.senders, 1)
sender2, err := GetSender(checkID1)
assert.Nil(t, err)
assert.Equal(t, sender1, sender2)
assert.Len(t, aggregatorInstance.checkSamplers, 1)
assert.Len(t, senderPool.senders, 1)
} | explode_data.jsonl/78295 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 179
} | [
2830,
3393,
1949,
20381,
2354,
19198,
30466,
16446,
19198,
20381,
1155,
353,
8840,
836,
8,
341,
70343,
9042,
58131,
741,
98762,
9042,
58131,
27907,
11,
85617,
1903,
1659,
16,
11,
1848,
1669,
2126,
20381,
24077,
915,
16,
340,
6948,
59678,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTaxonomyDataParse(t *testing.T) {
type testCase struct {
lang string
cnt int
}
cases := []testCase{
testCase{"cs-CZ", 5427},
testCase{"da-DK", 5427},
testCase{"de-CH", 5427},
testCase{"de-DE", 5427},
testCase{"en-US", 5427},
testCase{"es-ES", 5427},
testCase{"fr-FR", 5427},
testCase{"it-IT", 5427},
testCase{"ja-JP", 5442},
testCase{"pl-PL", 5427},
testCase{"pt-BR", 5427},
testCase{"sv-SE", 5427},
testCase{"zh-CN", 4586},
}
for _, cas := range cases {
t.Run(fmt.Sprintf("Load taxonomy data file in %s", cas.lang), func(t *testing.T) {
td := taxonomyData{
Language: cas.lang,
LoadFunc: data.Asset,
}
if td.Filename() != fmt.Sprintf("taxonomy-with-ids.%s.txt", cas.lang) {
t.Fatalf("%s is not valid taxonomy data filename", td.Filename())
}
if err := td.Parse(); err != nil {
t.Fatal(err)
}
if len(td.data) != cas.cnt {
t.Fatalf("%s contains %d records, not eq. to %d", td.Filename(), len(td.data), cas.cnt)
}
})
}
} | explode_data.jsonl/35846 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 466
} | [
2830,
3393,
31349,
16974,
1043,
14463,
1155,
353,
8840,
836,
8,
341,
13158,
54452,
2036,
341,
197,
197,
5205,
914,
198,
197,
60553,
220,
526,
198,
197,
532,
1444,
2264,
1669,
3056,
66194,
515,
197,
18185,
4207,
4913,
4837,
7658,
57,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestCanSetIdempotencyToken(t *testing.T) {
cases := []struct {
CanSet bool
Case interface{}
}{
{
true,
struct {
Field *string `idempotencyToken:"true"`
}{},
},
{
true,
struct {
Field string `idempotencyToken:"true"`
}{},
},
{
false,
struct {
Field *string `idempotencyToken:"true"`
}{Field: new(string)},
},
{
false,
struct {
Field string `idempotencyToken:"true"`
}{Field: "value"},
},
{
false,
struct {
Field *int `idempotencyToken:"true"`
}{},
},
{
false,
struct {
Field *string
}{},
},
}
for i, c := range cases {
v := reflect.Indirect(reflect.ValueOf(c.Case))
ty := v.Type()
canSet := protocol.CanSetIdempotencyToken(v.Field(0), ty.Field(0))
if e, a := c.CanSet, canSet; e != a {
t.Errorf("%d, expect %v, got %v", i, e, a)
}
}
} | explode_data.jsonl/78825 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 423
} | [
2830,
3393,
6713,
1649,
764,
3262,
354,
2251,
3323,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
6258,
276,
1649,
1807,
198,
197,
197,
4207,
256,
3749,
16094,
197,
59403,
197,
197,
515,
298,
42808,
345,
298,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestGetLabelsForVolume(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
testCloud0 := GetTestCloud(ctrl)
diskName := "disk1"
diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
testCloud0.SubscriptionID, testCloud0.ResourceGroup, diskName)
diskSizeGB := int32(30)
fakeGetDiskFailed := "fakeGetDiskFailed"
fakeGetDiskFailedDiskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
testCloud0.SubscriptionID, testCloud0.ResourceGroup, fakeGetDiskFailed)
testCases := []struct {
desc string
diskName string
pv *v1.PersistentVolume
existedDisk compute.Disk
expected map[string]string
expectedErr bool
expectedErrMsg error
}{
{
desc: "labels and no error shall be returned if everything is good",
diskName: diskName,
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: diskURI,
},
},
},
},
existedDisk: compute.Disk{Name: to.StringPtr(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}, Zones: &[]string{"1"}},
expected: map[string]string{
v1.LabelTopologyRegion: testCloud0.Location,
v1.LabelTopologyZone: testCloud0.makeZone(testCloud0.Location, 1),
},
expectedErr: false,
},
{
desc: "an error shall be returned if everything is good with invalid zone",
diskName: diskName,
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: diskURI,
},
},
},
},
existedDisk: compute.Disk{Name: to.StringPtr(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}, Zones: &[]string{"invalid"}},
expectedErr: true,
expectedErrMsg: fmt.Errorf("failed to parse zone [invalid] for AzureDisk %v: %v", diskName, "strconv.Atoi: parsing \"invalid\": invalid syntax"),
},
{
desc: "nil shall be returned if everything is good with null Zones",
diskName: diskName,
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: diskURI,
},
},
},
},
existedDisk: compute.Disk{Name: to.StringPtr(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}},
expected: map[string]string{
v1.LabelTopologyRegion: testCloud0.Location,
},
expectedErr: false,
expectedErrMsg: nil,
},
{
desc: "an error shall be returned if everything is good with get disk failed",
diskName: fakeGetDiskFailed,
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: fakeGetDiskFailed,
DataDiskURI: fakeGetDiskFailedDiskURI,
},
},
},
},
existedDisk: compute.Disk{Name: to.StringPtr(fakeGetDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}, Zones: &[]string{"1"}},
expectedErr: true,
expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("Get Disk failed")),
},
{
desc: "an error shall be returned if everything is good with invalid DiskURI",
diskName: diskName,
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: "invalidDiskURI",
},
},
},
},
existedDisk: compute.Disk{Name: to.StringPtr(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}, Zones: &[]string{"1"}},
expectedErr: true,
expectedErrMsg: fmt.Errorf("invalid disk URI: invalidDiskURI"),
},
{
desc: "nil shall be returned if everything is good but pv.Spec.AzureDisk.DiskName is cloudvolume.ProvisionedVolumeName",
diskName: diskName,
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: cloudvolume.ProvisionedVolumeName,
DataDiskURI: diskURI,
},
},
},
},
existedDisk: compute.Disk{Name: to.StringPtr(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}},
expected: nil,
expectedErr: false,
},
{
desc: "nil shall be returned if everything is good but pv.Spec.AzureDisk is nil",
diskName: diskName,
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{},
},
},
existedDisk: compute.Disk{Name: to.StringPtr(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}},
expected: nil,
expectedErr: false,
},
}
for i, test := range testCases {
testCloud := GetTestCloud(ctrl)
mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
if test.diskName == fakeGetDiskFailed {
mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, &retry.Error{RawError: fmt.Errorf("Get Disk failed")}).AnyTimes()
} else {
mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes()
}
mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(nil).AnyTimes()
result, err := testCloud.GetLabelsForVolume(context.TODO(), test.pv)
assert.Equal(t, test.expected, result, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expected, result)
assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err)
}
} | explode_data.jsonl/36149 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2593
} | [
2830,
3393,
1949,
23674,
2461,
18902,
1155,
353,
8840,
836,
8,
341,
84381,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
16867,
23743,
991,
18176,
2822,
18185,
16055,
15,
1669,
2126,
2271,
16055,
62100,
340,
2698,
3187,
675,
1669,
330,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestOuterLinkV2WithMetadataPointerEmbedderDecode(t *testing.T) {
var o outerLinkV2WithMetadataPointerEmbedder
err := MsgpackDecode(&o, []byte{0x1, 0x2})
requireErrorHasSuffix(t, errCodecDecodeSelf, err)
} | explode_data.jsonl/72243 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 86
} | [
2830,
3393,
51322,
3939,
53,
17,
2354,
14610,
9084,
25486,
1107,
32564,
1155,
353,
8840,
836,
8,
341,
2405,
297,
15955,
3939,
53,
17,
2354,
14610,
9084,
25486,
1107,
198,
9859,
1669,
24205,
4748,
32564,
2099,
78,
11,
3056,
3782,
90,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestParsersCStyleLog(t *testing.T) {
env := newInputTestingEnvironment(t)
testlogName := "test.log"
inp := env.mustCreateInput(map[string]interface{}{
"paths": []string{env.abspath(testlogName)},
"prospector.scanner.check_interval": "1ms",
"parsers": []map[string]interface{}{
map[string]interface{}{
"multiline": map[string]interface{}{
"type": "pattern",
"pattern": "\\\\$",
"negate": false,
"match": "before",
"timeout": "100ms", // set to lower value to speed up test
},
},
},
})
testlines := []byte(`The following are log messages
This is a C style log\\
file which is on multiple\\
lines
In addition it has normal lines
The total should be 4 lines covered
`)
env.mustWriteLinesToFile(testlogName, testlines)
ctx, cancelInput := context.WithCancel(context.Background())
env.startInput(ctx, inp)
env.waitUntilEventCount(4)
env.requireOffsetInRegistry(testlogName, len(testlines))
cancelInput()
env.waitUntilInputStops()
} | explode_data.jsonl/14917 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 409
} | [
2830,
3393,
47,
40488,
34,
2323,
2201,
1155,
353,
8840,
836,
8,
341,
57538,
1669,
501,
2505,
16451,
12723,
1155,
692,
18185,
839,
675,
1669,
330,
1944,
1665,
698,
17430,
79,
1669,
6105,
69419,
4021,
2505,
9147,
14032,
31344,
67066,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSetCurrentLogLevel(t *testing.T) {
var log = Init(INFO_LEVEL, false, false)
log.SetLogLevel(WARN_LEVEL)
var levelNum, _ = log.GetCurrentLogLevel()
if levelNum != WARN_LEVEL {
t.Errorf("Got: %d expected: %d", levelNum, WARN_LEVEL)
}
} | explode_data.jsonl/3657 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
1649,
5405,
72676,
1155,
353,
8840,
836,
8,
341,
2405,
1487,
284,
15690,
62318,
17415,
11,
895,
11,
895,
692,
6725,
4202,
72676,
14031,
9051,
17415,
340,
2405,
2188,
4651,
11,
716,
284,
1487,
44242,
72676,
741,
743,
2188,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestEmptyCallStack(t *testing.T) {
t.Parallel()
var buf bytes.Buffer
p := NewProfile("test18836")
p.Add("foo", 47674)
p.WriteTo(&buf, 1)
p.Remove("foo")
got := buf.String()
prefix := "test18836 profile: total 1\n"
if !strings.HasPrefix(got, prefix) {
t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
}
lostevent := "lostProfileEvent"
if !strings.Contains(got, lostevent) {
t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
}
} | explode_data.jsonl/13656 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 205
} | [
2830,
3393,
3522,
7220,
4336,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
2405,
6607,
5820,
22622,
198,
3223,
1669,
1532,
8526,
445,
1944,
16,
23,
23,
18,
21,
1138,
3223,
1904,
445,
7975,
497,
220,
19,
22,
21,
22,
19,
34... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestWorkspace_ReadPipelineManifest(t *testing.T) {
copilotDir := "/copilot"
testCases := map[string]struct {
fs func() afero.Fs
expectedError error
}{
"reads existing pipeline manifest": {
fs: func() afero.Fs {
fs := afero.NewMemMapFs()
fs.MkdirAll("/copilot", 0755)
manifest, _ := fs.Create("/copilot/pipeline.yml")
defer manifest.Close()
manifest.Write([]byte("hello"))
return fs
},
expectedError: nil,
},
"when no pipeline file exists": {
fs: func() afero.Fs {
fs := afero.NewMemMapFs()
fs.Mkdir(copilotDir, 0755)
return fs
},
expectedError: ErrNoPipelineInWorkspace,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// GIVEN
fs := tc.fs()
ws := &Workspace{
copilotDir: copilotDir,
fsUtils: &afero.Afero{Fs: fs},
}
// WHEN
_, err := ws.ReadPipelineManifest()
// THEN
if tc.expectedError != nil {
require.Equal(t, tc.expectedError.Error(), err.Error())
} else {
require.NoError(t, err)
}
})
}
} | explode_data.jsonl/30121 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 501
} | [
2830,
3393,
45981,
38381,
34656,
38495,
1155,
353,
8840,
836,
8,
341,
1444,
453,
23958,
6184,
1669,
3521,
37728,
23958,
698,
18185,
37302,
1669,
2415,
14032,
60,
1235,
341,
197,
53584,
310,
2915,
368,
264,
802,
78,
991,
82,
198,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAlignment(t *testing.T) {
type T1inner struct {
a int
}
type T1 struct {
T1inner
f int
}
type T2inner struct {
a, b int
}
type T2 struct {
T2inner
f int
}
x := T1{T1inner{2}, 17}
check2ndField(x, uintptr(unsafe.Pointer(&x.f))-uintptr(unsafe.Pointer(&x)), t)
x1 := T2{T2inner{2, 3}, 17}
check2ndField(x1, uintptr(unsafe.Pointer(&x1.f))-uintptr(unsafe.Pointer(&x1)), t)
} | explode_data.jsonl/29543 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 198
} | [
2830,
3393,
7033,
1155,
353,
8840,
836,
8,
341,
13158,
350,
16,
4382,
2036,
341,
197,
11323,
526,
198,
197,
532,
13158,
350,
16,
2036,
341,
197,
10261,
16,
4382,
198,
197,
1166,
526,
198,
197,
532,
13158,
350,
17,
4382,
2036,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestVersion(t *testing.T) {
version1 := "20.02.15"
version2 := "20.03.25"
fmt.Println(CompareVersion(version1, version2))
version1 = "1.0.13"
version2 = "1.0.1a"
fmt.Println(CompareVersion(version1, version2))
version1 = "1.0.131"
version2 = "1.0.1a"
fmt.Println(CompareVersion(version1, version2))
version1 = "1.1.131"
version2 = "1.10.1a"
fmt.Println(CompareVersion(version1, version2))
version1 = "1.0.4"
version2 = "1.1.1"
fmt.Println(CompareVersion(version1, version2))
} | explode_data.jsonl/1125 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 221
} | [
2830,
3393,
5637,
1155,
353,
8840,
836,
8,
341,
74954,
16,
1669,
330,
17,
15,
13,
15,
17,
13,
16,
20,
698,
74954,
17,
1669,
330,
17,
15,
13,
15,
18,
13,
17,
20,
698,
11009,
12419,
7,
27374,
5637,
37770,
16,
11,
2319,
17,
4390,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNatSort(t *testing.T) {
// Validate that the order of SRV records returned by a DNS
// lookup for a k8s StatefulSet are ordered as expected when
// a natsort is done.
input := []string{
"memcached-10.memcached.cortex.svc.cluster.local.",
"memcached-1.memcached.cortex.svc.cluster.local.",
"memcached-6.memcached.cortex.svc.cluster.local.",
"memcached-3.memcached.cortex.svc.cluster.local.",
"memcached-25.memcached.cortex.svc.cluster.local.",
}
expected := []string{
"memcached-1.memcached.cortex.svc.cluster.local.",
"memcached-3.memcached.cortex.svc.cluster.local.",
"memcached-6.memcached.cortex.svc.cluster.local.",
"memcached-10.memcached.cortex.svc.cluster.local.",
"memcached-25.memcached.cortex.svc.cluster.local.",
}
natsort.Sort(input)
require.Equal(t, expected, input)
} | explode_data.jsonl/2661 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 350
} | [
2830,
3393,
65214,
10231,
1155,
353,
8840,
836,
8,
341,
197,
322,
23282,
429,
279,
1973,
315,
20880,
53,
7424,
5927,
553,
264,
27598,
198,
197,
322,
18615,
369,
264,
595,
23,
82,
3234,
1262,
1649,
525,
11457,
438,
3601,
979,
198,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMetricsMiddleware(t *testing.T) {
cfg := newFakeKeycloakConfig()
cfg.EnableMetrics = true
cfg.LocalhostMetrics = true
requests := []fakeRequest{
{
URI: cfg.WithOAuthURI(metricsURL),
Headers: map[string]string{
"X-Forwarded-For": "10.0.0.1",
},
ExpectedCode: http.StatusForbidden,
},
// Some request must run before this one to generate request status numbers
{
URI: cfg.WithOAuthURI(metricsURL),
ExpectedCode: http.StatusOK,
ExpectedContentContains: "proxy_request_status_total",
},
}
newFakeProxy(cfg).RunTests(t, requests)
} | explode_data.jsonl/14746 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 259
} | [
2830,
3393,
27328,
24684,
1155,
353,
8840,
836,
8,
341,
50286,
1669,
501,
52317,
1592,
88751,
2648,
741,
50286,
32287,
27328,
284,
830,
198,
50286,
20856,
3790,
27328,
284,
830,
198,
23555,
82,
1669,
3056,
30570,
1900,
515,
197,
197,
51... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestProviderRateLimiterHandler_Handle_SourceMicroService(t *testing.T) {
t.Log("testing providerratelimiter handler with source microservice and qps enabled as true")
initEnv()
c := handler.Chain{}
c.AddHandler(&handler.ProviderRateLimiterHandler{})
config.GlobalDefinition = &model.GlobalCfg{}
config.GlobalDefinition.Cse.FlowControl.Provider.QPS.Enabled = true
i := &invocation.Invocation{
SourceMicroService: "service1",
SchemaID: "schema1",
OperationID: "SayHello",
Args: &helloworld.HelloRequest{Name: "peter"},
}
c.Next(i, func(r *invocation.Response) error {
assert.NoError(t, r.Err)
log.Println(r.Result)
return r.Err
})
} | explode_data.jsonl/62821 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 267
} | [
2830,
3393,
5179,
11564,
43,
17700,
3050,
42714,
48122,
34609,
1860,
1155,
353,
8840,
836,
8,
341,
3244,
5247,
445,
8840,
2059,
59609,
301,
17700,
7013,
448,
2530,
8003,
7936,
323,
2804,
1690,
8970,
438,
830,
5130,
28248,
14359,
741,
14... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWatchNewFile(t *testing.T) {
t.Parallel()
dir, ks := tmpKeyStore(t, false)
defer os.RemoveAll(dir)
// Ensure the watcher is started before adding any files.
ks.Accounts()
time.Sleep(1000 * time.Millisecond)
// Move in the files.
wantAccounts := make([]accounts.Account, len(cachetestAccounts))
for i := range cachetestAccounts {
wantAccounts[i] = accounts.Account{
Address: cachetestAccounts[i].Address,
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, filepath.Base(cachetestAccounts[i].URL.Path))},
}
if err := cp.CopyFile(wantAccounts[i].URL.Path, cachetestAccounts[i].URL.Path); err != nil {
t.Fatal(err)
}
}
// ks should see the accounts.
var list []accounts.Account
for d := 200 * time.Millisecond; d < 5*time.Second; d *= 2 {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
select {
case <-ks.changes:
default:
t.Fatalf("wasn't notified of new accounts")
}
return
}
time.Sleep(d)
}
t.Errorf("got %s, want %s", spew.Sdump(list), spew.Sdump(wantAccounts))
} | explode_data.jsonl/36027 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 448
} | [
2830,
3393,
14247,
3564,
1703,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
48532,
11,
41282,
1669,
4174,
1592,
6093,
1155,
11,
895,
340,
16867,
2643,
84427,
14161,
692,
197,
322,
29279,
279,
55727,
374,
3855,
1573,
7842,
894,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestNewPairs(t *testing.T) {
for _, tc := range []struct {
Name string
Local Candidates
Remote Candidates
Result Pairs
}{
{
Name: "Blank",
},
{
Name: "No pairs",
Local: Candidates{
{
Addr: Addr{
IP: net.ParseIP("1.1.1.1"),
},
},
},
Remote: Candidates{
{
Addr: Addr{
IP: net.ParseIP("2001:11:12:13:14:15:16:17"),
},
},
},
},
{
Name: "Simple",
Local: Candidates{
{
Addr: Addr{
IP: net.ParseIP("1.1.1.1"),
},
},
},
Remote: Candidates{
{
Addr: Addr{
IP: net.ParseIP("1.1.1.2"),
},
},
},
Result: Pairs{
{
Local: Candidate{
Addr: Addr{
IP: net.ParseIP("1.1.1.1"),
},
},
Remote: Candidate{
Addr: Addr{
IP: net.ParseIP("1.1.1.2"),
},
},
},
},
},
} {
t.Run(tc.Name, func(t *testing.T) {
got := NewPairs(tc.Local, tc.Remote)
if len(got) != len(tc.Result) {
t.Fatalf("bad length: %d (got) != %d (expected)", len(got), len(tc.Result))
}
for i := range tc.Result {
expectedAddr := tc.Result[i].Remote.Addr
gotAddr := got[i].Remote.Addr
if !gotAddr.Equal(expectedAddr) {
t.Errorf("[%d]: remote addr mismatch: %s (got) != %s (expected)", i, gotAddr, expectedAddr)
}
expectedAddr = tc.Result[i].Local.Addr
gotAddr = got[i].Local.Addr
if !gotAddr.Equal(expectedAddr) {
t.Errorf("[%d]: local addr mismatch: %s (got) != %s (expected)", i, gotAddr, expectedAddr)
}
}
})
}
} | explode_data.jsonl/9182 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 862
} | [
2830,
3393,
3564,
54228,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
17130,
1669,
2088,
3056,
1235,
341,
197,
21297,
256,
914,
198,
197,
82404,
220,
64438,
198,
197,
197,
24703,
64438,
198,
197,
56503,
393,
4720,
198,
197,
59403,
197,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestReconcilePrivateDNS(t *testing.T) {
testcases := []struct {
name string
expectedError string
expect func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder)
}{
{
name: "no private dns",
expectedError: "",
expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) {
s.PrivateDNSSpec().Return(nil)
},
},
{
name: "create ipv4 private dns successfully",
expectedError: "",
expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) {
s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{
ZoneName: "my-dns-zone",
VNetName: "my-vnet",
VNetResourceGroup: "vnet-rg",
LinkName: "my-link",
Records: []infrav1.AddressRecord{
{
Hostname: "hostname-1",
IP: "10.0.0.8",
},
},
})
s.ResourceGroup().AnyTimes().Return("my-rg")
s.SubscriptionID().Return("123")
m.CreateOrUpdateZone(gomockinternal.AContext(), "my-rg", "my-dns-zone", privatedns.PrivateZone{Location: to.StringPtr(azure.Global)})
m.CreateOrUpdateLink(gomockinternal.AContext(), "my-rg", "my-dns-zone", "my-link", privatedns.VirtualNetworkLink{
VirtualNetworkLinkProperties: &privatedns.VirtualNetworkLinkProperties{
VirtualNetwork: &privatedns.SubResource{
ID: to.StringPtr("/subscriptions/123/resourceGroups/vnet-rg/providers/Microsoft.Network/virtualNetworks/my-vnet"),
},
RegistrationEnabled: to.BoolPtr(false),
},
Location: to.StringPtr(azure.Global),
})
m.CreateOrUpdateRecordSet(gomockinternal.AContext(), "my-rg", "my-dns-zone", privatedns.A, "hostname-1", privatedns.RecordSet{
RecordSetProperties: &privatedns.RecordSetProperties{
TTL: to.Int64Ptr(300),
ARecords: &[]privatedns.ARecord{
{
Ipv4Address: to.StringPtr("10.0.0.8"),
},
},
},
})
},
},
{
name: "create ipv6 private dns successfully",
expectedError: "",
expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) {
s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{
ZoneName: "my-dns-zone",
VNetName: "my-vnet",
VNetResourceGroup: "vnet-rg",
LinkName: "my-link",
Records: []infrav1.AddressRecord{
{
Hostname: "hostname-2",
IP: "2603:1030:805:2::b",
},
},
})
s.ResourceGroup().AnyTimes().Return("my-rg")
s.SubscriptionID().Return("123")
m.CreateOrUpdateZone(gomockinternal.AContext(), "my-rg", "my-dns-zone", privatedns.PrivateZone{Location: to.StringPtr(azure.Global)})
m.CreateOrUpdateLink(gomockinternal.AContext(), "my-rg", "my-dns-zone", "my-link", privatedns.VirtualNetworkLink{
VirtualNetworkLinkProperties: &privatedns.VirtualNetworkLinkProperties{
VirtualNetwork: &privatedns.SubResource{
ID: to.StringPtr("/subscriptions/123/resourceGroups/vnet-rg/providers/Microsoft.Network/virtualNetworks/my-vnet"),
},
RegistrationEnabled: to.BoolPtr(false),
},
Location: to.StringPtr(azure.Global),
})
m.CreateOrUpdateRecordSet(gomockinternal.AContext(), "my-rg", "my-dns-zone", privatedns.AAAA, "hostname-2", privatedns.RecordSet{
RecordSetProperties: &privatedns.RecordSetProperties{
TTL: to.Int64Ptr(300),
AaaaRecords: &[]privatedns.AaaaRecord{
{
Ipv6Address: to.StringPtr("2603:1030:805:2::b"),
},
},
},
})
},
},
{
name: "link creation fails",
expectedError: "failed to create virtual network link my-link: #: Internal Server Error: StatusCode=500",
expect: func(s *mock_privatedns.MockScopeMockRecorder, m *mock_privatedns.MockclientMockRecorder) {
s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
s.PrivateDNSSpec().Return(&azure.PrivateDNSSpec{
ZoneName: "my-dns-zone",
VNetName: "my-vnet",
VNetResourceGroup: "vnet-rg",
LinkName: "my-link",
Records: []infrav1.AddressRecord{
{
Hostname: "hostname-1",
IP: "10.0.0.8",
},
},
})
s.ResourceGroup().AnyTimes().Return("my-rg")
s.SubscriptionID().Return("123")
m.CreateOrUpdateZone(gomockinternal.AContext(), "my-rg", "my-dns-zone", privatedns.PrivateZone{Location: to.StringPtr(azure.Global)})
m.CreateOrUpdateLink(gomockinternal.AContext(), "my-rg", "my-dns-zone", "my-link", privatedns.VirtualNetworkLink{
VirtualNetworkLinkProperties: &privatedns.VirtualNetworkLinkProperties{
VirtualNetwork: &privatedns.SubResource{
ID: to.StringPtr("/subscriptions/123/resourceGroups/vnet-rg/providers/Microsoft.Network/virtualNetworks/my-vnet"),
},
RegistrationEnabled: to.BoolPtr(false),
},
Location: to.StringPtr(azure.Global),
}).Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error"))
},
},
}
for _, tc := range testcases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
t.Parallel()
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scopeMock := mock_privatedns.NewMockScope(mockCtrl)
clientMock := mock_privatedns.NewMockclient(mockCtrl)
tc.expect(scopeMock.EXPECT(), clientMock.EXPECT())
s := &Service{
Scope: scopeMock,
client: clientMock,
}
err := s.Reconcile(context.TODO())
if tc.expectedError != "" {
g.Expect(err).To(HaveOccurred())
g.Expect(err).To(MatchError(tc.expectedError))
} else {
g.Expect(err).NotTo(HaveOccurred())
}
})
}
} | explode_data.jsonl/70329 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2724
} | [
2830,
3393,
693,
40446,
457,
16787,
61088,
1155,
353,
8840,
836,
8,
341,
18185,
23910,
1669,
3056,
1235,
341,
197,
11609,
688,
914,
198,
197,
42400,
1454,
914,
198,
197,
24952,
286,
2915,
1141,
353,
16712,
24726,
657,
4412,
24664,
10803... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestAvailableDevices exercises getAvailableDevices against a fixed set of
// seven fake disks, covering the "all" selector (with a metadata device),
// empty selectors, regex filters, and exact-name selection.
func TestAvailableDevices(t *testing.T) {
	executor := &exectest.MockExecutor{}
	// set up a mock function to return "rook owned" partitions on the device and it does not have a filesystem
	executor.MockExecuteCommandWithOutput = func(debug bool, name string, command string, args ...string) (string, error) {
		logger.Infof("OUTPUT for %s. %s %+v", name, command, args)
		if command == "lsblk" {
			if strings.Index(name, "sdb") != -1 {
				// /dev/sdb has a partition
				return `NAME="sdb" SIZE="65" TYPE="disk" PKNAME=""
NAME="sdb1" SIZE="30" TYPE="part" PKNAME="sdb"`, nil
			}
			return "", nil
		} else if command == "blkid" {
			if strings.Index(name, "sdb1") != -1 {
				// partition sdb1 has a label MY-PART
				return "MY-PART", nil
			}
		} else if command == "df" {
			if strings.Index(name, "sdc") != -1 {
				// /dev/sdc has a file system
				return "/dev/sdc ext4", nil
			}
			return "", nil
		}
		// any other command is unexpected in this test and fails loudly
		return "", fmt.Errorf("unknown command %s %+v", command, args)
	}
	context := &clusterd.Context{Executor: executor}
	context.Devices = []*clusterd.LocalDisk{
		{Name: "sda"},
		{Name: "sdb"},
		{Name: "sdc"},
		{Name: "sdd"},
		{Name: "nvme01"},
		{Name: "rda"},
		{Name: "rdb"},
	}
	// select all devices, including nvme01 for metadata
	mapping, err := getAvailableDevices(context, "all", "nvme01", true)
	assert.Nil(t, err)
	// 5 of the 7 devices remain: sdb (partitioned) and sdc (has a filesystem)
	// are excluded per the mock output above; nvme01 keeps an empty metadata list.
	assert.Equal(t, 5, len(mapping.Entries))
	assert.Equal(t, -1, mapping.Entries["sda"].Data)
	assert.Equal(t, -1, mapping.Entries["sdd"].Data)
	assert.Equal(t, -1, mapping.Entries["rda"].Data)
	assert.Equal(t, -1, mapping.Entries["rdb"].Data)
	assert.Equal(t, -1, mapping.Entries["nvme01"].Data)
	assert.NotNil(t, mapping.Entries["nvme01"].Metadata)
	assert.Equal(t, 0, len(mapping.Entries["nvme01"].Metadata))
	// select no devices both using and not using a filter
	mapping, err = getAvailableDevices(context, "", "", false)
	assert.Nil(t, err)
	assert.Equal(t, 0, len(mapping.Entries))
	mapping, err = getAvailableDevices(context, "", "", true)
	assert.Nil(t, err)
	assert.Equal(t, 0, len(mapping.Entries))
	// select the sd* devices
	mapping, err = getAvailableDevices(context, "^sd.$", "", true)
	assert.Nil(t, err)
	assert.Equal(t, 2, len(mapping.Entries))
	assert.Equal(t, -1, mapping.Entries["sda"].Data)
	assert.Equal(t, -1, mapping.Entries["sdd"].Data)
	// select an exact device
	mapping, err = getAvailableDevices(context, "sdd", "", false)
	assert.Nil(t, err)
	assert.Equal(t, 1, len(mapping.Entries))
	assert.Equal(t, -1, mapping.Entries["sdd"].Data)
	// select all devices except those that have a prefix of "s"
	mapping, err = getAvailableDevices(context, "^[^s]", "", true)
	assert.Nil(t, err)
	assert.Equal(t, 3, len(mapping.Entries))
	assert.Equal(t, -1, mapping.Entries["rda"].Data)
	assert.Equal(t, -1, mapping.Entries["rdb"].Data)
	assert.Equal(t, -1, mapping.Entries["nvme01"].Data)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1179
} | [
2830,
3393,
16485,
40835,
1155,
353,
8840,
836,
8,
341,
67328,
4831,
1669,
609,
327,
439,
477,
24664,
25255,
16094,
197,
322,
738,
705,
264,
7860,
729,
311,
470,
330,
299,
562,
12938,
1,
46688,
389,
279,
3671,
323,
432,
1558,
537,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestDnsStatisticsRecord(t *testing.T) {
config := dnsutils.GetFakeConfig()
stats := NewStatsPerStream(config, "test")
dm := dnsutils.DnsMessage{}
dm.Init()
dm.DNS.Type = dnsutils.DnsQuery
dm.NetworkInfo.Family = "INET"
dm.NetworkInfo.Protocol = "UDP"
dm.DNS.Qname = "dnscollector.test."
stats.Record(dm)
nb := stats.GetTotalDomains()
if nb != 1 {
t.Errorf("invalid number of domains, expected 1, got %d", nb)
}
} | explode_data.jsonl/75653 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 177
} | [
2830,
3393,
35,
4412,
38599,
6471,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
44077,
6031,
2234,
52317,
2648,
741,
79659,
1669,
1532,
16635,
3889,
3027,
8754,
11,
330,
1944,
5130,
2698,
76,
1669,
44077,
6031,
909,
4412,
2052,
16094,
269... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestRuleError(t *testing.T) {
tests := []struct {
in ticketdb.DBError
want string
}{
{ticketdb.DBError{Description: "duplicate block"},
"duplicate block",
},
{ticketdb.DBError{Description: "human-readable error"},
"human-readable error",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.Error()
if result != test.want {
t.Errorf("Error #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
} | explode_data.jsonl/80670 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 203
} | [
2830,
3393,
11337,
1454,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
17430,
256,
11727,
1999,
22537,
1454,
198,
197,
50780,
914,
198,
197,
59403,
197,
197,
90,
26534,
1999,
22537,
1454,
90,
5009,
25,
330,
63826,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestReconcileWorkspaceMissing runs the TaskRun reconciler against a Task
// that declares a workspace ("ws1") which the TaskRun does not bind, and
// expects the run to end with a failed-validation condition.
func TestReconcileWorkspaceMissing(t *testing.T) {
	taskWithWorkspace := tb.Task("test-task-with-workspace",
		tb.TaskSpec(
			tb.TaskWorkspace("ws1", "a test task workspace", "", true),
		), tb.TaskNamespace("foo"))
	// The TaskRun references the task but supplies no workspace binding.
	taskRun := tb.TaskRun("test-taskrun-missing-workspace", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
		tb.TaskRunTaskRef(taskWithWorkspace.Name, tb.TaskRefAPIVersion("a1")),
	))
	d := test.Data{
		Tasks: []*v1alpha1.Task{taskWithWorkspace},
		TaskRuns: []*v1alpha1.TaskRun{taskRun},
		ClusterTasks: nil,
		PipelineResources: nil,
	}
	names.TestingSeed()
	testAssets, cancel := getTaskRunController(t, d)
	defer cancel()
	clients := testAssets.Clients
	// Reconcile itself must not error: the failure surfaces as a status condition.
	if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
		t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
	}
	tr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
	}
	// Look for a Succeeded condition set to False with the validation-failure reason.
	failedCorrectly := false
	for _, c := range tr.Status.Conditions {
		if c.Type == apis.ConditionSucceeded && c.Status == corev1.ConditionFalse && c.Reason == podconvert.ReasonFailedValidation {
			failedCorrectly = true
		}
	}
	if !failedCorrectly {
		t.Errorf("Expected TaskRun to fail validation but it did not. Final conditions were:\n%#v", tr.Status.Conditions)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 575
} | [
2830,
3393,
693,
40446,
457,
45981,
25080,
1155,
353,
8840,
836,
8,
341,
49115,
2354,
45981,
1669,
16363,
28258,
445,
1944,
52579,
26189,
28621,
8746,
756,
197,
62842,
28258,
8327,
1006,
298,
62842,
28258,
45981,
445,
8915,
16,
497,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestEncoderWriterStruct1(t *testing.T) {
var buf bytes.Buffer
e := velocypack.NewEncoder(&buf)
for i := 0; i < 1000; i++ {
input := Struct1{
Field1: i,
}
must(e.Encode(input))
}
r := bytes.NewReader(buf.Bytes())
d := velocypack.NewDecoder(r)
for i := 0; i < 1000; i++ {
var v Struct1
must(d.Decode(&v))
expected := Struct1{
Field1: i,
}
ASSERT_EQ(v, expected, t)
}
} | explode_data.jsonl/52698 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 189
} | [
2830,
3393,
19921,
6492,
9422,
16,
1155,
353,
8840,
836,
8,
341,
2405,
6607,
5820,
22622,
198,
7727,
1669,
40509,
1082,
473,
7121,
19921,
2099,
5909,
340,
2023,
600,
1669,
220,
15,
26,
600,
366,
220,
16,
15,
15,
15,
26,
600,
1027,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestPythonInterop decodes a node record produced by the Python ENR
// implementation (pyRecord fixture) and checks the sequence number, node
// address, IP, and discovery port against known-good values.
func TestPythonInterop(t *testing.T) {
	var r Record
	if err := rlp.DecodeBytes(pyRecord, &r); err != nil {
		t.Fatalf("can't decode: %v", err)
	}
	// Expected values baked into the Python-generated record fixture.
	var (
		wantAddr, _ = hex.DecodeString("caaa1485d83b18b32ed9ad666026151bf0cae8a0a88c857ae2d4c5be2daa6726")
		wantSeq = uint64(1)
		wantIP = IP4{127, 0, 0, 1}
		wantDiscport = DiscPort(60606)
	)
	if r.Seq() != wantSeq {
		t.Errorf("wrong seq: got %d, want %d", r.Seq(), wantSeq)
	}
	if addr := r.NodeAddr(); !bytes.Equal(addr, wantAddr) {
		t.Errorf("wrong addr: got %x, want %x", addr, wantAddr)
	}
	// Load each typed entry from the record and compare with the expectation.
	want := map[Entry]interface{}{new(IP4): &wantIP, new(DiscPort): &wantDiscport}
	for k, v := range want {
		desc := fmt.Sprintf("loading key %q", k.ENRKey())
		if assert.NoError(t, r.Load(k), desc) {
			assert.Equal(t, k, v, desc)
		}
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 381
} | [
2830,
3393,
30280,
94000,
1155,
353,
8840,
836,
8,
341,
2405,
435,
13583,
198,
743,
1848,
1669,
435,
13545,
56372,
7078,
46827,
6471,
11,
609,
81,
1215,
1848,
961,
2092,
341,
197,
3244,
30762,
445,
4814,
944,
16895,
25,
1018,
85,
497,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestGreaterThanOrEqualToMatcherDatetime builds a GREATER_THAN_OR_EQUAL_TO
// matcher with a DATETIME operand and checks the equal, greater, and lower
// cases against the "value" attribute.
func TestGreaterThanOrEqualToMatcherDatetime(t *testing.T) {
	logger := logging.NewLogger(&logging.LoggerOptions{})
	attrName := "value"
	dto := &dtos.MatcherDTO{
		MatcherType: "GREATER_THAN_OR_EQUAL_TO",
		UnaryNumeric: &dtos.UnaryNumericMatcherDataDTO{
			DataType: "DATETIME",
			// NOTE(review): DTO value is in milliseconds while the attribute
			// values below appear to be in seconds — presumably the matcher
			// normalizes DATETIME operands; confirm against BuildMatcher.
			Value: int64(960293532000), // 06/06/2000
		},
		KeySelector: &dtos.KeySelectorDTO{
			Attribute: &attrName,
		},
	}
	matcher, err := BuildMatcher(dto, nil, logger)
	if err != nil {
		t.Error("There should be no errors when building the matcher")
		t.Error(err)
	}
	// Sanity-check the concrete matcher type produced by the factory.
	matcherType := reflect.TypeOf(matcher).String()
	if matcherType != "*matchers.GreaterThanOrEqualToMatcher" {
		t.Errorf("Incorrect matcher constructed. Should be *matchers.GreaterThanOrEqualToMatcher and was %s", matcherType)
	}
	attributes := make(map[string]interface{})
	attributes["value"] = int64(960293532)
	if !matcher.Match("asd", attributes, nil) {
		t.Error("Equal should match")
	}
	attributes["value"] = int64(1275782400)
	if !matcher.Match("asd", attributes, nil) {
		t.Error("Greater should match")
	}
	attributes["value"] = int64(293532000)
	if matcher.Match("asd", attributes, nil) {
		t.Error("Lower should NOT match")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 479
} | [
2830,
3393,
41366,
89387,
37554,
94191,
1155,
353,
8840,
836,
8,
341,
17060,
1669,
8392,
7121,
7395,
2099,
25263,
12750,
3798,
37790,
60943,
675,
1669,
330,
957,
698,
98864,
1669,
609,
8047,
436,
76452,
14923,
515,
197,
197,
37554,
929,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestNilServiceResolvers(t *testing.T) {
ctx := logger.WithLogger(broker.NewBackground())
resolvers := ServiceResolvers{nil}
_, err := resolvers.Resolve(ctx)
if err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/2209 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 82
} | [
2830,
3393,
19064,
1860,
1061,
39435,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
5925,
26124,
7395,
1883,
45985,
7121,
8706,
2398,
10202,
39435,
1669,
5362,
1061,
39435,
90,
8385,
532,
197,
6878,
1848,
1669,
592,
39435,
57875,
7502,
340,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestIgnorePackages_whenTypical(t *testing.T) {
assert := testifyassert.New(t)
arbitraryPackages := []string{"abc", "xyz/abc"}
actual := IgnorePackages(arbitraryPackages)
assert.Equal(arbitraryPackages, actual)
} | explode_data.jsonl/73324 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 79
} | [
2830,
3393,
12497,
69513,
47636,
12834,
938,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
48162,
2207,
7121,
1155,
692,
69340,
87851,
69513,
1669,
3056,
917,
4913,
13683,
497,
330,
28854,
14,
13683,
63159,
88814,
1669,
38971,
69513,
37544,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestFeeStats checks FeeStats parsing on a canned Horizon response
// (happy path) and error propagation on a failed connection.
func TestFeeStats(t *testing.T) {
	hmock := httptest.NewClient()
	client := &Client{
		HorizonURL: "https://localhost/",
		HTTP: hmock,
	}
	// happy path
	hmock.On(
		"GET",
		"https://localhost/fee_stats",
	).ReturnString(200, feesResponse)
	fees, err := client.FeeStats()
	// Every field below mirrors the feesResponse fixture.
	if assert.NoError(t, err) {
		assert.Equal(t, fees.LastLedger, 22606298)
		assert.Equal(t, fees.LastLedgerBaseFee, 100)
		assert.Equal(t, fees.LedgerCapacityUsage, 0.97)
		assert.Equal(t, fees.MinAcceptedFee, 130)
		assert.Equal(t, fees.ModeAcceptedFee, 250)
		assert.Equal(t, fees.P10AcceptedFee, 150)
		assert.Equal(t, fees.P20AcceptedFee, 200)
		assert.Equal(t, fees.P30AcceptedFee, 300)
		assert.Equal(t, fees.P40AcceptedFee, 400)
		assert.Equal(t, fees.P50AcceptedFee, 500)
		assert.Equal(t, fees.P60AcceptedFee, 1000)
		assert.Equal(t, fees.P70AcceptedFee, 2000)
		assert.Equal(t, fees.P80AcceptedFee, 3000)
		assert.Equal(t, fees.P90AcceptedFee, 4000)
		assert.Equal(t, fees.P95AcceptedFee, 5000)
		assert.Equal(t, fees.P99AcceptedFee, 8000)
	}
	// connection error
	// NOTE(review): this section exercises Metrics(), not FeeStats() —
	// possibly carried over from the metrics test; confirm intent.
	hmock.On(
		"GET",
		"https://localhost/metrics",
	).ReturnError("http.Client error")
	_, err = client.Metrics()
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "http.Client error")
		// A transport failure must not be wrapped as a Horizon *Error.
		_, ok := err.(*Error)
		assert.Equal(t, ok, false)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 581
} | [
2830,
3393,
41941,
16635,
1155,
353,
8840,
836,
8,
341,
9598,
16712,
1669,
54320,
70334,
7121,
2959,
741,
25291,
1669,
609,
2959,
515,
197,
13292,
269,
16973,
3144,
25,
330,
2428,
1110,
8301,
35075,
197,
197,
9230,
25,
981,
305,
16712,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func Test_Mock_AssertCalled_WithArguments(t *testing.T) {
var mockedService = new(TestExampleImplementation)
mockedService.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7)
mockedService.Called(1, 2, 3)
tt := new(testing.T)
assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3))
assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4))
} | explode_data.jsonl/8609 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 193
} | [
2830,
3393,
1245,
1176,
62222,
529,
20960,
62,
2354,
19139,
1155,
353,
8840,
836,
8,
8022,
2405,
46149,
1860,
284,
501,
31159,
13314,
36850,
7229,
2109,
67385,
1860,
8071,
445,
2271,
1245,
1176,
62222,
529,
20960,
62,
2354,
19139,
497,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestGetRelation resolves a GraphQL query with a two-level recursive
// HasAction reference and asserts the resolver receives the matching
// nested LocalGetClassParams structure.
func TestGetRelation(t *testing.T) {
	t.Parallel()
	resolver := newMockResolver()
	// Mirror of the query below: HasAction -> SomeAction { intField,
	// HasAction -> SomeAction { intField } }.
	expectedParams := &LocalGetClassParams{
		Kind: kind.ACTION_KIND,
		ClassName: "SomeAction",
		Properties: []SelectProperty{
			{
				Name: "HasAction",
				IsPrimitive: false,
				Refs: []SelectClass{
					{
						ClassName: "SomeAction",
						RefProperties: []SelectProperty{
							{
								Name: "intField",
								IsPrimitive: true,
							},
							{
								Name: "HasAction",
								IsPrimitive: false,
								Refs: []SelectClass{
									{
										ClassName: "SomeAction",
										RefProperties: []SelectProperty{
											{
												Name: "intField",
												IsPrimitive: true,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	resolver.On("LocalGetClass", expectedParams).
		Return(test_helper.EmptyListThunk(), nil).Once()
	query := "{ Get { Actions { SomeAction { HasAction { ... on SomeAction { intField, HasAction { ... on SomeAction { intField } } } } } } } }"
	resolver.AssertResolve(t, query)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 566
} | [
2830,
3393,
1949,
33790,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
10202,
7921,
1669,
501,
11571,
18190,
2822,
42400,
4870,
1669,
609,
7319,
1949,
1957,
4870,
515,
197,
197,
10629,
25,
414,
3093,
28934,
72959,
345,
197,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFindIndexOfEmail_Successful(t *testing.T) {
x := []string{"testingATtest.com",
"testing@Test.com",
"testingATtest.com",
"testingATtest.com"}
got := FindIndexOfEmail(x)
want := 1
if got != want {
t.Errorf("Got: %d, Wanted: %d", got, want)
}
} | explode_data.jsonl/45939 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 110
} | [
2830,
3393,
9885,
27376,
4781,
87161,
1262,
1155,
353,
8840,
836,
8,
341,
10225,
1669,
3056,
917,
4913,
8840,
828,
1944,
905,
756,
197,
197,
42097,
18229,
905,
756,
197,
197,
42097,
828,
1944,
905,
756,
197,
197,
42097,
828,
1944,
905... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestReturns200IfThereAreNoChecks(t *testing.T) {
recorder := httptest.NewRecorder()
req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil)
if err != nil {
t.Errorf("Failed to create request.")
}
StatusHandler(recorder, req)
if recorder.Code != 200 {
t.Errorf("Did not get a 200.")
}
} | explode_data.jsonl/56023 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 120
} | [
2830,
3393,
16446,
17,
15,
15,
2679,
3862,
11526,
2753,
49820,
1155,
353,
8840,
836,
8,
341,
67904,
1358,
1669,
54320,
70334,
7121,
47023,
2822,
24395,
11,
1848,
1669,
1758,
75274,
445,
3806,
497,
330,
2428,
1110,
30570,
1085,
905,
6747... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestJvInvalidWithMessage_string checks that an invalid Jv constructed with
// a string message reports invalidity and exposes the message both as a Jv
// and as a Go string.
func TestJvInvalidWithMessage_string(t *testing.T) {
	errMsg := "Error message 1"
	jv := jq.JvInvalidWithMessage(jq.JvFromString(errMsg))
	if jv.IsValid() == true {
		t.Errorf("IsValid() returned true for JvInvalidWithMessage()")
	}
	// Copy before extracting the message — presumably GetInvalidMessage
	// consumes its receiver (jq ownership semantics); confirm in jq docs.
	msg := jv.Copy().GetInvalidMessage()
	if msg.Kind() != jq.JvKindString {
		t.Errorf("JvInvalidWithMessage().GetInvalidMessage().Kind() returned a kind other than JvKindString")
	}
	// Release the extracted message value explicitly.
	msg.Free()
	str, ok := jv.GetInvalidMessageAsString()
	if !ok {
		t.Errorf("JvInvalidWithMessage().JvGetInvalidMessageAsString() is not ok")
	}
	if str != errMsg {
		t.Errorf("JvInvalidWithMessage().JvGetInvalidMessageAsString() did not return original error message")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 248
} | [
2830,
3393,
41,
85,
7928,
2354,
2052,
3904,
1155,
353,
8840,
836,
8,
341,
9859,
6611,
1669,
330,
1454,
1943,
220,
16,
698,
12428,
85,
1669,
44648,
3503,
85,
7928,
2354,
2052,
96887,
3503,
85,
44491,
3964,
6611,
1171,
743,
502,
85,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestDeleteByQueryDocument asserts that Document.DeleteByQuery sends a
// correctly-shaped Kuzzle request (controller/action/index/collection) and
// succeeds on a well-formed "hits" response.
func TestDeleteByQueryDocument(t *testing.T) {
	c := &internal.MockedConnection{
		// The mock inspects the outgoing request instead of a real server.
		MockSend: func(query []byte, options types.QueryOptions) *types.KuzzleResponse {
			parsedQuery := &types.KuzzleRequest{}
			json.Unmarshal(query, parsedQuery)
			assert.Equal(t, "document", parsedQuery.Controller)
			assert.Equal(t, "deleteByQuery", parsedQuery.Action)
			assert.Equal(t, "index", parsedQuery.Index)
			assert.Equal(t, "collection", parsedQuery.Collection)
			return &types.KuzzleResponse{Result: []byte(`
			{
				"hits": ["id1", "id2"]
			}`),
			}
		},
	}
	k, _ := kuzzle.NewKuzzle(c, nil)
	d := document.NewDocument(k)
	_, err := d.DeleteByQuery("index", "collection", json.RawMessage(`{"foo": "bar"}`), nil)
	assert.Nil(t, err)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 295
} | [
2830,
3393,
6435,
1359,
2859,
7524,
1155,
353,
8840,
836,
8,
1476,
1444,
1669,
609,
10481,
24664,
291,
4526,
515,
197,
9209,
1176,
11505,
25,
2915,
10741,
3056,
3782,
11,
2606,
4494,
15685,
3798,
8,
353,
9242,
11352,
14945,
2582,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCancelOrder(t *testing.T) {
_, err := b.CancelExistingOrder([]int64{1337})
if err == nil {
t.Error("Test failed - CancelgOrder() error", err)
}
} | explode_data.jsonl/48999 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 64
} | [
2830,
3393,
9269,
4431,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1848,
1669,
293,
36491,
53067,
4431,
10556,
396,
21,
19,
90,
16,
18,
18,
22,
8824,
743,
1848,
621,
2092,
341,
197,
3244,
6141,
445,
2271,
4641,
481,
23542,
70,
4431,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
// TestShimDefaults checks shim path handling: the default path is returned
// as-is, symlinked paths resolve to their target, and the Debug flag drives
// debug().
func TestShimDefaults(t *testing.T) {
	assert := assert.New(t)
	tmpdir, err := ioutil.TempDir(testDir, "")
	assert.NoError(err)
	defer os.RemoveAll(tmpdir)
	// A real shim file plus a symlink pointing at it.
	testShimPath := filepath.Join(tmpdir, "shim")
	testShimLinkPath := filepath.Join(tmpdir, "shim-link")
	err = createEmptyFile(testShimPath)
	assert.NoError(err)
	err = syscall.Symlink(testShimPath, testShimLinkPath)
	assert.NoError(err)
	// Mutate the package-level defaultShimPath for the duration of the test
	// and restore it on exit.
	savedShimPath := defaultShimPath
	defer func() {
		defaultShimPath = savedShimPath
	}()
	defaultShimPath = testShimPath
	s := shim{}
	p, err := s.path()
	assert.NoError(err)
	assert.Equal(p, defaultShimPath, "default shim path wrong")
	// test path resolution
	defaultShimPath = testShimLinkPath
	s = shim{}
	p, err = s.path()
	assert.NoError(err)
	// The symlink must resolve back to the real shim file.
	assert.Equal(p, testShimPath)
	assert.False(s.debug())
	s.Debug = true
	assert.True(s.debug())
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 341
} | [
2830,
3393,
2016,
318,
16273,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
20082,
3741,
11,
1848,
1669,
43144,
65009,
6184,
8623,
6184,
11,
14676,
6948,
35699,
3964,
340,
16867,
2643,
84427,
10368,
3741,
692,
18185,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestTypeMatchesReflectType confirms that the unsafeheader struct layouts
// stay in sync with their reflect-package counterparts, one subtest per type.
func TestTypeMatchesReflectType(t *testing.T) {
	t.Run("Slice", func(t *testing.T) {
		testHeaderMatchesReflect(t, unsafeheader.Slice{}, reflect.SliceHeader{})
	})
	t.Run("String", func(t *testing.T) {
		testHeaderMatchesReflect(t, unsafeheader.String{}, reflect.StringHeader{})
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 109
} | [
2830,
3393,
929,
42470,
72789,
929,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
33236,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
18185,
4047,
42470,
72789,
1155,
11,
19860,
2708,
95495,
22655,
8708,
95495,
4047,
37790,
197,
8824... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStatusHandlerGetFailures(t *testing.T) {
expected := cabby.Error{
Title: "Internal Server Error", Description: "Status failure", HTTPStatus: http.StatusInternalServerError}
ms := mockStatusService()
ms.StatusFn = func(ctx context.Context, statusID string) (cabby.Status, error) {
return cabby.Status{}, errors.New(expected.Description)
}
h := StatusHandler{StatusService: &ms}
status, body := handlerTest(h.Get, "GET", testStatusURL, nil)
if status != expected.HTTPStatus {
t.Error("Got:", status, "Expected:", expected.HTTPStatus)
}
var result cabby.Error
err := json.Unmarshal([]byte(body), &result)
if err != nil {
t.Fatal(err)
}
passed := tester.CompareError(result, expected)
if !passed {
t.Error("Comparison failed")
}
} | explode_data.jsonl/39129 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 262
} | [
2830,
3393,
2522,
3050,
1949,
19524,
1413,
1155,
353,
8840,
836,
8,
341,
42400,
1669,
21516,
1694,
6141,
515,
197,
92233,
25,
330,
11569,
8422,
4600,
497,
7662,
25,
330,
2522,
7901,
497,
10130,
2522,
25,
1758,
66760,
630,
47691,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestDft02 samples a sinusoid, runs the forward FFT (Dft1d), and checks the
// peak of the normalized magnitude spectrum against a reference value;
// plotting runs only in verbose mode.
func TestDft02(tst *testing.T) {
	//verbose()
	chk.PrintTitle("Dft02. FFT sinusoid")
	// set sinusoid equation
	T := 1.0 / 5.0 // period [s]
	A0 := 0.0 // mean value
	C1 := 1.0 // amplitude
	θ := -math.Pi / 2.0 // phase shift [rad]
	ss := NewSinusoidEssential(T, A0, C1, θ)
	// discrete data
	N := 16
	dt := 1.0 / float64(N-1)
	tt := make([]float64, N) // time
	xx := make([]float64, N) // x[n]
	data := make([]complex128, N) // x[n] to use as input of FFT
	for i := 0; i < N; i++ {
		tt[i] = float64(i) * dt
		xx[i] = ss.Ybasis(tt[i])
		data[i] = complex(xx[i], 0)
	}
	// execute FFT
	Dft1d(data, false)
	// extract results
	Xr := make([]float64, N) // real(X[n])
	Xi := make([]float64, N) // imag(X[n])
	Rf := make([]float64, N) // |X[n]|/n
	maxRf := 0.0
	for k := 0; k < N; k++ {
		Xr[k] = real(data[k])
		Xi[k] = imag(data[k])
		Rf[k] = math.Sqrt(Xr[k]*Xr[k]+Xi[k]*Xi[k]) / float64(N)
		if Rf[k] > maxRf {
			maxRf = Rf[k]
		}
	}
	io.Pforan("maxRf = %v\n", maxRf)
	// reference value 0.383616856748 is a regression constant for this setup
	chk.Float64(tst, "maxRf", 1e-12, maxRf, 0.383616856748)
	// plot
	if chk.Verbose {
		ts := utl.LinSpace(0, 1, 201)
		xs := make([]float64, len(ts))
		for i := 0; i < len(ts); i++ {
			xs[i] = ss.Ybasis(ts[i])
		}
		fn := utl.LinSpace(0, float64(N), N)
		plt.Reset(true, &plt.A{Prop: 1.2})
		plt.Subplot(3, 1, 1)
		plt.Plot(ts, xs, &plt.A{C: "b", L: "continuous signal", NoClip: true})
		plt.Plot(tt, xx, &plt.A{C: "r", M: ".", L: "discrete signal", NoClip: true})
		plt.Cross(0, 0, nil)
		plt.HideAllBorders()
		plt.Gll("t", "x(t)", &plt.A{LegOut: true, LegNcol: 3})
		plt.Subplot(3, 1, 2)
		plt.Plot(tt, Xr, &plt.A{C: "r", M: ".", L: "real(X)", NoClip: true})
		plt.HideAllBorders()
		plt.Gll("t", "f(t)", &plt.A{LegOut: true, LegNcol: 3})
		plt.Subplot(3, 1, 3)
		plt.Plot(fn, Rf, &plt.A{C: "m", M: ".", NoClip: true})
		plt.HideAllBorders()
		plt.Gll("freq", "|X(f)|/n", &plt.A{LegOut: true, LegNcol: 3})
		plt.Save("/tmp/gosl/fun", "dft02")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1075
} | [
2830,
3393,
35,
723,
15,
17,
1155,
267,
353,
8840,
836,
8,
1476,
197,
322,
14883,
741,
23049,
74,
7918,
3851,
445,
35,
723,
15,
17,
13,
60036,
75814,
588,
5130,
197,
322,
738,
75814,
588,
23606,
198,
10261,
1669,
220,
16,
13,
15,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestEngine_ConcurrentShardSnapshots simulates a snapshot racing with an
// in-progress cache snapshot by forcing Cache.snapshotting, and checks that
// CreateSnapshot either tolerates it (skipCacheOk) or reports
// ErrSnapshotInProgress.
func TestEngine_ConcurrentShardSnapshots(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "shard_test")
	require.NoError(t, err, "error creating temporary directory")
	defer os.RemoveAll(tmpDir)
	tmpShard := filepath.Join(tmpDir, "shard")
	tmpWal := filepath.Join(tmpDir, "wal")
	sfile := NewSeriesFile(t, tmpDir)
	defer sfile.Close()
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")
	opts.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{})
	sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile, opts)
	require.NoError(t, sh.Open(context.Background()), "error opening shard")
	defer sh.Close()
	// Seed the shard with 10000 points so there is data to snapshot.
	points := make([]models.Point, 0, 10000)
	for i := 0; i < cap(points); i++ {
		points = append(points, models.MustNewPoint(
			"cpu",
			models.NewTags(map[string]string{"host": "server"}),
			map[string]interface{}{"value": 1.0},
			time.Unix(int64(i), 0),
		))
	}
	err = sh.WritePoints(context.Background(), points)
	require.NoError(t, err)
	engineInterface, err := sh.Engine()
	require.NoError(t, err, "error retrieving shard engine")
	// Get the struct underlying the interface. Not a recommended practice.
	realEngineStruct, ok := (engineInterface).(*Engine)
	if !ok {
		t.Log("Engine type does not permit simulating Cache race conditions")
		return
	}
	// fake a race condition in snapshotting the cache.
	realEngineStruct.Cache.snapshotting = true
	defer func() {
		realEngineStruct.Cache.snapshotting = false
	}()
	snapshotFunc := func(skipCacheOk bool) {
		if f, err := sh.CreateSnapshot(skipCacheOk); err == nil {
			require.NoError(t, os.RemoveAll(f), "error cleaning up TestEngine_ConcurrentShardSnapshots")
		} else if err == ErrSnapshotInProgress {
			// With skipCacheOk the in-progress error should have been ignored.
			if skipCacheOk {
				t.Fatalf("failing to ignore this error,: %s", err.Error())
			}
		} else {
			t.Fatalf("error creating shard snapshot: %s", err.Error())
		}
	}
	// Permit skipping cache in the snapshot
	snapshotFunc(true)
	// do not permit skipping the cache in the snapshot
	snapshotFunc(false)
	realEngineStruct.Cache.snapshotting = false
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 733
} | [
2830,
3393,
4571,
15100,
3231,
2016,
567,
61871,
27634,
1155,
353,
8840,
836,
8,
341,
20082,
6184,
11,
1848,
1669,
43144,
65009,
6184,
19814,
330,
927,
567,
4452,
1138,
17957,
35699,
1155,
11,
1848,
11,
330,
841,
6825,
13340,
6220,
1138... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestQuotes fetches monthly IBM quotes through a stubbed HTTP doer and
// rate limiter, then verifies the parsed result and the exact request the
// client constructed (host, path, and query parameters).
func TestQuotes(t *testing.T) {
	please := Ω.NewWithT(t)
	ctx := context.TODO()
	// Captured by the doer stub so the outgoing request can be inspected.
	var avReq *http.Request
	waitCallCount := 0
	quotes, err := (&alphavantage.Client{
		Client: doerFunc(func(request *http.Request) (*http.Response, error) {
			avReq = request
			return &http.Response{
				Body: io.NopCloser(bytes.NewReader(monthlyIBM)),
				StatusCode: http.StatusOK,
			}, nil
		}),
		APIKey: "demo",
		Limiter: waitFunc(func(ctx context.Context) error {
			waitCallCount++
			return nil
		}),
	}).Quotes(ctx, "IBM", alphavantage.TimeSeriesMonthly)
	please.Expect(err).NotTo(Ω.HaveOccurred())
	// 260 rows come from the monthlyIBM CSV fixture.
	please.Expect(quotes).To(Ω.HaveLen(260))
	please.Expect(avReq.Host).To(Ω.Equal("www.alphavantage.co"))
	please.Expect(avReq.URL.Scheme).To(Ω.Equal("https"))
	please.Expect(avReq.URL.Path).To(Ω.Equal("/query"))
	please.Expect(avReq.URL.Query().Get("function")).To(Ω.Equal("TIME_SERIES_MONTHLY"))
	please.Expect(avReq.URL.Query().Get("symbol")).To(Ω.Equal("IBM"))
	please.Expect(avReq.URL.Query().Get("apikey")).To(Ω.Equal("demo"))
	please.Expect(avReq.URL.Query().Get("datatype")).To(Ω.Equal("csv"))
	// The rate limiter must be consulted exactly once per request.
	please.Expect(waitCallCount).To(Ω.Equal(1))
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 511
} | [
2830,
3393,
43780,
1155,
353,
8840,
836,
8,
341,
197,
30021,
1669,
7851,
102,
7121,
2354,
51,
1155,
692,
20985,
1669,
2266,
90988,
2822,
2405,
1822,
27234,
353,
1254,
9659,
271,
48750,
7220,
2507,
1669,
220,
15,
271,
197,
53282,
11,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDNSProvider_Present(t *testing.T) {
hostedZone := "example.com"
domain := "prefix." + hostedZone
testCases := []struct {
desc string
username string
apiKey string
handlers map[string]http.HandlerFunc
expectedError string
}{
{
desc: "success",
username: "bar",
apiKey: "foo",
handlers: map[string]http.HandlerFunc{
"/" + hostedZone + "/txt": mockHandlerCreateRecord,
},
},
{
desc: "invalid auth",
username: "nope",
apiKey: "foo",
handlers: map[string]http.HandlerFunc{
"/" + hostedZone + "/txt": mockHandlerCreateRecord,
},
expectedError: "zoneee: status code=401: Unauthorized\n",
},
{
desc: "error",
username: "bar",
apiKey: "foo",
expectedError: "zoneee: status code=404: 404 page not found\n",
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
mux := http.NewServeMux()
for uri, handler := range test.handlers {
mux.HandleFunc(uri, handler)
}
server := httptest.NewServer(mux)
config := NewDefaultConfig()
config.Endpoint = mustParse(server.URL)
config.Username = test.username
config.APIKey = test.apiKey
p, err := NewDNSProviderConfig(config)
require.NoError(t, err)
err = p.Present(domain, "token", "key")
if test.expectedError == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, test.expectedError)
}
})
}
} | explode_data.jsonl/9107 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 664
} | [
2830,
3393,
61088,
5179,
1088,
2695,
1155,
353,
8840,
836,
8,
341,
63104,
291,
15363,
1669,
330,
8687,
905,
698,
2698,
3121,
1669,
330,
11849,
1189,
488,
21009,
15363,
271,
18185,
37302,
1669,
3056,
1235,
341,
197,
41653,
688,
914,
198,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestIntegerExpressionGT(t *testing.T) {
assertClauseSerialize(t, table1ColInt.GT(table2ColInt), "(table1.col_int > table2.col_int)")
assertClauseSerialize(t, table1ColInt.GT(Int(11)), "(table1.col_int > $1)", int64(11))
} | explode_data.jsonl/41451 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 91
} | [
2830,
3393,
3486,
9595,
25388,
1155,
353,
8840,
836,
8,
341,
6948,
28482,
15680,
1155,
11,
1965,
16,
6127,
1072,
1224,
51,
15761,
17,
6127,
1072,
701,
11993,
2005,
16,
13414,
4042,
861,
1965,
17,
13414,
4042,
19107,
6948,
28482,
15680,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMapProxy_GetEntryView(t *testing.T) {
mp.Put("key", "value")
mp.Get("key")
mp.Put("key", "newValue")
entryView, err := mp.GetEntryView("key")
AssertEqualf(t, err, entryView.Key(), "key", "Map GetEntryView returned a wrong view.")
AssertEqualf(t, err, entryView.Value(), "newValue", "Map GetEntryView returned a wrong view.")
AssertEqualf(t, err, entryView.Hits(), int64(2), "Map GetEntryView returned a wrong view.")
AssertEqualf(t, err, entryView.EvictionCriteriaNumber(), int64(0), "Map GetEntryView returned a wrong view.")
AssertEqualf(t, err, entryView.Version(), int64(1), "Map GetEntryView returned a wrong view.")
mp.Clear()
} | explode_data.jsonl/57025 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 227
} | [
2830,
3393,
2227,
16219,
13614,
5874,
851,
1155,
353,
8840,
836,
8,
341,
53230,
39825,
445,
792,
497,
330,
957,
1138,
53230,
2234,
445,
792,
1138,
53230,
39825,
445,
792,
497,
330,
52830,
5130,
48344,
851,
11,
1848,
1669,
10490,
2234,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAccAWSDBInstanceSnapshot(t *testing.T) {
var snap rds.DBInstance
rInt := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
// testAccCheckAWSDBInstanceSnapshot verifies a database snapshot is
// created, and subequently deletes it
CheckDestroy: testAccCheckAWSDBInstanceSnapshot(rInt),
Steps: []resource.TestStep{
{
Config: testAccSnapshotInstanceConfigWithSnapshot(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSDBInstanceExists("aws_db_instance.snapshot", &snap),
),
},
},
})
} | explode_data.jsonl/33924 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 232
} | [
2830,
3393,
14603,
36136,
3506,
2523,
15009,
1155,
353,
8840,
836,
8,
341,
2405,
10658,
435,
5356,
22537,
2523,
198,
7000,
1072,
1669,
1613,
67880,
2013,
437,
1072,
2822,
50346,
8787,
1155,
11,
5101,
31363,
515,
197,
197,
4703,
3973,
25... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestImplicitAdminsKeyedForSubteam(t *testing.T) {
fus, tcs, cleanup := setupNTests(t, 3)
defer cleanup()
t.Logf("U0 creates a root team")
parentName, _ := createTeam2(*tcs[0])
t.Logf("U0 creates a subteam")
subteamID, err := CreateSubteam(context.TODO(), tcs[0].G, "sub", parentName)
require.NoError(t, err)
t.Logf("U1 and U2 can't load the subteam")
_, err = tcs[1].G.GetTeamLoader().ImplicitAdmins(context.TODO(), *subteamID)
require.Error(t, err, "U1 should not be able to load subteam without implicit admin status")
_, err = tcs[2].G.GetTeamLoader().ImplicitAdmins(context.TODO(), *subteamID)
require.Error(t, err, "U2 isn't in the subteam at all yet, shouldn't be able to load")
t.Logf("U0 adds U1 as an admin in the root team")
_, err = AddMember(context.TODO(), tcs[0].G, parentName.String(), fus[1].Username, keybase1.TeamRole_ADMIN)
require.NoError(t, err)
t.Logf("now U1 can load the subteam, but not U2")
_, err = tcs[1].G.GetTeamLoader().ImplicitAdmins(context.TODO(), *subteamID)
require.NoError(t, err, "U1 should able to load subteam with implicit admin status")
_, err = tcs[2].G.GetTeamLoader().ImplicitAdmins(context.TODO(), *subteamID)
require.Error(t, err, "U2 still isn't in the subteam at yet, shouldn't be able to load")
t.Logf("U1 can add U2 to the subteam")
_, err = AddMember(context.TODO(), tcs[1].G, parentName.String(), fus[2].Username, keybase1.TeamRole_ADMIN)
require.NoError(t, err)
t.Logf("now U2 can load the subteam")
_, err = tcs[1].G.GetTeamLoader().ImplicitAdmins(context.TODO(), *subteamID)
require.NoError(t, err, "now U2 is a member of the subteam and should be able to read it")
} | explode_data.jsonl/13532 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 643
} | [
2830,
3393,
59558,
7210,
82,
1592,
291,
2461,
3136,
9196,
1155,
353,
8840,
836,
8,
341,
1166,
355,
11,
259,
4837,
11,
21290,
1669,
6505,
45,
18200,
1155,
11,
220,
18,
340,
16867,
21290,
2822,
3244,
98954,
445,
52,
15,
11450,
264,
37... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCommitFind(t *testing.T) {
defer gock.Off()
mockServerVersion()
gock.New("https://try.gitea.io").
Get("/api/v1/repos/gitea/gitea/git/commits/c43399cad8766ee521b873a32c1652407c5a4630").
Reply(200).
Type("application/json").
File("testdata/commit.json")
client, _ := New("https://try.gitea.io")
got, _, err := client.Git.FindCommit(
context.Background(),
"gitea/gitea",
"c43399cad8766ee521b873a32c1652407c5a4630",
)
if err != nil {
t.Error(err)
}
want := new(scm.Commit)
raw, _ := ioutil.ReadFile("testdata/commit.json.golden")
err = json.Unmarshal(raw, &want)
if err != nil {
t.Error(err)
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("Unexpected Results")
t.Log(diff)
}
} | explode_data.jsonl/50606 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 345
} | [
2830,
3393,
33441,
9885,
1155,
353,
8840,
836,
8,
341,
16867,
728,
377,
13,
4596,
2822,
77333,
5475,
5637,
2822,
3174,
1176,
7121,
445,
2428,
1110,
1539,
1302,
632,
64,
4245,
38609,
197,
37654,
4283,
2068,
5457,
16,
49505,
4846,
632,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestWebRootRedirect(t *testing.T) {
oldConfig := config.Get()
defer config.Set(oldConfig)
conf := new(config.Config)
conf.Server.WebRoot = "/test"
config.Set(conf)
router := NewRouter()
ts := httptest.NewServer(router)
defer ts.Close()
client := &http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
resp, err := client.Get(ts.URL + "/")
if err != nil {
t.Fatal(err)
}
// body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, 302, resp.StatusCode, "Response should redirect to the webroot")
assert.Equal(t, "/test/", resp.Header.Get("Location"), "Response should redirect to the webroot")
} | explode_data.jsonl/14285 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 258
} | [
2830,
3393,
5981,
8439,
17725,
1155,
353,
8840,
836,
8,
341,
61828,
2648,
1669,
2193,
2234,
741,
16867,
2193,
4202,
21972,
2648,
692,
67850,
1669,
501,
8754,
10753,
340,
67850,
22997,
6473,
8439,
284,
3521,
1944,
698,
25873,
4202,
29879,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStoreDeleteCollection(t *testing.T) {
podA := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
podB := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}
testContext := api.WithNamespace(api.NewContext(), "test")
server, registry := NewTestGenericStoreRegistry(t)
defer server.Terminate(t)
if _, err := registry.Create(testContext, podA); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if _, err := registry.Create(testContext, podB); err != nil {
t.Errorf("Unexpected error: %v", err)
}
// Delete all pods.
deleted, err := registry.DeleteCollection(testContext, nil, &api.ListOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
deletedPods := deleted.(*api.PodList)
if len(deletedPods.Items) != 2 {
t.Errorf("Unexpected number of pods deleted: %d, expected: 2", len(deletedPods.Items))
}
if _, err := registry.Get(testContext, podA.Name); !errors.IsNotFound(err) {
t.Errorf("Unexpected error: %v", err)
}
if _, err := registry.Get(testContext, podB.Name); !errors.IsNotFound(err) {
t.Errorf("Unexpected error: %v", err)
}
} | explode_data.jsonl/237 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 410
} | [
2830,
3393,
6093,
6435,
6482,
1155,
353,
8840,
836,
8,
341,
3223,
347,
32,
1669,
609,
2068,
88823,
90,
1190,
12175,
25,
6330,
80222,
63121,
25,
330,
7975,
95642,
3223,
347,
33,
1669,
609,
2068,
88823,
90,
1190,
12175,
25,
6330,
80222,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestInitFrameworkWithScorePlugins(t *testing.T) {
tests := []struct {
name string
plugins *config.Plugins
// If initErr is true, we expect framework initialization to fail.
initErr bool
}{
{
name: "enabled Score plugin doesn't exist in registry",
plugins: buildScoreConfigDefaultWeights("notExist"),
initErr: true,
},
{
name: "enabled Score plugin doesn't extend the ScorePlugin interface",
plugins: buildScoreConfigDefaultWeights(pluginNotImplementingScore),
initErr: true,
},
{
name: "Score plugins are nil",
plugins: &config.Plugins{},
},
{
name: "enabled Score plugin list is empty",
plugins: buildScoreConfigDefaultWeights(),
},
{
name: "enabled plugin only implements ScorePlugin interface",
plugins: buildScoreConfigDefaultWeights(scorePlugin1),
},
{
name: "enabled plugin implements ScoreWithNormalizePlugin interface",
plugins: buildScoreConfigDefaultWeights(scoreWithNormalizePlugin1),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := newFrameworkWithQueueSortAndBind(registry, tt.plugins, emptyArgs)
if tt.initErr && err == nil {
t.Fatal("Framework initialization should fail")
}
if !tt.initErr && err != nil {
t.Fatalf("Failed to create framework for testing: %v", err)
}
})
}
} | explode_data.jsonl/35737 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 499
} | [
2830,
3393,
3803,
14837,
2354,
10570,
45378,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
262,
914,
198,
197,
197,
18716,
353,
1676,
21368,
6840,
198,
197,
197,
322,
1416,
2930,
7747,
374,
830,
11,
582,
1720... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestATSConfigs(t *testing.T) {
WithObjs(t, []TCObj{CDNs, Types, Tenants, Parameters, Profiles, Statuses, Divisions, Regions, PhysLocations, CacheGroups, Servers, DeliveryServices}, func() {
defer DeleteTestDeliveryServiceServersCreated(t)
CreateTestDeliveryServiceServers(t)
GetTestATSConfigs(t)
})
} | explode_data.jsonl/20782 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 106
} | [
2830,
3393,
49107,
84905,
1155,
353,
8840,
836,
8,
341,
197,
2354,
4121,
2519,
1155,
11,
3056,
7749,
5261,
90,
6484,
47360,
11,
20768,
11,
17695,
1783,
11,
13522,
11,
71727,
11,
8104,
288,
11,
8765,
6805,
11,
77347,
11,
12809,
43037,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStartEndExecutor(t *testing.T) {
tests := []struct {
name string
mockDb NotificationLoader
expectedResult []contract.Notification
expectedError bool
expectedErrType error
}{
{
name: "Successful database call",
mockDb: createMockNotificiationLoaderStartEndStringArg("GetNotificationsByStartEnd", nil, SuccessfulDatabaseResult, Start, End, Limit),
expectedResult: SuccessfulDatabaseResult,
expectedError: false,
expectedErrType: nil,
},
{
name: "Unsuccessful database call",
mockDb: createMockNotificiationLoaderStartEndStringArg("GetNotificationsByStartEnd", Error, []contract.Notification{}, Start, End, Limit),
expectedResult: []contract.Notification{},
expectedError: true,
expectedErrType: Error,
},
{
name: "Notification not found",
mockDb: createMockNotificiationLoaderStartEndStringArg("GetNotificationsByStartEnd", nil, []contract.Notification{}, Start, End, Limit),
expectedResult: []contract.Notification{},
expectedError: true,
expectedErrType: ErrorNotFound,
},
{
name: "Unknown Error",
mockDb: createMockNotificiationLoaderStartEndStringArg("GetNotificationsByStartEnd", Error, SuccessfulDatabaseResult, Start, End, Limit),
expectedResult: SuccessfulDatabaseResult,
expectedError: true,
expectedErrType: Error,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
op := NewStartEndExecutor(test.mockDb, Start, End, Limit)
actual, err := op.Execute()
if test.expectedError && err == nil {
t.Error("Expected an error")
return
}
if !test.expectedError && err != nil {
t.Errorf("Unexpectedly encountered error: %s", err.Error())
return
}
if !reflect.DeepEqual(test.expectedErrType, err) {
t.Errorf("Expected error result does not match the observed.\nExpected: %v\nObserved: %v\n", test.expectedErrType, err)
return
}
if !reflect.DeepEqual(test.expectedResult, actual) {
t.Errorf("Expected result does not match the observed.\nExpected: %v\nObserved: %v\n", test.expectedResult, actual)
return
}
})
}
} | explode_data.jsonl/31059 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 872
} | [
2830,
3393,
3479,
3727,
25255,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
310,
914,
198,
197,
77333,
7994,
688,
16571,
9181,
198,
197,
42400,
2077,
220,
3056,
20257,
49329,
198,
197,
42400,
1454,
256,
1807,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestColorRenderDescription(t *testing.T) {
renderer := New(true)
expected := "\x1b[37mDescription\x1b[0m\n"
actual := renderer.RenderDescription("Description")
assert.Equal(t, expected, actual)
} | explode_data.jsonl/12221 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 71
} | [
2830,
3393,
1636,
6750,
5009,
1155,
353,
8840,
836,
8,
341,
83509,
1669,
1532,
3715,
340,
42400,
1669,
2917,
87,
16,
65,
58,
18,
22,
76,
5009,
3462,
16,
65,
58,
15,
76,
1699,
698,
88814,
1669,
19715,
27386,
5009,
445,
5009,
1138,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestLargeContractBlock(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
ht, err := newHostTester("TestLargeContractBlock")
if err != nil {
t.Fatal(err)
}
defer func() {
if err := ht.Close(); err != nil {
t.Fatal(err)
}
}()
// Create 2 storage obligations for the test and add them to the host.
so1, err := ht.newTesterStorageObligation()
if err != nil {
t.Fatal(err)
}
ht.host.managedLockStorageObligation(so1.id())
err = ht.host.managedAddStorageObligation(so1)
if err != nil {
t.Fatal(err)
}
ht.host.managedUnlockStorageObligation(so1.id())
so2, err := ht.newTesterStorageObligation()
if err != nil {
t.Fatal(err)
}
ht.host.managedLockStorageObligation(so2.id())
err = ht.host.managedAddStorageObligation(so2)
if err != nil {
t.Fatal(err)
}
ht.host.managedUnlockStorageObligation(so2.id())
// Add a file contract revision, increasing the filesize of the obligation
// beyong the largeContractSize.
validPayouts, missedPayouts := so1.payouts()
validPayouts[0].Value = validPayouts[0].Value.Sub(types.ZeroCurrency)
validPayouts[1].Value = validPayouts[1].Value.Add(types.ZeroCurrency)
missedPayouts[0].Value = missedPayouts[0].Value.Sub(types.ZeroCurrency)
missedPayouts[1].Value = missedPayouts[1].Value.Add(types.ZeroCurrency)
revisionSet := []types.Transaction{{
FileContractRevisions: []types.FileContractRevision{{
ParentID: so1.id(),
UnlockConditions: types.UnlockConditions{},
NewRevisionNumber: 1,
NewFileSize: uint64(largeContractSize),
NewFileMerkleRoot: crypto.Hash{},
NewWindowStart: so1.expiration(),
NewWindowEnd: so1.proofDeadline(),
NewValidProofOutputs: validPayouts,
NewMissedProofOutputs: missedPayouts,
NewUnlockHash: types.UnlockConditions{}.UnlockHash(),
}},
}}
so1.RevisionTransactionSet = revisionSet
ht.host.managedLockStorageObligation(so1.id())
err = ht.host.managedModifyStorageObligation(so1, nil, nil)
if err != nil {
t.Fatal(err)
}
ht.host.managedUnlockStorageObligation(so1.id())
err = ht.host.tg.Flush()
if err != nil {
t.Fatal(err)
}
// Lock so1 for the remaining test. This shouldn't block operations on so2.
ht.host.managedLockStorageObligation(so1.id())
defer ht.host.managedUnlockStorageObligation(so1.id())
done := make(chan struct{})
go func() {
// Modify so1. This should at least take
// largeContractUpdateDelay seconds.
defer close(done)
start := time.Now()
err := ht.host.managedModifyStorageObligation(so1, nil, nil)
delay := time.Since(start)
if err != nil {
t.Error(err)
}
if delay < largeContractUpdateDelay {
t.Errorf("delay should be at least %v but was %v", largeContractUpdateDelay, delay)
}
}()
// Lock so2 and modify it repeatedly. This simulates uploads to a different
// contract. No modification sho
numMods := 0
LOOP:
for {
select {
case <-done:
break LOOP
default:
}
numMods++
ht.host.managedLockStorageObligation(so2.id())
start := time.Now()
err := ht.host.managedModifyStorageObligation(so2, nil, nil)
delay := time.Since(start)
ht.host.managedUnlockStorageObligation(so2.id())
if err != nil {
t.Fatal(err)
}
if delay >= largeContractUpdateDelay {
t.Fatal("delay was longer than largeContractDelay which means so2 got blocked by so1", delay, largeContractUpdateDelay)
}
}
if numMods == 0 {
t.Fatal("expected at least one modification to happen to so2")
}
t.Logf("updated so2 %v times", numMods)
} | explode_data.jsonl/47342 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1382
} | [
2830,
3393,
34253,
14067,
4713,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
3244,
57776,
7039,
741,
197,
532,
3244,
41288,
7957,
741,
197,
426,
11,
1848,
1669,
501,
9296,
58699,
445,
2271,
34253,
14067,
4713,
1138,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFilterFailsToInit(t *testing.T) {
// Arrange
config := getConfigWithFilters(t, []string{"filter.1"})
nFilter := filterstest.MockFilter{}
nFilter.On("Init", mock.Anything).
Return(errors.New("failed to init")).Once()
loader := loaderstest.MockLoader{}
loader.On("LoadFilter", "filter.1").Return(&nFilter, nil)
defer loader.AssertExpectations(t)
// Act
server, err := New(config, zap.NewNop(), &loader)
// Assert
assert.Nil(t, server)
assert.Equal(t, err.Error(), "failed to init")
} | explode_data.jsonl/59973 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 191
} | [
2830,
3393,
5632,
37,
6209,
1249,
3803,
1155,
353,
8840,
836,
8,
341,
197,
322,
40580,
198,
25873,
1669,
66763,
2354,
28351,
1155,
11,
3056,
917,
4913,
5315,
13,
16,
1,
8824,
9038,
5632,
1669,
4051,
267,
477,
24664,
5632,
16094,
9038,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSetenv(t *testing.T) {
job := mkJob(t, "dummy")
job.Setenv("foo", "bar")
if val := job.Getenv("foo"); val != "bar" {
t.Fatalf("Getenv returns incorrect value: %s", val)
}
job.Setenv("bar", "")
if val := job.Getenv("bar"); val != "" {
t.Fatalf("Getenv returns incorrect value: %s", val)
}
if val := job.Getenv("nonexistent"); val != "" {
t.Fatalf("Getenv returns incorrect value: %s", val)
}
} | explode_data.jsonl/15386 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 168
} | [
2830,
3393,
1649,
3160,
1155,
353,
8840,
836,
8,
341,
68577,
1669,
23789,
12245,
1155,
11,
330,
31390,
1138,
68577,
4202,
3160,
445,
7975,
497,
330,
2257,
1138,
743,
1044,
1669,
2618,
64883,
445,
7975,
5038,
1044,
961,
330,
2257,
1,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestCreatePipelineVersion_StorePipelineVersionMetadataError(t *testing.T) {
store := NewFakeClientManagerOrFatal(util.NewFakeTimeForEpoch())
defer store.Close()
manager := NewResourceManager(store)
// Create a pipeline.
_, err := manager.CreatePipeline(
"pipeline",
"",
[]byte("apiVersion: argoproj.io/v1alpha1\nkind: Workflow"))
assert.Nil(t, err)
// Close db.
store.DB().Close()
// Create a version under the above pipeline, resulting in error because of
// closed db.
pipelineStore, ok := store.pipelineStore.(*storage.PipelineStore)
assert.True(t, ok)
pipelineStore.SetUUIDGenerator(util.NewFakeUUIDGeneratorOrFatal(
FakeUUIDOne, nil))
_, err = manager.CreatePipelineVersion(
&api.PipelineVersion{
Name: "pipeline_version",
ResourceReferences: []*api.ResourceReference{
&api.ResourceReference{
Key: &api.ResourceKey{
Id: DefaultFakeUUID,
Type: api.ResourceType_PIPELINE,
},
Relationship: api.Relationship_OWNER,
},
},
},
[]byte("apiVersion: argoproj.io/v1alpha1\nkind: Workflow"), true)
assert.Equal(t, codes.Internal, err.(*util.UserError).ExternalStatusCode())
assert.Contains(t, err.Error(), "database is closed")
} | explode_data.jsonl/77074 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 464
} | [
2830,
3393,
4021,
34656,
5637,
92684,
34656,
5637,
14610,
1454,
1155,
353,
8840,
836,
8,
341,
57279,
1669,
1532,
52317,
2959,
2043,
2195,
62396,
67811,
7121,
52317,
1462,
2461,
44338,
2398,
16867,
3553,
10421,
741,
92272,
1669,
1532,
32498,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetEvent_ReadSecrets(t *testing.T) {
valSt := []string{"s1", "s2"}
val, _ := json.Marshal(valSt)
os.Setenv("Http_Secrets", string(val))
owner := "alexellis"
os.Setenv("Http_Owner", owner)
installationID := "123456"
os.Setenv("Http_Installation_id", installationID)
eventInfo, err := getEventFromEnv()
if err != nil {
t.Errorf(err.Error())
t.Fail()
}
expected := []string{owner + "-s1", owner + "-s2"}
for _, val := range eventInfo.Secrets {
found := false
for _, expectedVal := range expected {
if expectedVal == val {
found = true
}
}
if !found {
t.Errorf("Wanted secret: %s, didn't find it in list", val)
}
}
} | explode_data.jsonl/11972 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 271
} | [
2830,
3393,
1949,
1556,
38381,
19773,
82,
1155,
353,
8840,
836,
8,
1476,
19302,
623,
1669,
3056,
917,
4913,
82,
16,
497,
330,
82,
17,
16707,
19302,
11,
716,
1669,
2951,
37271,
9098,
623,
340,
25078,
4202,
3160,
445,
2905,
1098,
50856,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestStartCmdCreateKMSFailure(t *testing.T) {
t.Run("KMS fails (DB)", func(t *testing.T) {
startCmd := GetStartCmd()
args := []string{
"--" + hostURLFlagName, "localhost:8080",
"--" + hostMetricsURLFlagName, "localhost:8081",
"--" + casTypeFlagName, "ipfs",
"--" + ipfsURLFlagName, "localhost:8081",
"--" + vctURLFlagName, "localhost:8081",
"--" + didNamespaceFlagName, "namespace",
"--" + databaseTypeFlagName, databaseTypeMemOption,
"--" + kmsSecretsDatabaseTypeFlagName, databaseTypeCouchDBOption,
"--" + anchorCredentialSignatureSuiteFlagName, "suite",
"--" + anchorCredentialDomainFlagName, "domain.com",
"--" + anchorCredentialIssuerFlagName, "issuer.com",
"--" + anchorCredentialURLFlagName, "peer.com",
"--" + kmsSecretsDatabaseURLFlagName, "badURL",
}
startCmd.SetArgs(args)
err := startCmd.Execute()
require.NotNil(t, err)
require.Contains(t, err.Error(), "failed to ping couchDB")
})
t.Run("KMS fails (create kid)", func(t *testing.T) {
startCmd := GetStartCmd()
args := []string{
"--" + hostURLFlagName, "localhost:8080",
"--" + hostMetricsURLFlagName, "localhost:8081",
"--" + casTypeFlagName, "local",
"--" + vctURLFlagName, "localhost:8081",
"--" + didNamespaceFlagName, "namespace",
"--" + databaseTypeFlagName, databaseTypeMemOption,
"--" + anchorCredentialSignatureSuiteFlagName, "suite",
"--" + anchorCredentialDomainFlagName, "domain.com",
"--" + anchorCredentialIssuerFlagName, "issuer.com",
"--" + anchorCredentialURLFlagName, "peer.com",
"--" + kmsStoreEndpointFlagName, "https://vct.example.com",
}
startCmd.SetArgs(args)
err := startCmd.Execute()
require.NotNil(t, err)
require.Contains(t, err.Error(), "create kid: init config value for")
})
t.Run("KMS fails (create remote store)", func(t *testing.T) {
startCmd := GetStartCmd()
args := []string{
"--" + hostURLFlagName, "localhost:8080",
"--" + hostMetricsURLFlagName, "localhost:8081",
"--" + casTypeFlagName, "local",
"--" + vctURLFlagName, "localhost:8081",
"--" + didNamespaceFlagName, "namespace",
"--" + databaseTypeFlagName, databaseTypeMemOption,
"--" + anchorCredentialSignatureSuiteFlagName, "suite",
"--" + anchorCredentialDomainFlagName, "domain.com",
"--" + anchorCredentialIssuerFlagName, "issuer.com",
"--" + anchorCredentialURLFlagName, "peer.com",
"--" + kmsEndpointFlagName, "https://vct.example.com",
}
startCmd.SetArgs(args)
err := startCmd.Execute()
require.NotNil(t, err)
require.Contains(t, err.Error(), "init config value for \"web-key-store\"")
})
} | explode_data.jsonl/31126 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1053
} | [
2830,
3393,
3479,
15613,
4021,
42,
4826,
17507,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
42,
4826,
14525,
320,
3506,
11583,
2915,
1155,
353,
8840,
836,
8,
341,
197,
21375,
15613,
1669,
2126,
3479,
15613,
2822,
197,
31215,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetStraddle(t *testing.T) {
iter := GetStraddle(tests.TestStraddleSymbol)
success := iter.Next()
assert.True(t, success)
assert.Nil(t, iter.Err())
assert.Equal(t, iter.Meta().UnderlyingSymbol, tests.TestStraddleSymbol)
} | explode_data.jsonl/70632 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 89
} | [
2830,
3393,
1949,
2580,
20137,
1155,
353,
8840,
836,
8,
1476,
79924,
1669,
2126,
2580,
20137,
8623,
82,
8787,
2580,
20137,
15090,
340,
30553,
1669,
5367,
18501,
741,
6948,
32443,
1155,
11,
2393,
340,
6948,
59678,
1155,
11,
5367,
27862,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestClient_NewOrder(t *testing.T) {
key := makePrivateKey(t)
account, err := testClient.NewAccount(key, false, true)
if err != nil {
t.Fatalf("unexpected error making account: %v", err)
}
identifiers := []Identifier{{"dns", randString() + ".com"}}
order, err := testClient.NewOrder(account, identifiers)
if err != nil {
t.Fatalf("unexpected error making order: %v", err)
}
if !reflect.DeepEqual(order.Identifiers, identifiers) {
t.Fatalf("order identifiers mismatch, identifiers: %+v, order identifiers: %+v", identifiers, order.Identifiers)
}
badIdentifiers := []Identifier{{"bad", randString() + ".com"}}
_, err = testClient.NewOrder(account, badIdentifiers)
if err == nil {
t.Fatal("expected error, got none")
}
if _, ok := err.(Problem); !ok {
t.Fatalf("expected Problem, got: %v - %v", reflect.TypeOf(err), err)
}
} | explode_data.jsonl/68105 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 300
} | [
2830,
3393,
2959,
39582,
4431,
1155,
353,
8840,
836,
8,
341,
23634,
1669,
1281,
75981,
1155,
340,
86866,
11,
1848,
1669,
1273,
2959,
7121,
7365,
4857,
11,
895,
11,
830,
340,
743,
1848,
961,
2092,
341,
197,
3244,
30762,
445,
53859,
146... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestFormatValid(t *testing.T) {
cases := []struct {
in string
expected bool
}{
{"xml", true},
{"lis", true},
{"html", false},
}
for _, c := range cases {
got := FormatValid(c.in)
if got != c.expected {
t.Errorf("FormatValid(%q) == %t, expected %t\n", c.in, got, c.expected)
}
}
} | explode_data.jsonl/67944 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 141
} | [
2830,
3393,
4061,
4088,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
17430,
981,
914,
198,
197,
42400,
1807,
198,
197,
59403,
197,
197,
4913,
6455,
497,
830,
1583,
197,
197,
4913,
47203,
497,
830,
1583,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestMedianFetcher_MajorityFetches(t *testing.T) {
hf := newFixedPricedFetcher(decimal.NewFromInt(100)) // healthy fetcher)
ef := newErroringPricedFetcher() // erroring fetcher
tests := []struct {
name string
fetchers []Fetcher
}{
{"2/3", []Fetcher{hf, hf, ef}},
{"3/3", []Fetcher{hf, hf, hf}},
{"3/4", []Fetcher{hf, hf, hf, ef}},
{"3/5", []Fetcher{hf, hf, hf, ef, ef}},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
medianFetcher, err := newMedianFetcher(test.fetchers...)
require.NoError(t, err)
medianPrice, err := medianFetcher.Fetch(context.Background(), emptyMeta)
assert.NoError(t, err)
assert.True(t, decimal.NewFromInt(100).Equal(medianPrice))
})
}
} | explode_data.jsonl/26503 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 359
} | [
2830,
3393,
79514,
97492,
1245,
3035,
487,
20714,
288,
1155,
353,
8840,
836,
8,
341,
9598,
69,
1669,
501,
13520,
47,
2216,
291,
97492,
71100,
7121,
3830,
1072,
7,
16,
15,
15,
593,
442,
9314,
7807,
261,
340,
197,
823,
1669,
501,
1454... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_generateEphemeralOKPKey_Failure(t *testing.T) {
c := Crypto{
okpKW: &mockKeyWrapperSupport{
generateKeyErr: errors.New("generate failure"),
},
}
_, _, err := c.generateOrGetEphemeralOKPKey(nil)
require.EqualError(t, err, "generate failure")
c.okpKW = &mockKeyWrapperSupport{
generateKeyVal: &ecdsa.PrivateKey{},
}
_, _, err = c.generateOrGetEphemeralOKPKey(nil)
require.EqualError(t, err, "invalid ephemeral key type, not OKP, want []byte for OKP")
} | explode_data.jsonl/81262 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 202
} | [
2830,
3393,
48851,
36,
59941,
3253,
3925,
47,
1592,
1400,
9373,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
32886,
515,
197,
59268,
79,
78610,
25,
609,
16712,
1592,
11542,
7916,
515,
298,
3174,
13220,
1592,
7747,
25,
5975,
7121,
445,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestParse(t *testing.T) {
defer ClearTask()
tk := NewTask("taska", "0/30 * * * * *", func() error { fmt.Println("hello world"); return nil })
err := tk.Run()
if err != nil {
t.Fatal(err)
}
AddTask("taska", tk)
StartTask()
time.Sleep(6 * time.Second)
StopTask()
} | explode_data.jsonl/33787 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 118
} | [
2830,
3393,
14463,
1155,
353,
8840,
836,
8,
341,
16867,
12023,
6262,
2822,
3244,
74,
1669,
1532,
6262,
445,
8202,
64,
497,
330,
15,
14,
18,
15,
353,
353,
353,
353,
353,
497,
2915,
368,
1465,
314,
8879,
12419,
445,
14990,
1879,
5038,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestZKGroupWatchConsumerList(t *testing.T) {
zk := newZKGroupStorage([]string{"127.0.0.1:2181"}, 6*time.Second)
consumer1 := fmt.Sprintf("%s-%d", testConsumerID, rand.Int())
consumer2 := fmt.Sprintf("%s-%d", testConsumerID, rand.Int())
consumer3 := fmt.Sprintf("%s-%d", testConsumerID, rand.Int())
consumerList := []string{consumer1, consumer2, consumer3}
for _, consumer := range consumerList {
zk.registerConsumer(testGroup, consumer, nil)
}
watcher, err := zk.watchConsumerList(testGroup)
if err != nil {
t.Error(err)
}
select {
case <-watcher.EvCh:
t.Error("channel receive message before consumer list change")
default:
}
zk.deleteConsumer(testGroup, consumer1)
select {
case <-watcher.EvCh:
default:
t.Error("channel can't receive message after consumer list change")
}
for _, consumer := range consumerList {
zk.deleteConsumer(testGroup, consumer)
}
} | explode_data.jsonl/5652 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 320
} | [
2830,
3393,
57,
42,
2808,
14247,
29968,
852,
1155,
353,
8840,
836,
8,
341,
20832,
74,
1669,
501,
57,
42,
2808,
5793,
10556,
917,
4913,
16,
17,
22,
13,
15,
13,
15,
13,
16,
25,
17,
16,
23,
16,
14345,
220,
21,
77053,
32435,
692,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestLog_ConvertLevel(t *testing.T) {
type fields struct {
logrus *logrus.Logger
level structs.Level
}
type args struct {
level structs.Level
}
tests := []struct {
name string
fields fields
args args
want interface{}
wantErr bool
}{
{
"standard",
fields{
logrus: logrus.New(),
level: structs.LevelTrace,
},
args{level: structs.LevelTrace},
logrus.TraceLevel,
false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
l := &Log{
logrus: tt.fields.logrus,
level: tt.fields.level,
}
got, err := l.ConvertLevel(tt.args.level)
if (err != nil) != tt.wantErr {
t.Errorf("ConvertLevel() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ConvertLevel() got = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/34826 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 422
} | [
2830,
3393,
2201,
15100,
1621,
4449,
1155,
353,
8840,
836,
8,
341,
13158,
5043,
2036,
341,
197,
90822,
353,
839,
20341,
12750,
198,
197,
53743,
220,
62845,
25259,
198,
197,
532,
13158,
2827,
2036,
341,
197,
53743,
62845,
25259,
198,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestValidatePullPolicy(t *testing.T) {
type T struct {
Container core.Container
ExpectedPolicy core.PullPolicy
}
testCases := map[string]T{
"NotPresent1": {
core.Container{Name: "abc", Image: "image:latest", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"},
core.PullIfNotPresent,
},
"NotPresent2": {
core.Container{Name: "abc1", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"},
core.PullIfNotPresent,
},
"Always1": {
core.Container{Name: "123", Image: "image:latest", ImagePullPolicy: "Always"},
core.PullAlways,
},
"Always2": {
core.Container{Name: "1234", Image: "image", ImagePullPolicy: "Always"},
core.PullAlways,
},
"Never1": {
core.Container{Name: "abc-123", Image: "image:latest", ImagePullPolicy: "Never"},
core.PullNever,
},
"Never2": {
core.Container{Name: "abc-1234", Image: "image", ImagePullPolicy: "Never"},
core.PullNever,
},
}
for k, v := range testCases {
ctr := &v.Container
errs := validatePullPolicy(ctr.ImagePullPolicy, field.NewPath("field"))
if len(errs) != 0 {
t.Errorf("case[%s] expected success, got %#v", k, errs)
}
if ctr.ImagePullPolicy != v.ExpectedPolicy {
t.Errorf("case[%s] expected policy %v, got %v", k, v.ExpectedPolicy, ctr.ImagePullPolicy)
}
}
} | explode_data.jsonl/1021 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 526
} | [
2830,
3393,
17926,
36068,
13825,
1155,
353,
8840,
836,
8,
341,
13158,
350,
2036,
341,
197,
197,
4502,
414,
6200,
33672,
198,
197,
197,
18896,
13825,
6200,
97357,
13825,
198,
197,
532,
18185,
37302,
1669,
2415,
14032,
60,
51,
515,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestAdvanceNonExistentWorkspace(t *testing.T) {
runTest(t, func(test *testHelper) {
client := test.newClient()
_, _, err := client.AdvanceWorkspace(test.ctx, "test", "test",
"test", quantumfs.WorkspaceNonce{},
quantumfs.EmptyWorkspaceKey, quantumfs.ZeroKey)
wsdbErr, ok := err.(quantumfs.WorkspaceDbErr)
test.Assert(ok, "Error isn't WorkspaceDbErr: %s", err.Error())
test.Assert(wsdbErr.Code == quantumfs.WSDB_WORKSPACE_NOT_FOUND,
"Non-existent workspace was found: %s", err.Error())
})
} | explode_data.jsonl/9162 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 207
} | [
2830,
3393,
95027,
8121,
840,
18128,
45981,
1155,
353,
8840,
836,
8,
341,
56742,
2271,
1155,
11,
2915,
8623,
353,
1944,
5511,
8,
341,
197,
25291,
1669,
1273,
4618,
2959,
2822,
197,
197,
6878,
8358,
1848,
1669,
2943,
17865,
85,
681,
45... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPopVariance(t *testing.T) {
for i, test := range []struct {
x []float64
weights []float64
ans float64
}{
{
x: []float64{8, -3, 7, 8, -4},
weights: nil,
ans: 30.16,
},
{
x: []float64{8, -3, 7, 8, -4},
weights: []float64{1, 1, 1, 1, 1},
ans: 30.16,
},
{
x: []float64{8, 3, 7, 8, 4},
weights: []float64{2, 1, 2, 1, 1},
ans: 3.6734693877551026,
},
{
x: []float64{1, 4, 9},
weights: []float64{1, 1.5, 1},
ans: 9.387755102040817,
},
{
x: []float64{1, 2, 3},
weights: []float64{1, 1.5, 1},
ans: 0.5714285714285714,
},
{
x: []float64{2},
weights: nil,
ans: 0,
},
{
x: []float64{2},
weights: []float64{2},
ans: 0,
},
} {
variance := PopVariance(test.x, test.weights)
if math.Abs(variance-test.ans) > 1e-14 {
t.Errorf("PopVariance mismatch case %d. Expected %v, Found %v", i, test.ans, variance)
}
}
if !panics(func() { PopVariance(make([]float64, 3), make([]float64, 2)) }) {
t.Errorf("PopVariance did not panic with x, weights length mismatch")
}
} | explode_data.jsonl/1788 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 633
} | [
2830,
3393,
11598,
53,
36905,
1155,
353,
8840,
836,
8,
341,
2023,
600,
11,
1273,
1669,
2088,
3056,
1235,
341,
197,
10225,
981,
3056,
3649,
21,
19,
198,
197,
197,
13327,
3056,
3649,
21,
19,
198,
197,
43579,
257,
2224,
21,
19,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDaoSetCheckerTsRds(t *testing.T) {
convey.Convey("SetCheckerTsRds", t, func(ctx convey.C) {
var (
oid = int64(0)
tp = int(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
err := d.SetCheckerTsRds(context.Background(), oid, tp)
ctx.Convey("Then err should be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
})
} | explode_data.jsonl/21803 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 175
} | [
2830,
3393,
12197,
1649,
35188,
52793,
49,
5356,
1155,
353,
8840,
836,
8,
341,
37203,
5617,
4801,
5617,
445,
1649,
35188,
52793,
49,
5356,
497,
259,
11,
2915,
7502,
20001,
727,
8,
341,
197,
2405,
2399,
298,
197,
588,
284,
526,
21,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestOutputLoadTemplate(t *testing.T) {
client := GetTestingElasticsearch()
err := client.Connect(5 * time.Second)
if err != nil {
t.Fatal(err)
}
// delete template if it exists
client.request("DELETE", "/_template/libbeat", "", nil, nil)
// Make sure template is not yet there
assert.False(t, client.CheckTemplate("libbeat"))
tPath, err := filepath.Abs("../../../packetbeat/packetbeat.template.json")
if err != nil {
t.Fatal(err)
}
config := map[string]interface{}{
"hosts": GetEsHost(),
"template": map[string]interface{}{
"name": "libbeat",
"path": tPath,
"versions.2x.enabled": false,
},
}
cfg, err := common.NewConfigFrom(config)
if err != nil {
t.Fatal(err)
}
output, err := New("libbeat", cfg, 0)
if err != nil {
t.Fatal(err)
}
event := outputs.Data{Event: common.MapStr{
"@timestamp": common.Time(time.Now()),
"host": "test-host",
"type": "libbeat",
"message": "Test message from libbeat",
}}
err = output.PublishEvent(nil, outputs.Options{Guaranteed: true}, event)
if err != nil {
t.Fatal(err)
}
// Guaranteed publish, so the template should be there
assert.True(t, client.CheckTemplate("libbeat"))
} | explode_data.jsonl/40102 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 498
} | [
2830,
3393,
5097,
5879,
7275,
1155,
353,
8840,
836,
8,
1476,
25291,
1669,
2126,
16451,
36,
51179,
1836,
741,
9859,
1669,
2943,
43851,
7,
20,
353,
882,
32435,
340,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
630,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestDefaultMissingKeys(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
_, client, closeFn := setup(t)
defer closeFn()
// Applier creates a deployment with containerPort but no protocol
apply := []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "deployment-shared-map-item-removal",
"labels": {"app": "nginx"}
},
"spec": {
"selector": {
"matchLabels": {
"app": "nginx"
}
},
"template": {
"metadata": {
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [{
"name": "nginx",
"image": "nginx:latest",
"ports": [{
"name": "foo",
"containerPort": 80
}]
}]
}
}
}
}`)
_, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType).
AbsPath("/apis/apps/v1").
Namespace("default").
Resource("deployments").
Name("deployment-shared-map-item-removal").
Param("fieldManager", "test_applier").
Body(apply).
Do(context.TODO()).
Get()
if err != nil {
t.Fatalf("Failed to create object using Apply patch: %v", err)
}
// Applier updates the name, and uses the protocol, we should get a conflict.
apply = []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "deployment-shared-map-item-removal",
"labels": {"app": "nginx"}
},
"spec": {
"selector": {
"matchLabels": {
"app": "nginx"
}
},
"template": {
"metadata": {
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [{
"name": "nginx",
"image": "nginx:latest",
"ports": [{
"name": "bar",
"containerPort": 80,
"protocol": "TCP"
}]
}]
}
}
}
}`)
patched, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType).
AbsPath("/apis/apps/v1").
Namespace("default").
Resource("deployments").
Name("deployment-shared-map-item-removal").
Param("fieldManager", "test_applier_conflict").
Body(apply).
Do(context.TODO()).
Get()
if err == nil {
t.Fatalf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", patched)
}
status, ok := err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when a different applier updates existing list item, got: %v", status.Status().Details.Causes)
}
} | explode_data.jsonl/53489 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1150
} | [
2830,
3393,
3675,
25080,
8850,
1155,
353,
8840,
836,
8,
341,
16867,
4565,
70,
266,
57824,
287,
4202,
13859,
42318,
16014,
2271,
1155,
11,
4094,
12753,
13275,
13859,
42318,
11,
13954,
20304,
22997,
16384,
28497,
11,
830,
8,
2822,
197,
68... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestSetGetGlobalPCMode(t *testing.T) {
tp := fmt.Sprintf("%s:%d", "t.go", 42)
SetTracePoint(tp)
defer ResetTracePoint(tp)
enabled := GetTracePoint(tp)
if !enabled {
t.Errorf("Expected tracepoint %s to be enabled", tp)
}
} | explode_data.jsonl/48442 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 102
} | [
2830,
3393,
1649,
1949,
11646,
4872,
3636,
1155,
353,
8840,
836,
8,
341,
73423,
1669,
8879,
17305,
4430,
82,
7533,
67,
497,
330,
83,
18002,
497,
220,
19,
17,
340,
22212,
6550,
2609,
38852,
340,
16867,
16932,
6550,
2609,
38852,
340,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestShardSet(t *testing.T) {
elements := []struct {
s *shard
ws *workers
bg *boolgen
notExpired int64
expired int64
}{
{
s: &shard{elements: make(map[string]element), q: &queue{}},
ws: &workers{wn: 1, number: 256},
bg: newBoolgen(),
},
{
s: &shard{elements: make(map[string]element), q: &queue{}},
ws: &workers{wn: 4, number: 1024},
bg: newBoolgen(),
},
{
s: &shard{elements: make(map[string]element), q: &queue{}},
ws: &workers{wn: 32, number: 2048},
bg: newBoolgen(),
},
{
s: &shard{elements: make(map[string]element), q: &queue{}},
ws: &workers{wn: 128, number: 8192},
bg: newBoolgen(),
},
}
for _, e := range elements {
e.ws.cb = func(w *worker, i int) error {
if e.bg.Bool() {
e.s.set(fmt.Sprintf("%d-%d", w.id, i), i, 0)
atomic.AddInt64(&e.notExpired, 1)
} else {
e.s.set(fmt.Sprintf("%d-%d", w.id, i), i, 30*time.Second)
atomic.AddInt64(&e.expired, 1)
}
return nil
}
e.ws.initialize()
e.ws.run()
actualNotExpired, actualExpired := e.s.size()-e.s.q.size(), e.s.q.size()
t.Logf("not-expired/actual-not-expired (%d/%d) expired/actual-expired (%d/%d)",
e.notExpired, actualNotExpired, e.expired, actualExpired)
assert.Equal(t, actualNotExpired, int(e.notExpired))
assert.Equal(t, actualExpired, int(e.expired))
}
} | explode_data.jsonl/5395 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 687
} | [
2830,
3393,
2016,
567,
1649,
1155,
353,
8840,
836,
8,
341,
197,
21423,
1669,
3056,
1235,
341,
197,
1903,
688,
353,
927,
567,
198,
197,
6692,
82,
260,
353,
54958,
198,
197,
76131,
260,
353,
2641,
4370,
198,
197,
97266,
54349,
526,
21... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestRTR_SuccessfulDeregistration(t *testing.T) {
hss := getTestHSSDiameterServer(t)
swxProxy := getTestSwxProxy(t, hss, false, false, true)
sar := &fegprotos.RegistrationRequest{
UserName: "sub1",
}
_, err := swxProxy.Register(context.Background(), sar)
assert.NoError(t, err)
sub := <eprotos.SubscriberID{Id: "sub1"}
_, err = hss.DeregisterSubscriber(context.Background(), sub)
assert.NoError(t, err)
subData, err := hss.GetSubscriberData(context.Background(), sub)
assert.NoError(t, err)
assert.False(t, subData.GetState().GetTgppAaaServerRegistered())
} | explode_data.jsonl/73022 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 226
} | [
2830,
3393,
49,
2378,
87161,
1262,
35,
52633,
4048,
1155,
353,
8840,
836,
8,
341,
9598,
778,
1669,
633,
2271,
39,
1220,
35,
36044,
5475,
1155,
340,
1903,
20984,
16219,
1669,
633,
2271,
13218,
87,
16219,
1155,
11,
305,
778,
11,
895,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGiniTrivUnsat(t *testing.T) {
g := NewGini()
g.Add(z.Lit(3))
g.Add(0)
g.Add(z.Lit(3).Not())
g.Add(0)
if g.Solve() != -1 {
t.Errorf("basic add unsat failed.")
}
} | explode_data.jsonl/74364 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
38,
6591,
1282,
344,
1806,
36468,
1155,
353,
8840,
836,
8,
341,
3174,
1669,
1532,
38,
6591,
741,
3174,
1904,
13174,
1214,
275,
7,
18,
1171,
3174,
1904,
7,
15,
340,
3174,
1904,
13174,
1214,
275,
7,
18,
568,
2623,
2398,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestLRUCache(t *testing.T) {
for x, tc := range cases {
cache := Constructor(tc.capacity)
output := make([]int, len(tc.input))
for j, op := range tc.ops {
input := tc.input[j]
switch op {
case "get":
output[j] = cache.Get(input[0])
case "put":
cache.Put(input[0], input[1])
}
}
if !reflect.DeepEqual(tc.output, output) {
t.Errorf("x: %d\nExpect: %v\nActual: %v\n", x, tc.output, output)
}
}
} | explode_data.jsonl/45956 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 208
} | [
2830,
3393,
20117,
5459,
1777,
1155,
353,
8840,
836,
8,
341,
2023,
856,
11,
17130,
1669,
2088,
5048,
341,
197,
52680,
1669,
16786,
44415,
59168,
340,
197,
21170,
1669,
1281,
10556,
396,
11,
2422,
44415,
10046,
4390,
197,
2023,
502,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestStorage_Read(t *testing.T) {
s := NewStorage("/tmp/chunk/1.chunk", "/tmp/index")
err := s.Open()
defer s.Close()
if err != nil {
t.Fatal(err)
}
i := s.index.slots[1]
t.Log(i.fId)
err, name, bts := s.Read(i.fId)
if err != nil {
t.Fatal(err)
}
t.Log(name)
t.Log(len(bts))
t.Log(string(bts))
} | explode_data.jsonl/77475 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 166
} | [
2830,
3393,
5793,
38381,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
1532,
5793,
4283,
5173,
21284,
3122,
14,
16,
47806,
497,
3521,
5173,
9022,
1138,
9859,
1669,
274,
12953,
741,
16867,
274,
10421,
741,
743,
1848,
961,
2092,
341,
197,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestOptionForeach(t *testing.T) {
sum := 123
s := gs.Some[int](100)
f := func(v int) {
sum += v
}
s.Foreach(f)
assert.Equal(t, 123+100, sum)
sum = 123
n := gs.None[int]()
n.Foreach(f)
assert.Equal(t, 123, sum)
} | explode_data.jsonl/32434 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 115
} | [
2830,
3393,
5341,
37,
8539,
1155,
353,
8840,
836,
8,
341,
31479,
1669,
220,
16,
17,
18,
198,
1903,
1669,
28081,
86833,
18640,
9533,
16,
15,
15,
340,
1166,
1669,
2915,
3747,
526,
8,
341,
197,
31479,
1421,
348,
198,
197,
532,
1903,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReadContents(t *testing.T) {
t.Run("error reading file", func(t *testing.T) {
partition := Partition{
Start: 2048,
End: 3048,
Name: "EFI System",
GUID: "5CA3360B-5DE6-4FCF-B4CE-419CEE433B51",
Attributes: 0,
Type: EFISystemPartition,
}
var b bytes.Buffer
writer := bufio.NewWriter(&b)
expected := "Error reading from file"
f := &testhelper.FileImpl{
Reader: func(b []byte, offset int64) (int, error) {
return 0, fmt.Errorf(expected)
},
}
read, err := partition.ReadContents(f, writer)
if read != 0 {
t.Errorf("Returned %d bytes read instead of 0", read)
}
if err == nil {
t.Errorf("Returned nil error instead of actual errors")
}
if !strings.HasPrefix(err.Error(), expected) {
t.Errorf("Error type %s instead of expected %s", err.Error(), expected)
}
})
t.Run("successful read", func(t *testing.T) {
partition := Partition{
Start: 2048,
End: 3048,
Name: "EFI System",
GUID: "5CA3360B-5DE6-4FCF-B4CE-419CEE433B51",
Attributes: 0,
Type: EFISystemPartition,
}
var b bytes.Buffer
writer := bufio.NewWriter(&b)
size := 100
b2 := make([]byte, size, size)
rand.Read(b2)
f := &testhelper.FileImpl{
Reader: func(b []byte, offset int64) (int, error) {
copy(b, b2)
return size, io.EOF
},
}
read, err := partition.ReadContents(f, writer)
if read != int64(size) {
t.Errorf("Returned %d bytes read instead of %d", read, size)
}
if err != nil {
t.Errorf("Returned error instead of expected nil")
}
writer.Flush()
if bytes.Compare(b.Bytes(), b2) != 0 {
t.Errorf("Mismatched bytes data")
t.Log(b.Bytes())
t.Log(b2)
}
})
} | explode_data.jsonl/39147 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 795
} | [
2830,
3393,
4418,
14803,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
841,
5290,
1034,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
72872,
680,
1669,
54626,
515,
298,
65999,
25,
414,
220,
17,
15,
19,
23,
345,
298,
38407,
25,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRekey_status(t *testing.T) {
core, keys, _ := vault.TestCoreUnsealed(t)
ln, addr := http.TestServer(t, core)
defer ln.Close()
ui := new(cli.MockUi)
c := &RekeyCommand{
Key: hex.EncodeToString(keys[0]),
Meta: meta.Meta{
Ui: ui,
},
}
args := []string{"-address", addr, "-init"}
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
args = []string{"-address", addr, "-status"}
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
if !strings.Contains(ui.OutputWriter.String(), "Started: true") {
t.Fatalf("bad: %s", ui.OutputWriter.String())
}
} | explode_data.jsonl/39445 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 293
} | [
2830,
3393,
693,
792,
4773,
1155,
353,
8840,
836,
8,
341,
71882,
11,
6894,
11,
716,
1669,
34584,
8787,
5386,
1806,
75940,
1155,
340,
197,
2261,
11,
10789,
1669,
1758,
8787,
5475,
1155,
11,
6200,
340,
16867,
29390,
10421,
2822,
37278,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.