text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func Test_PJWHash64(t *testing.T) { var x uint64 = 31150 gtest.C(t, func(t *gtest.T) { j := ghash.PJWHash64(strBasic) t.Assert(j, x) }) }
explode_data.jsonl/60235
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 78 }
[ 2830, 3393, 1088, 41, 54, 6370, 21, 19, 1155, 353, 8840, 836, 8, 341, 2405, 856, 2622, 21, 19, 284, 220, 18, 16, 16, 20, 15, 198, 3174, 1944, 727, 1155, 11, 2915, 1155, 353, 82038, 836, 8, 341, 197, 12428, 1669, 342, 8296, 1069,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestStxoSerialization(t *testing.T) { tests := []struct { name string stxo txo.SpentTxOut serialized string }{ { name: "Spends last output of coinbase, height 1", stxo: coinbaseStxo, serialized: "0x030065cd1d0000000000e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, { name: "Spends last output of coinbase, height 2", stxo: coinbaseStxo2, serialized: "0x050065cd1d0000000000e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, { name: "Spends last output of coinbase, height 10001", stxo: coinbaseStxo10001, serialized: "0x809b220065cd1d0000000000e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000001", }, { name: "Spends last output of non coinbase,height 1", stxo: normalStxo, serialized: "0x0200e1f5050000000000e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, { name: "Spends last output of non coinbase,height 2", stxo: normalStxo2, serialized: "0x0400e1f5050000000000e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, { name: "Spends last output of non coinbase, height 10001", stxo: normalStxo10001, serialized: "0x809b2200e1f5050000000000e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, { name: "Spends last output of non coinbase, amount 123", stxo: normalStxoAmount123, serialized: "0x047b0000000000000000e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, { name: "Spends last output of non coinbase, amount 210000000000", stxo: normalStxoAmountMax, serialized: "0x0400b4f9e43000000000e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, { name: "txo.SpentTxOut PkScript is ScriptHash", stxo: normalScriptHashStxo, serialized: "0x030065cd1d0000000001e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, { name: "txo.SpentTxOut PkScript is contract", stxo: contractStxo, serialized: "0x030065cd1d0000000006e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, { name: "txo.SpentTxOut 
PkScript is vote", stxo: voteStxo, serialized: "0x030065cd1d0000000007e3054b411051da5492aec7a823b00cb3add772d70c000000000000000000000000", }, } for i, test := range tests { // Ensure the function to calculate the serialized size without // actually serializing it is calculated properly. gotSize := SpentTxOutSerializeSize(&test.stxo) // Ensure the stxo serializes to the expected value. gotSerialized := make([]byte, gotSize) gotBytesWritten := putSpentTxOut(gotSerialized, &test.stxo) gotBytexHex := hexutil.Encode(gotSerialized) if gotBytexHex != test.serialized { t.Errorf("case %d, puttxo.SpentTxOut (%s): did not get expected bytes - got %x, want %x", i, test.name, gotSerialized, test.serialized) continue } if gotBytesWritten * 2 + 2 != len(test.serialized) { t.Errorf("puttxo.SpentTxOut (%s): did not get expected number of bytes written - got %d, want %d", test.name, gotBytesWritten, len(test.serialized)) continue } // Ensure the serialized bytes are decoded back to the expected // stxo. var gotStxo txo.SpentTxOut gotBytesRead, err := decodeSpentTxOut(gotSerialized, &gotStxo) if err != nil { t.Errorf("decodeSpentTxOut (%s): unexpected error: %v", test.name, err) continue } if !reflect.DeepEqual(gotStxo, test.stxo) { t.Errorf("decodeSpentTxOut (%s) mismatched entries - got %v, want %v", test.name, gotStxo, test.stxo) continue } if gotBytesRead * 2 + 2 != len(test.serialized) { t.Errorf("decodeSpentTxOut (%s): did not get expected number of bytes read - got %d, want %d", test.name, gotBytesRead, len(test.serialized)) continue } } }
explode_data.jsonl/37662
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1611 }
[ 2830, 3393, 623, 40822, 35865, 1155, 353, 8840, 836, 8, 1476, 78216, 1669, 3056, 1235, 341, 197, 11609, 981, 914, 198, 197, 18388, 40822, 981, 9854, 78, 808, 45125, 31584, 2662, 198, 197, 197, 75277, 914, 198, 197, 59403, 197, 197, 51...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestCreateBatchChangesCredential(t *testing.T) { if testing.Short() { t.Skip() } ct.MockRSAKeygen(t) ctx := context.Background() db := dbtest.NewDB(t, "") pruneUserCredentials(t, db, nil) userID := ct.CreateTestUser(t, db, true).ID cstore := store.New(db, nil) r := &Resolver{store: cstore} s, err := graphqlbackend.NewSchema(db, r, nil, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } var validationErr error service.Mocks.ValidateAuthenticator = func(ctx context.Context, externalServiceID, externalServiceType string, a auth.Authenticator) error { return validationErr } t.Cleanup(func() { service.Mocks.Reset() }) t.Run("User credential", func(t *testing.T) { input := map[string]interface{}{ "user": graphqlbackend.MarshalUserID(userID), "externalServiceKind": string(extsvc.KindGitHub), "externalServiceURL": "https://github.com/", "credential": "SOSECRET", } var response struct { CreateBatchChangesCredential apitest.BatchChangesCredential } actorCtx := actor.WithActor(ctx, actor.FromUser(userID)) t.Run("validation fails", func(t *testing.T) { // Throw correct error when credential failed validation validationErr = errors.New("fake validation failed") t.Cleanup(func() { validationErr = nil }) errs := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateCredential) if len(errs) != 1 { t.Fatalf("expected single errors, but got none") } if have, want := errs[0].Extensions["code"], "ErrVerifyCredentialFailed"; have != want { t.Fatalf("wrong error code. 
want=%q, have=%q", want, have) } }) // First time it should work, because no credential exists apitest.MustExec(actorCtx, t, s, input, &response, mutationCreateCredential) if response.CreateBatchChangesCredential.ID == "" { t.Fatalf("expected credential to be created, but was not") } // Second time it should fail errs := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateCredential) if len(errs) != 1 { t.Fatalf("expected single errors, but got none") } if have, want := errs[0].Extensions["code"], "ErrDuplicateCredential"; have != want { t.Fatalf("wrong error code. want=%q, have=%q", want, have) } }) t.Run("Site credential", func(t *testing.T) { input := map[string]interface{}{ "user": nil, "externalServiceKind": string(extsvc.KindGitHub), "externalServiceURL": "https://github.com/", "credential": "SOSECRET", } var response struct { CreateBatchChangesCredential apitest.BatchChangesCredential } actorCtx := actor.WithActor(ctx, actor.FromUser(userID)) t.Run("validation fails", func(t *testing.T) { // Throw correct error when credential failed validation validationErr = errors.New("fake validation failed") t.Cleanup(func() { validationErr = nil }) errs := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateCredential) if len(errs) != 1 { t.Fatalf("expected single errors, but got none") } if have, want := errs[0].Extensions["code"], "ErrVerifyCredentialFailed"; have != want { t.Fatalf("wrong error code. 
want=%q, have=%q", want, have) } }) // First time it should work, because no site credential exists apitest.MustExec(actorCtx, t, s, input, &response, mutationCreateCredential) if response.CreateBatchChangesCredential.ID == "" { t.Fatalf("expected credential to be created, but was not") } // Second time it should fail errors := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateCredential) if len(errors) != 1 { t.Fatalf("expected single errors, but got none") } if have, want := errors[0].Extensions["code"], "ErrDuplicateCredential"; have != want { t.Fatalf("wrong error code. want=%q, have=%q", want, have) } }) }
explode_data.jsonl/53217
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1495 }
[ 2830, 3393, 4021, 21074, 11317, 48265, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 741, 197, 630, 89216, 24664, 73564, 1592, 4370, 1155, 692, 20985, 1669, 2266, 19047, 741, 20939, 1669, 2927, 1944, 7121, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_run(t *testing.T) { t.Log("Current test is [c]") testCases := [][2]string{ { `1 1 5 6`, `2`, }, { `1 1 1 200001`, `2`, }, { `2 3 998244353 998244853`, `3`, }, { `1 1 1 1`, `0`, }, // TODO 测试参数的下界和上界 { `1 1 1 6`, `2`, }, } testutil.AssertEqualStringCase(t, testCases, -1, run) }
explode_data.jsonl/23580
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 228 }
[ 2830, 3393, 14007, 1155, 353, 8840, 836, 8, 341, 3244, 5247, 445, 5405, 1273, 374, 508, 66, 47915, 18185, 37302, 1669, 508, 1457, 17, 30953, 515, 197, 197, 515, 298, 197, 63, 16, 220, 16, 198, 20, 220, 21, 12892, 298, 197, 63, 17,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEncodingApplies(t *testing.T) { temp, err := os.CreateTemp("", "zapup-encoding-test") require.NoError(t, err, "Unexpected error constructing logger.") defer os.Remove(temp.Name()) os.Setenv(KeyLogLevel, "info") os.Setenv(KeyFieldCustomer, "gopher") os.Setenv(KeyFieldStage, "test") os.Setenv(KeyFieldApp, "testapp") defer unsetEnv(KeyLogLevel, KeyFieldCustomer, KeyFieldStage, KeyFieldApp) tests := []struct { enc string // verifying the entire log line requires adoptions all the time since e.g. caller line changes on test adoptions partial string }{ {"console", `Hello, world! {"app": "testapp", "customer": "gopher", "stage": "test"}`}, {"json", `"message":"Hello, world!","app":"testapp","customer":"gopher","stage":"test"`}, } for _, test := range tests { Reset() os.Setenv(KeyLogEncoding, test.enc) os.Setenv(KeyOutput, temp.Name()) z, err := RootLogger() require.NotNil(t, z) require.NoError(t, err) z.Info("Hello, world!") bytes, e := io.ReadAll(temp) require.NoError(t, e, "Couldn't read log contents from temp file.") assert.Contains(t, string(bytes), test.partial, "Unexpected log.") unsetEnv(KeyLogEncoding, KeyOutput) } }
explode_data.jsonl/63193
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 443 }
[ 2830, 3393, 14690, 10611, 7202, 1155, 353, 8840, 836, 8, 1476, 16280, 11, 1848, 1669, 2643, 7251, 12151, 19814, 330, 92371, 454, 12, 17159, 16839, 1138, 17957, 35699, 1155, 11, 1848, 11, 330, 29430, 1465, 49353, 5925, 13053, 16867, 2643, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTooLargeResourceVersionList(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) cacher, v, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } defer cacher.Stop() podFoo := makeTestPod("foo") fooCreated := updatePod(t, etcdStorage, podFoo, nil) // Set up List at fooCreated.ResourceVersion + 10 rv, err := v.ParseResourceVersion(fooCreated.ResourceVersion) if err != nil { t.Fatalf("Unexpected error: %v", err) } listRV := strconv.Itoa(int(rv + 10)) result := &example.PodList{} options := storage.ListOptions{ ResourceVersion: listRV, Predicate: storage.Everything, } err = cacher.List(context.TODO(), "pods/ns", options, result) if !errors.IsTimeout(err) { t.Errorf("Unexpected error: %v", err) } if !storage.IsTooLargeResourceVersion(err) { t.Errorf("expected 'Too large resource version' cause in error but got: %v", err) } }
explode_data.jsonl/41359
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 373 }
[ 2830, 3393, 31246, 34253, 4783, 5637, 852, 1155, 353, 8840, 836, 8, 341, 41057, 11, 1842, 4385, 5793, 1669, 501, 31860, 4385, 2271, 5793, 1155, 11, 1842, 4385, 18, 8840, 17474, 14335, 2398, 16867, 3538, 836, 261, 34016, 1155, 340, 1444,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestServerDisableHeaderNamesNormalizing(t *testing.T) { headerName := "CASE-senSITive-HEAder-NAME" headerNameLower := strings.ToLower(headerName) headerValue := "foobar baz" s := &Server{ Handler: func(ctx *RequestCtx) { hv := ctx.Request.Header.Peek(headerName) if string(hv) != headerValue { t.Fatalf("unexpected header value for %q: %q. Expecting %q", headerName, hv, headerValue) } hv = ctx.Request.Header.Peek(headerNameLower) if len(hv) > 0 { t.Fatalf("unexpected header value for %q: %q. Expecting empty value", headerNameLower, hv) } ctx.Response.Header.Set(headerName, headerValue) ctx.WriteString("ok") ctx.SetContentType("aaa") }, DisableHeaderNamesNormalizing: true, } rw := &readWriter{} rw.r.WriteString(fmt.Sprintf("GET / HTTP/1.1\r\n%s: %s\r\nHost: google.com\r\n\r\n", headerName, headerValue)) ch := make(chan error) go func() { ch <- s.ServeConn(rw) }() select { case err := <-ch: if err != nil { t.Fatalf("Unexpected error from serveConn: %s", err) } case <-time.After(100 * time.Millisecond): t.Fatalf("timeout") } br := bufio.NewReader(&rw.w) var resp Response resp.Header.DisableNormalizing() if err := resp.Read(br); err != nil { t.Fatalf("unexpected error: %s", err) } hv := resp.Header.Peek(headerName) if string(hv) != headerValue { t.Fatalf("unexpected header value for %q: %q. Expecting %q", headerName, hv, headerValue) } hv = resp.Header.Peek(headerNameLower) if len(hv) > 0 { t.Fatalf("unexpected header value for %q: %q. Expecting empty value", headerNameLower, hv) } }
explode_data.jsonl/73279
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 647 }
[ 2830, 3393, 5475, 25479, 4047, 7980, 12206, 4849, 1155, 353, 8840, 836, 8, 341, 20883, 675, 1669, 330, 40371, 1331, 268, 50, 952, 533, 12, 1799, 32, 1107, 12, 7535, 698, 20883, 675, 9053, 1669, 9069, 29983, 25534, 675, 340, 20883, 113...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestTerraform_StringifyContent(t *testing.T) { type fields struct { parser Parser } type args struct { content []byte } tests := []struct { name string fields fields args args want string wantErr bool }{ { name: "test stringify content", fields: fields{ parser: Parser{}, }, args: args{ content: []byte(` resource "aws_s3_bucket" "b" { bucket = "S3B_541" acl = "public-read" tags = { Name = "My bucket" Environment = "Dev" } } `), }, want: ` resource "aws_s3_bucket" "b" { bucket = "S3B_541" acl = "public-read" tags = { Name = "My bucket" Environment = "Dev" } } `, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := tt.fields.parser.StringifyContent(tt.args.content) require.Equal(t, tt.wantErr, (err != nil)) require.Equal(t, tt.want, got) }) } }
explode_data.jsonl/40766
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 441 }
[ 2830, 3393, 51, 13886, 627, 31777, 1437, 2762, 1155, 353, 8840, 836, 8, 341, 13158, 5043, 2036, 341, 197, 55804, 21102, 198, 197, 532, 13158, 2827, 2036, 341, 197, 27751, 3056, 3782, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSingleModifier(t *testing.T) { var data = `{"@key": "value"}` assert(t, Get(data, "@key").String() == "value") assert(t, Get(data, "\\@key").String() == "value") }
explode_data.jsonl/43472
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 69 }
[ 2830, 3393, 10888, 34405, 1155, 353, 8840, 836, 8, 341, 2405, 821, 284, 1565, 4913, 31, 792, 788, 330, 957, 9207, 3989, 6948, 1155, 11, 2126, 2592, 11, 8428, 792, 1827, 703, 368, 621, 330, 957, 1138, 6948, 1155, 11, 2126, 2592, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestControllerFilterSecret(t *testing.T) { t.Parallel() tests := []struct { name string secret metav1.Object wantAdd bool wantUpdate bool wantDelete bool }{ { name: "a secret of the right type", secret: &corev1.Secret{ Type: "secrets.pinniped.dev/oidc-client", ObjectMeta: metav1.ObjectMeta{Name: "some-name", Namespace: "some-namespace"}, }, wantAdd: true, wantUpdate: true, wantDelete: true, }, { name: "a secret of the wrong type", secret: &corev1.Secret{ Type: "secrets.pinniped.dev/not-the-oidc-client-type", ObjectMeta: metav1.ObjectMeta{Name: "some-name", Namespace: "some-namespace"}, }, }, { name: "resource of wrong data type", secret: &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "some-name", Namespace: "some-namespace"}, }, }, } for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { t.Parallel() fakePinnipedClient := pinnipedfake.NewSimpleClientset() pinnipedInformers := pinnipedinformers.NewSharedInformerFactory(fakePinnipedClient, 0) fakeKubeClient := fake.NewSimpleClientset() kubeInformers := informers.NewSharedInformerFactory(fakeKubeClient, 0) testLog := testlogger.New(t) cache := provider.NewDynamicUpstreamIDPProvider() cache.SetIDPList([]provider.UpstreamOIDCIdentityProviderI{ &upstreamoidc.ProviderConfig{Name: "initial-entry"}, }) secretInformer := kubeInformers.Core().V1().Secrets() withInformer := testutil.NewObservableWithInformerOption() New( cache, nil, pinnipedInformers.IDP().V1alpha1().OIDCIdentityProviders(), secretInformer, testLog, withInformer.WithInformer, ) unrelated := corev1.Secret{} filter := withInformer.GetFilterForInformer(secretInformer) require.Equal(t, test.wantAdd, filter.Add(test.secret)) require.Equal(t, test.wantUpdate, filter.Update(&unrelated, test.secret)) require.Equal(t, test.wantUpdate, filter.Update(test.secret, &unrelated)) require.Equal(t, test.wantDelete, filter.Delete(test.secret)) }) } }
explode_data.jsonl/8269
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 879 }
[ 2830, 3393, 2051, 5632, 19773, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 78216, 1669, 3056, 1235, 341, 197, 11609, 981, 914, 198, 197, 197, 20474, 257, 77520, 16, 8348, 198, 197, 50780, 2212, 262, 1807, 198, 197, 50780, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReplicationMode(t *testing.T) { re := require.New(t) registerDefaultSchedulers() cfgData := ` [replication-mode] replication-mode = "dr-auto-sync" [replication-mode.dr-auto-sync] label-key = "zone" primary = "zone1" dr = "zone2" primary-replicas = 2 dr-replicas = 1 wait-store-timeout = "120s" ` cfg := NewConfig() meta, err := toml.Decode(cfgData, &cfg) re.NoError(err) err = cfg.Adjust(&meta, false) re.NoError(err) re.Equal("dr-auto-sync", cfg.ReplicationMode.ReplicationMode) re.Equal("zone", cfg.ReplicationMode.DRAutoSync.LabelKey) re.Equal("zone1", cfg.ReplicationMode.DRAutoSync.Primary) re.Equal("zone2", cfg.ReplicationMode.DRAutoSync.DR) re.Equal(2, cfg.ReplicationMode.DRAutoSync.PrimaryReplicas) re.Equal(1, cfg.ReplicationMode.DRAutoSync.DRReplicas) re.Equal(2*time.Minute, cfg.ReplicationMode.DRAutoSync.WaitStoreTimeout.Duration) cfg = NewConfig() meta, err = toml.Decode("", &cfg) re.NoError(err) err = cfg.Adjust(&meta, false) re.NoError(err) re.Equal("majority", cfg.ReplicationMode.ReplicationMode) }
explode_data.jsonl/78169
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 430 }
[ 2830, 3393, 18327, 1693, 3636, 1155, 353, 8840, 836, 8, 341, 17200, 1669, 1373, 7121, 1155, 340, 29422, 3675, 74674, 741, 50286, 1043, 1669, 22074, 58, 9995, 1693, 14982, 921, 9995, 1693, 14982, 284, 330, 3612, 19938, 65034, 698, 58, 99...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHitCondBreakpointEQ(t *testing.T) { withTestProcess("break", t, func(p *proc.Target, fixture protest.Fixture) { bp := setFileBreakpoint(p, t, fixture.Source, 7) bp.UserBreaklet().HitCond = &struct { Op token.Token Val int }{token.EQL, 3} assertNoError(p.Continue(), t, "Continue()") ivar := evalVariable(p, t, "i") i, _ := constant.Int64Val(ivar.Value) if i != 3 { t.Fatalf("Stopped on wrong hitcount %d\n", i) } err := p.Continue() if _, exited := err.(proc.ErrProcessExited); !exited { t.Fatalf("Unexpected error on Continue(): %v", err) } }) }
explode_data.jsonl/56240
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 247 }
[ 2830, 3393, 19498, 49696, 22524, 2768, 54007, 1155, 353, 8840, 836, 8, 341, 46948, 2271, 7423, 445, 8960, 497, 259, 11, 2915, 1295, 353, 15782, 35016, 11, 12507, 8665, 991, 12735, 8, 341, 197, 2233, 79, 1669, 738, 1703, 22524, 2768, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRestoredPrivileges(t *testing.T) { defer leaktest.AfterTest(t)() const numAccounts = 1 _, _, sqlDB, dir, cleanupFn := BackupRestoreTestSetup(t, singleNode, numAccounts, InitNone) defer cleanupFn() args := base.TestServerArgs{ExternalIODir: dir} rootOnly := sqlDB.QueryStr(t, `SHOW GRANTS ON data.bank`) sqlDB.Exec(t, `CREATE USER someone`) sqlDB.Exec(t, `GRANT SELECT, INSERT, UPDATE, DELETE ON data.bank TO someone`) sqlDB.Exec(t, `CREATE DATABASE data2`) // Explicitly don't restore grants when just restoring a database since we // cannot ensure that the same users exist in the restoring cluster. data2Grants := sqlDB.QueryStr(t, `SHOW GRANTS ON DATABASE data2`) sqlDB.Exec(t, `GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE data2 TO someone`) withGrants := sqlDB.QueryStr(t, `SHOW GRANTS ON data.bank`) sqlDB.Exec(t, `BACKUP DATABASE data, data2 TO $1`, LocalFoo) sqlDB.Exec(t, `DROP TABLE data.bank`) t.Run("into fresh db", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0]) sqlDBRestore.Exec(t, `CREATE DATABASE data`) sqlDBRestore.Exec(t, `RESTORE data.bank FROM $1`, LocalFoo) sqlDBRestore.CheckQueryResults(t, `SHOW GRANTS ON data.bank`, rootOnly) }) t.Run("into db with added grants", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0]) sqlDBRestore.Exec(t, `CREATE DATABASE data`) sqlDBRestore.Exec(t, `CREATE USER someone`) sqlDBRestore.Exec(t, `GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE data TO someone`) sqlDBRestore.Exec(t, `RESTORE data.bank FROM $1`, LocalFoo) sqlDBRestore.CheckQueryResults(t, `SHOW GRANTS ON data.bank`, withGrants) }) t.Run("into db on db grants", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, 
base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0]) sqlDBRestore.Exec(t, `CREATE USER someone`) sqlDBRestore.Exec(t, `RESTORE DATABASE data2 FROM $1`, LocalFoo) sqlDBRestore.CheckQueryResults(t, `SHOW GRANTS ON DATABASE data2`, data2Grants) }) }
explode_data.jsonl/57602
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 927 }
[ 2830, 3393, 12416, 3018, 32124, 70838, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 4777, 1629, 41369, 284, 220, 16, 198, 197, 6878, 8358, 5704, 3506, 11, 5419, 11, 21290, 24911, 1669, 43438, 56284, 2271...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSortedKeys(t *testing.T) { service := kobject.ServiceConfig{ ContainerName: "name", Image: "image", } service1 := kobject.ServiceConfig{ ContainerName: "name", Image: "image", } c := []string{"a", "b"} komposeObject := kobject.KomposeObject{ ServiceConfigs: map[string]kobject.ServiceConfig{"b": service, "a": service1}, } a := SortedKeys(komposeObject) if !reflect.DeepEqual(a, c) { t.Logf("Test Fail output should be %s", c) } }
explode_data.jsonl/58963
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 198 }
[ 2830, 3393, 51051, 8850, 1155, 353, 8840, 836, 8, 341, 52934, 1669, 595, 1700, 13860, 2648, 515, 197, 197, 4502, 675, 25, 330, 606, 756, 197, 53397, 25, 260, 330, 1805, 756, 197, 532, 52934, 16, 1669, 595, 1700, 13860, 2648, 515, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestNewQuerier(t *testing.T) { app, ctx := createTestApp(false) keeper := app.SupplyKeeper() cdc := app.Codec() supplyCoins := chainType.NewCoins( chainType.NewCoin(constants.DefaultBondDenom, sdk.NewInt(100)), chainType.NewCoin(constants.ChainMainNameStr+"/"+"photon", sdk.NewInt(50)), chainType.NewCoin(constants.ChainMainNameStr+"/"+"atom", sdk.NewInt(2000)), chainType.NewCoin(constants.ChainMainNameStr+"/"+"btc", sdk.NewInt(21000000)), ) supplyAcc := keeper.GetModuleAccount(ctx, types.ModuleName).GetID() fInitCoins(t, ctx, *app.AssetKeeper(), supplyCoins, supplyAcc) query := abci.RequestQuery{ Path: "", Data: []byte{}, } // querier := keep.NewQuerier(*keeper) bz, err := querier(ctx, []string{"other"}, query) require.Error(t, err) require.Nil(t, bz) queryTotalSupplyParams := types.NewQueryTotalSupplyParams(1, 20) bz, errRes := cdc.MarshalJSON(queryTotalSupplyParams) require.Nil(t, errRes) query.Path = fmt.Sprintf("/custom/supply/%s", types.QueryTotalSupply) query.Data = bz _, err = querier(ctx, []string{types.QueryTotalSupply}, query) require.Nil(t, err) querySupplyParams := types.NewQuerySupplyOfParams(constants.DefaultBondDenom) bz, errRes = cdc.MarshalJSON(querySupplyParams) require.Nil(t, errRes) query.Path = fmt.Sprintf("/custom/supply/%s", types.QuerySupplyOf) query.Data = bz _, err = querier(ctx, []string{types.QuerySupplyOf}, query) require.Nil(t, err) }
explode_data.jsonl/14268
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 579 }
[ 2830, 3393, 3564, 2183, 261, 1268, 1155, 353, 8840, 836, 8, 341, 28236, 11, 5635, 1669, 1855, 2271, 2164, 3576, 340, 197, 18861, 1669, 906, 38736, 2541, 77233, 741, 1444, 7628, 1669, 906, 20274, 66, 2822, 1903, 83923, 69602, 1669, 8781,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHasTable(t *testing.T) { type Foo struct { Id int Stuff string } DB.DropTable(&Foo{}) // Table should not exist at this point, HasTable should return false if ok := DB.HasTable("foos"); ok { t.Errorf("Table should not exist, but does") } if ok := DB.HasTable(&Foo{}); ok { t.Errorf("Table should not exist, but does") } // We create the table if err := DB.CreateTable(&Foo{}).Error; err != nil { t.Errorf("Table should be created") } // And now it should exits, and HasTable should return true if ok := DB.HasTable("foos"); !ok { t.Errorf("Table should exist, but HasTable informs it does not") } if ok := DB.HasTable(&Foo{}); !ok { t.Errorf("Table should exist, but HasTable informs it does not") } }
explode_data.jsonl/28037
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 271 }
[ 2830, 3393, 10281, 2556, 1155, 353, 8840, 836, 8, 341, 13158, 33428, 2036, 341, 197, 67211, 262, 526, 198, 197, 197, 86622, 914, 198, 197, 532, 45409, 58626, 2099, 40923, 6257, 692, 197, 322, 6633, 1265, 537, 3000, 518, 419, 1459, 11,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestMapProxy_PutAllWithNonSerializableMapValue(t *testing.T) { testMap := make(map[interface{}]interface{}, 0) testMap[5] = student{} err := mp.PutAll(testMap) AssertErrorNotNil(t, err, "putAll did not return an error for nonserializable map value") mp.Clear() }
explode_data.jsonl/57071
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 98 }
[ 2830, 3393, 2227, 16219, 1088, 332, 2403, 2354, 8121, 29268, 2227, 1130, 1155, 353, 8840, 836, 8, 341, 18185, 2227, 1669, 1281, 9147, 58, 4970, 78134, 4970, 22655, 220, 15, 340, 18185, 2227, 58, 20, 60, 284, 5458, 16094, 9859, 1669, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIntegration_ExternalInitiator(t *testing.T) { t.Parallel() rpcClient, gethClient, _, assertMockCalls := cltest.NewEthMocksWithStartupAssertions(t) defer assertMockCalls() app, cleanup := cltest.NewApplication(t, eth.NewClientWith(rpcClient, gethClient), services.NewExternalInitiatorManager(), ) defer cleanup() require.NoError(t, app.Start()) exInitr := struct { Header http.Header Body services.JobSpecNotice }{} eiMockServer, assertCalled := cltest.NewHTTPMockServer(t, http.StatusOK, "POST", "", func(header http.Header, body string) { exInitr.Header = header err := json.Unmarshal([]byte(body), &exInitr.Body) require.NoError(t, err) }, ) defer assertCalled() eiCreate := map[string]string{ "name": "someCoin", "url": eiMockServer.URL, } eiCreateJSON, err := json.Marshal(eiCreate) require.NoError(t, err) eip := cltest.CreateExternalInitiatorViaWeb(t, app, string(eiCreateJSON)) eia := &auth.Token{ AccessKey: eip.AccessKey, Secret: eip.Secret, } ei, err := app.Store.FindExternalInitiator(eia) require.NoError(t, err) require.Equal(t, eiCreate["url"], ei.URL.String()) require.Equal(t, strings.ToLower(eiCreate["name"]), ei.Name) require.Equal(t, eip.AccessKey, ei.AccessKey) require.Equal(t, eip.OutgoingSecret, ei.OutgoingSecret) jobSpec := cltest.FixtureCreateJobViaWeb(t, app, "./testdata/external_initiator_job.json") assert.Equal(t, eip.OutgoingToken, exInitr.Header.Get(static.ExternalInitiatorAccessKeyHeader), ) assert.Equal(t, eip.OutgoingSecret, exInitr.Header.Get(static.ExternalInitiatorSecretHeader), ) expected := services.JobSpecNotice{ JobID: jobSpec.ID, Type: models.InitiatorExternal, Params: cltest.JSONFromString(t, `{"foo":"bar"}`), } assert.Equal(t, expected, exInitr.Body) jobRun := cltest.CreateJobRunViaExternalInitiator(t, app, jobSpec, *eia, "") _, err = app.Store.JobRunsFor(jobRun.JobSpecID) assert.NoError(t, err) cltest.WaitForJobRunToComplete(t, app.Store, jobRun) }
explode_data.jsonl/75902
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 809 }
[ 2830, 3393, 52464, 62, 25913, 3803, 36122, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 7000, 3992, 2959, 11, 633, 71, 2959, 11, 8358, 2060, 11571, 55292, 1669, 1185, 1944, 7121, 65390, 11571, 16056, 39076, 90206, 1155, 340, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLockWorks(t *testing.T) { dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) { ip, port, err := c.Port(defaultPort) if err != nil { t.Fatal(err) } addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", ip, port) p := &Mysql{} d, err := p.Open(addr) if err != nil { t.Fatal(err) } dt.Test(t, d, []byte("SELECT 1")) ms := d.(*Mysql) err = ms.Lock() if err != nil { t.Fatal(err) } err = ms.Unlock() if err != nil { t.Fatal(err) } // make sure the 2nd lock works (RELEASE_LOCK is very finicky) err = ms.Lock() if err != nil { t.Fatal(err) } err = ms.Unlock() if err != nil { t.Fatal(err) } }) }
explode_data.jsonl/47863
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 345 }
[ 2830, 3393, 11989, 37683, 1155, 353, 8840, 836, 8, 341, 2698, 74, 8840, 41288, 7957, 2271, 1155, 11, 32247, 11, 2915, 1155, 353, 8840, 836, 11, 272, 40204, 1944, 33672, 1731, 8, 341, 197, 46531, 11, 2635, 11, 1848, 1669, 272, 43013, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestMemHashPathSpec(t *testing.T) { assert := assert.New(t) s := types.String("hello") spec, err := ForPath("mem::#" + mustHash(s.Hash(types.Format_7_18)).String()) assert.NoError(err) defer spec.Close() assert.Equal("mem", spec.Protocol) assert.Equal("", spec.DatabaseName) assert.False(spec.Path.IsEmpty()) // This is a reasonable check but it causes the next GetValue to return nil: // assert.Nil(spec.GetValue()) spec.GetDatabase(context.Background()).WriteValue(context.Background(), s) assert.Equal(s, spec.GetValue(context.Background())) }
explode_data.jsonl/49588
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 196 }
[ 2830, 3393, 18816, 6370, 1820, 8327, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 1903, 1669, 4494, 6431, 445, 14990, 5130, 98100, 11, 1848, 1669, 1752, 1820, 445, 10536, 486, 55543, 488, 1969, 6370, 1141, 15103, 526...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLedgerBackup(t *testing.T) { ledgerid := "TestLedger" originalPath := "/tmp/fabric/ledgertests/kvledger1" restorePath := "/tmp/fabric/ledgertests/kvledger2" viper.Set("ledger.history.enableHistoryDatabase", true) // create and populate a ledger in the original environment env := createTestEnv(t, originalPath) provider := testutilNewProvider(t) bg, gb := testutil.NewBlockGenerator(t, ledgerid, false) gbHash := gb.Header.Hash() ledger, _ := provider.Create(gb) txid := util.GenerateUUID() simulator, _ := ledger.NewTxSimulator(txid) simulator.SetState("ns1", "key1", []byte("value1")) simulator.SetState("ns1", "key2", []byte("value2")) simulator.SetState("ns1", "key3", []byte("value3")) simulator.Done() simRes, _ := simulator.GetTxSimulationResults() pubSimBytes, _ := simRes.GetPubSimulationBytes() block1 := bg.NextBlock([][]byte{pubSimBytes}) ledger.CommitWithPvtData(&lgr.BlockAndPvtData{Block: block1}) txid = util.GenerateUUID() simulator, _ = ledger.NewTxSimulator(txid) simulator.SetState("ns1", "key1", []byte("value4")) simulator.SetState("ns1", "key2", []byte("value5")) simulator.SetState("ns1", "key3", []byte("value6")) simulator.Done() simRes, _ = simulator.GetTxSimulationResults() pubSimBytes, _ = simRes.GetPubSimulationBytes() block2 := bg.NextBlock([][]byte{pubSimBytes}) ledger.CommitWithPvtData(&lgr.BlockAndPvtData{Block: block2}) ledger.Close() provider.Close() // Create restore environment env = createTestEnv(t, restorePath) // remove the statedb, historydb, and block indexes (they are supposed to be auto created during opening of an existing ledger) // and rename the originalPath to restorePath assert.NoError(t, os.RemoveAll(ledgerconfig.GetStateLevelDBPath())) assert.NoError(t, os.RemoveAll(ledgerconfig.GetHistoryLevelDBPath())) assert.NoError(t, os.RemoveAll(filepath.Join(ledgerconfig.GetBlockStorePath(), fsblkstorage.IndexDir))) assert.NoError(t, os.Rename(originalPath, restorePath)) defer env.cleanup() // Instantiate the ledger from restore 
environment and this should behave exactly as it would have in the original environment provider = testutilNewProvider(t) defer provider.Close() _, err := provider.Create(gb) assert.Equal(t, ErrLedgerIDExists, err) ledger, _ = provider.Open(ledgerid) defer ledger.Close() block1Hash := block1.Header.Hash() block2Hash := block2.Header.Hash() bcInfo, _ := ledger.GetBlockchainInfo() assert.Equal(t, &common.BlockchainInfo{ Height: 3, CurrentBlockHash: block2Hash, PreviousBlockHash: block1Hash, }, bcInfo) b0, _ := ledger.GetBlockByHash(gbHash) assert.True(t, proto.Equal(b0, gb), "proto messages are not equal") b1, _ := ledger.GetBlockByHash(block1Hash) assert.True(t, proto.Equal(b1, block1), "proto messages are not equal") b2, _ := ledger.GetBlockByHash(block2Hash) assert.True(t, proto.Equal(b2, block2), "proto messages are not equal") b0, _ = ledger.GetBlockByNumber(0) assert.True(t, proto.Equal(b0, gb), "proto messages are not equal") b1, _ = ledger.GetBlockByNumber(1) assert.True(t, proto.Equal(b1, block1), "proto messages are not equal") b2, _ = ledger.GetBlockByNumber(2) assert.True(t, proto.Equal(b2, block2), "proto messages are not equal") // get the tran id from the 2nd block, then use it to test GetTransactionByID() txEnvBytes2 := block1.Data.Data[0] txEnv2, err := putils.GetEnvelopeFromBlock(txEnvBytes2) assert.NoError(t, err, "Error upon GetEnvelopeFromBlock") payload2, err := putils.GetPayload(txEnv2) assert.NoError(t, err, "Error upon GetPayload") chdr, err := putils.UnmarshalChannelHeader(payload2.Header.ChannelHeader) assert.NoError(t, err, "Error upon GetChannelHeaderFromBytes") txID2 := chdr.TxId processedTran2, err := ledger.GetTransactionByID(txID2) assert.NoError(t, err, "Error upon GetTransactionByID") // get the tran envelope from the retrieved ProcessedTransaction retrievedTxEnv2 := processedTran2.TransactionEnvelope assert.Equal(t, txEnv2, retrievedTxEnv2) qe, _ := ledger.NewQueryExecutor() value1, _ := qe.GetState("ns1", "key1") assert.Equal(t, 
[]byte("value4"), value1) hqe, err := ledger.NewHistoryQueryExecutor() assert.NoError(t, err) itr, err := hqe.GetHistoryForKey("ns1", "key1") assert.NoError(t, err) defer itr.Close() result1, err := itr.Next() assert.NoError(t, err) assert.Equal(t, []byte("value1"), result1.(*queryresult.KeyModification).Value) result2, err := itr.Next() assert.NoError(t, err) assert.Equal(t, []byte("value4"), result2.(*queryresult.KeyModification).Value) }
explode_data.jsonl/2822
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1658 }
[ 2830, 3393, 60850, 1389, 56245, 1155, 353, 8840, 836, 8, 341, 197, 50704, 307, 1669, 330, 2271, 60850, 1389, 698, 197, 9889, 1820, 1669, 3521, 5173, 6663, 28897, 14, 832, 70, 529, 17966, 14109, 85, 50704, 16, 698, 96027, 1820, 1669, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOrderCount(t *testing.T) { setup() defer teardown() httpmock.RegisterResponder("GET", fmt.Sprintf("https://fooshop.myshopify.com/%s/orders/count.json", client.pathPrefix), httpmock.NewStringResponder(200, `{"count": 7}`)) params := map[string]string{"created_at_min": "2016-01-01T00:00:00Z"} httpmock.RegisterResponderWithQuery( "GET", fmt.Sprintf("https://fooshop.myshopify.com/%s/orders/count.json", client.pathPrefix), params, httpmock.NewStringResponder(200, `{"count": 2}`)) cnt, err := client.Order.Count(nil) if err != nil { t.Errorf("Order.Count returned error: %v", err) } expected := 7 if cnt != expected { t.Errorf("Order.Count returned %d, expected %d", cnt, expected) } date := time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC) cnt, err = client.Order.Count(OrderCountOptions{CreatedAtMin: date}) if err != nil { t.Errorf("Order.Count returned error: %v", err) } expected = 2 if cnt != expected { t.Errorf("Order.Count returned %d, expected %d", cnt, expected) } }
explode_data.jsonl/17989
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 405 }
[ 2830, 3393, 4431, 2507, 1155, 353, 8840, 836, 8, 341, 84571, 741, 16867, 49304, 2822, 28080, 16712, 19983, 30884, 445, 3806, 497, 8879, 17305, 445, 2428, 1110, 824, 9267, 453, 12618, 8675, 1437, 905, 12627, 82, 82818, 81043, 4323, 497, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func Test_StrArray_Basic(t *testing.T) { gtest.C(t, func(t *gtest.T) { expect := []string{"0", "1", "2", "3"} array := garray.NewStrArrayFrom(expect) array2 := garray.NewStrArrayFrom(expect, true) array3 := garray.NewStrArrayFrom([]string{}) t.Assert(array.Slice(), expect) t.Assert(array.Interfaces(), expect) array.Set(0, "100") v, ok := array.Get(0) t.Assert(v, 100) t.Assert(ok, true) t.Assert(array.Search("100"), 0) t.Assert(array.Contains("100"), true) v, ok = array.Remove(0) t.Assert(v, 100) t.Assert(ok, true) v, ok = array.Remove(-1) t.Assert(v, "") t.Assert(ok, false) v, ok = array.Remove(100000) t.Assert(v, "") t.Assert(ok, false) t.Assert(array.Contains("100"), false) array.Append("4") t.Assert(array.Len(), 4) array.InsertBefore(0, "100") array.InsertAfter(0, "200") t.Assert(array.Slice(), []string{"100", "200", "1", "2", "3", "4"}) array.InsertBefore(5, "300") array.InsertAfter(6, "400") t.Assert(array.Slice(), []string{"100", "200", "1", "2", "3", "300", "4", "400"}) t.Assert(array.Clear().Len(), 0) t.Assert(array2.Slice(), expect) t.Assert(array3.Search("100"), -1) }) }
explode_data.jsonl/53080
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 553 }
[ 2830, 3393, 46171, 1857, 1668, 5971, 1155, 353, 8840, 836, 8, 341, 3174, 1944, 727, 1155, 11, 2915, 1155, 353, 82038, 836, 8, 341, 197, 24952, 1669, 3056, 917, 4913, 15, 497, 330, 16, 497, 330, 17, 497, 330, 18, 16707, 197, 11923, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPath(t *testing.T) { books := URL("https://example.com/books") book1 := books.Path("/1", "/2", "3") if books == book1 { t.Errorf("books and books1 should not be equal") } if books.URL != "https://example.com/books" { t.Error("books url is wrong") } want := "https://example.com/books/1/2/3" if book1.URL != want { t.Errorf("Unexpected book URL %s instead of %s", book1.URL, want) } }
explode_data.jsonl/24752
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 163 }
[ 2830, 3393, 1820, 1155, 353, 8840, 836, 8, 341, 197, 12110, 1669, 5548, 445, 2428, 1110, 8687, 905, 71263, 1138, 197, 2190, 16, 1669, 6467, 17474, 4283, 16, 497, 3521, 17, 497, 330, 18, 5130, 743, 6467, 621, 2311, 16, 341, 197, 3244...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestBlockService_Online_Inline(t *testing.T) { cfg := &configuration.Configuration{ Mode: configuration.Online, } mockIndexer := &mocks.Indexer{} servicer := NewBlockAPIService(cfg, mockIndexer) ctx := context.Background() rawBlock := &types.Block{ BlockIdentifier: &types.BlockIdentifier{ Index: 100, Hash: "block 100", }, } transaction := &types.Transaction{ TransactionIdentifier: &types.TransactionIdentifier{ Hash: "tx1", }, } block := &types.Block{ BlockIdentifier: &types.BlockIdentifier{ Index: 100, Hash: "block 100", }, Transactions: []*types.Transaction{ transaction, }, } blockResponse := &types.BlockResponse{ Block: block, } t.Run("nil identifier", func(t *testing.T) { mockIndexer.On( "GetBlockLazy", ctx, (*types.PartialBlockIdentifier)(nil), ).Return( &types.BlockResponse{ Block: rawBlock, OtherTransactions: []*types.TransactionIdentifier{ { Hash: "tx1", }, }, }, nil, ).Once() mockIndexer.On( "GetBlockTransaction", ctx, blockResponse.Block.BlockIdentifier, transaction.TransactionIdentifier, ).Return( transaction, nil, ).Once() b, err := servicer.Block(ctx, &types.BlockRequest{}) assert.Nil(t, err) assert.Equal(t, blockResponse, b) }) t.Run("populated identifier", func(t *testing.T) { pbIdentifier := types.ConstructPartialBlockIdentifier(block.BlockIdentifier) mockIndexer.On( "GetBlockLazy", ctx, pbIdentifier, ).Return( &types.BlockResponse{ Block: rawBlock, OtherTransactions: []*types.TransactionIdentifier{ { Hash: "tx1", }, }, }, nil, ).Once() mockIndexer.On( "GetBlockTransaction", ctx, blockResponse.Block.BlockIdentifier, transaction.TransactionIdentifier, ).Return( transaction, nil, ).Once() b, err := servicer.Block(ctx, &types.BlockRequest{ BlockIdentifier: pbIdentifier, }) assert.Nil(t, err) assert.Equal(t, blockResponse, b) }) mockIndexer.AssertExpectations(t) }
explode_data.jsonl/24696
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 867 }
[ 2830, 3393, 4713, 1860, 62, 19598, 62, 25324, 1155, 353, 8840, 836, 8, 341, 50286, 1669, 609, 21138, 17334, 515, 197, 197, 3636, 25, 6546, 8071, 1056, 345, 197, 532, 77333, 1552, 261, 1669, 609, 16712, 82, 18338, 261, 16094, 1903, 648...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRedis_List(t *testing.T) { runOnRedis(t, func(client *Redis) { _, err := NewRedis(client.Addr, "").Lpush("key", "value1", "value2") assert.NotNil(t, err) val, err := client.Lpush("key", "value1", "value2") assert.Nil(t, err) assert.Equal(t, 2, val) _, err = NewRedis(client.Addr, "").Rpush("key", "value3", "value4") assert.NotNil(t, err) val, err = client.Rpush("key", "value3", "value4") assert.Nil(t, err) assert.Equal(t, 4, val) _, err = NewRedis(client.Addr, "").Llen("key") assert.NotNil(t, err) val, err = client.Llen("key") assert.Nil(t, err) assert.Equal(t, 4, val) vals, err := client.Lrange("key", 0, 10) assert.Nil(t, err) assert.EqualValues(t, []string{"value2", "value1", "value3", "value4"}, vals) _, err = NewRedis(client.Addr, "").Lpop("key") assert.NotNil(t, err) v, err := client.Lpop("key") assert.Nil(t, err) assert.Equal(t, "value2", v) val, err = client.Lpush("key", "value1", "value2") assert.Nil(t, err) assert.Equal(t, 5, val) _, err = NewRedis(client.Addr, "").Rpop("key") assert.NotNil(t, err) v, err = client.Rpop("key") assert.Nil(t, err) assert.Equal(t, "value4", v) val, err = client.Rpush("key", "value4", "value3", "value3") assert.Nil(t, err) assert.Equal(t, 7, val) _, err = NewRedis(client.Addr, "").Lrem("key", 2, "value1") assert.NotNil(t, err) n, err := client.Lrem("key", 2, "value1") assert.Nil(t, err) assert.Equal(t, 2, n) _, err = NewRedis(client.Addr, "").Lrange("key", 0, 10) assert.NotNil(t, err) vals, err = client.Lrange("key", 0, 10) assert.Nil(t, err) assert.EqualValues(t, []string{"value2", "value3", "value4", "value3", "value3"}, vals) n, err = client.Lrem("key", -2, "value3") assert.Nil(t, err) assert.Equal(t, 2, n) vals, err = client.Lrange("key", 0, 10) assert.Nil(t, err) assert.EqualValues(t, []string{"value2", "value3", "value4"}, vals) }) }
explode_data.jsonl/39169
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 913 }
[ 2830, 3393, 48137, 27104, 1155, 353, 8840, 836, 8, 341, 56742, 1925, 48137, 1155, 11, 2915, 12805, 353, 48137, 8, 341, 197, 197, 6878, 1848, 1669, 1532, 48137, 12805, 93626, 11, 35229, 43, 9077, 445, 792, 497, 330, 957, 16, 497, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetHistoricCandlesExtended(t *testing.T) { currencyPair, err := currency.NewPairFromString("BTC-USD") if err != nil { t.Fatal(err) } startTime := time.Unix(1588636800, 0) _, err = o.GetHistoricCandlesExtended(currencyPair, asset.Spot, startTime, time.Now(), kline.OneMin) if err != nil { t.Fatal(err) } _, err = o.GetHistoricCandles(currencyPair, asset.Spot, startTime, time.Now(), kline.Interval(time.Hour*7)) if err == nil { t.Fatal("unexpected result") } }
explode_data.jsonl/30210
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 195 }
[ 2830, 3393, 1949, 48983, 292, 34, 20125, 53190, 1155, 353, 8840, 836, 8, 341, 1444, 5088, 12443, 11, 1848, 1669, 11413, 7121, 12443, 44491, 445, 59118, 12, 26749, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestParseRepoRefParams(t *testing.T) { var testCases = []struct { name string wharfGroup string wharfProject string wantAzureOrg string wantAzureProject string wantAzureRepo string }{ { name: "old v1 format", wharfGroup: "Org", wharfProject: "Proj", wantAzureOrg: "Org", wantAzureProject: "Proj", wantAzureRepo: "", }, { name: "new v2 format", wharfGroup: "Org/Proj", wharfProject: "Repo", wantAzureOrg: "Org", wantAzureProject: "Proj", wantAzureRepo: "Repo", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { gotAzureOrg, gotAzureProject, gotAzureRepo := parseRepoRefParams(tc.wharfGroup, tc.wharfProject) assert.Equal(t, tc.wantAzureOrg, gotAzureOrg) assert.Equal(t, tc.wantAzureProject, gotAzureProject) assert.Equal(t, tc.wantAzureRepo, gotAzureRepo) }) } }
explode_data.jsonl/70541
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 480 }
[ 2830, 3393, 14463, 25243, 3945, 4870, 1155, 353, 8840, 836, 8, 341, 2405, 1273, 37302, 284, 3056, 1235, 341, 197, 11609, 1797, 914, 198, 197, 197, 1312, 59226, 2808, 981, 914, 198, 197, 197, 1312, 59226, 7849, 257, 914, 198, 197, 5078...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRunPipeWithoutMainFunc(t *testing.T) { folder := testlib.Mktmp(t) writeMainWithoutMainFunc(t, folder) config := config.Project{ Builds: []config.Build{ { Binary: "no-main", Hooks: config.HookConfig{}, Targets: []string{ runtimeTarget, }, }, }, } ctx := context.New(config) ctx.Git.CurrentTag = "5.6.7" t.Run("empty", func(t *testing.T) { ctx.Config.Builds[0].Main = "" require.EqualError(t, Default.Build(ctx, ctx.Config.Builds[0], api.Options{ Target: runtimeTarget, }), `build for no-main does not contain a main function`) }) t.Run("not main.go", func(t *testing.T) { ctx.Config.Builds[0].Main = "foo.go" require.EqualError(t, Default.Build(ctx, ctx.Config.Builds[0], api.Options{ Target: runtimeTarget, }), `couldn't find main file: stat foo.go: no such file or directory`) }) t.Run("glob", func(t *testing.T) { ctx.Config.Builds[0].Main = "." require.EqualError(t, Default.Build(ctx, ctx.Config.Builds[0], api.Options{ Target: runtimeTarget, }), `build for no-main does not contain a main function`) }) t.Run("fixed main.go", func(t *testing.T) { ctx.Config.Builds[0].Main = "main.go" require.EqualError(t, Default.Build(ctx, ctx.Config.Builds[0], api.Options{ Target: runtimeTarget, }), `build for no-main does not contain a main function`) }) }
explode_data.jsonl/54150
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 562 }
[ 2830, 3393, 6727, 34077, 26040, 6202, 9626, 1155, 353, 8840, 836, 8, 341, 1166, 2018, 1669, 1273, 2740, 1321, 74, 5173, 1155, 340, 24945, 6202, 26040, 6202, 9626, 1155, 11, 8527, 340, 25873, 1669, 2193, 30944, 515, 197, 197, 11066, 82, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIssue15613(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) tk.MustQuery("select sec_to_time(1e-4)").Check(testkit.Rows("00:00:00.000100")) tk.MustQuery("select sec_to_time(1e-5)").Check(testkit.Rows("00:00:00.000010")) tk.MustQuery("select sec_to_time(1e-6)").Check(testkit.Rows("00:00:00.000001")) tk.MustQuery("select sec_to_time(1e-7)").Check(testkit.Rows("00:00:00.000000")) }
explode_data.jsonl/65494
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 199 }
[ 2830, 3393, 42006, 16, 20, 21, 16, 18, 1155, 353, 8840, 836, 8, 341, 57279, 11, 4240, 1669, 1273, 8226, 7251, 11571, 6093, 1155, 340, 16867, 4240, 2822, 3244, 74, 1669, 1273, 8226, 7121, 2271, 7695, 1155, 11, 3553, 340, 3244, 74, 50...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) { svc := NewInputService8ProtocolTest(nil) svc.Endpoint = "https://test" input := &InputService8TestShapeInputShape{ TimeArg: aws.Time(time.Unix(1422172800, 0)), } req, _ := svc.InputService8TestCaseOperation1Request(input) r := req.HTTPRequest // build request ec2query.Build(req) assert.NoError(t, req.Error) // assert body assert.NotNil(t, r.Body) body, _ := ioutil.ReadAll(r.Body) assert.Equal(t, util.Trim(`Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`), util.Trim(string(body))) // assert URL assert.Equal(t, "https://test/", r.URL.String()) // assert headers }
explode_data.jsonl/53208
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 272 }
[ 2830, 3393, 2505, 1860, 23, 20689, 2271, 20812, 6227, 4207, 16, 1155, 353, 8840, 836, 8, 341, 1903, 7362, 1669, 1532, 2505, 1860, 23, 20689, 2271, 27907, 340, 1903, 7362, 90409, 284, 330, 2428, 1110, 1944, 1837, 22427, 1669, 609, 2505, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRemoveCredential(t *testing.T) { sp := &mockStreamProvider{credentialOfDesiredType: config.SDKKey("")} store := makeMockStore(nil, nil) es := NewEnvStreams([]StreamProvider{sp}, store, 0, ldlog.NewDisabledLoggers()) defer es.Close() sdkKey1, sdkKey2 := config.SDKKey("sdk-key1"), config.SDKKey("sdk-key2") es.AddCredential(sdkKey1) es.AddCredential(sdkKey2) require.Len(t, sp.createdStreams, 2) esp1, esp2 := sp.createdStreams[0], sp.createdStreams[1] assert.Equal(t, sdkKey1, esp1.credential) assert.Equal(t, sdkKey2, esp2.credential) assert.False(t, esp1.closed) assert.False(t, esp2.closed) es.RemoveCredential(sdkKey2) assert.False(t, esp1.closed) assert.True(t, esp2.closed) }
explode_data.jsonl/69846
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 305 }
[ 2830, 3393, 13021, 48265, 1155, 353, 8840, 836, 8, 341, 41378, 1669, 609, 16712, 3027, 5179, 90, 66799, 2124, 4896, 2690, 929, 25, 2193, 46822, 1592, 39047, 630, 57279, 1669, 1281, 11571, 6093, 27907, 11, 2092, 340, 78966, 1669, 1532, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_traps(t *testing.T) { type args struct { height []int } tests := []struct { name string args args want int }{ { "test 1", args{ []int{4, 2, 0, 3, 2, 5}, }, 9, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := trap(tt.args.height); got != tt.want { t.Errorf("traps() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/52733
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 204 }
[ 2830, 3393, 3547, 2625, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 30500, 3056, 396, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 31215, 2827, 198, 197, 50780, 526, 198, 197, 59403, 197, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestBranchProtectionRelease(t *testing.T) { actual := readConfig(t) for _, currentRelease := range releases.GetAllKymaReleases() { relBranch := currentRelease.Branch() t.Run("repository kyma, branch "+relBranch, func(t *testing.T) { p, err := actual.GetBranchProtection("kyma-project", "kyma", relBranch, []config.Presubmit{}) require.NoError(t, err) assert.NotNil(t, p) assert.True(t, *p.Protect) require.NotNil(t, p.RequiredStatusChecks) assert.Contains(t, p.RequiredStatusChecks.Contexts, "license/cla") assert.Contains(t, p.RequiredStatusChecks.Contexts, "tide") }) } }
explode_data.jsonl/46217
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 245 }
[ 2830, 3393, 18197, 78998, 16077, 1155, 353, 8840, 836, 8, 341, 88814, 1669, 1349, 2648, 1155, 692, 2023, 8358, 1482, 16077, 1669, 2088, 19232, 45732, 42, 1600, 64, 693, 28299, 368, 341, 197, 197, 3748, 18197, 1669, 1482, 16077, 97249, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetProxyConn(t *testing.T) { poolOnce = sync.Once{} redisClientOptions := ClientOptions{} redisClent := NewClient( redisClientOptions.WithProxy( func() interface{} { monitorProxyOptions := MonitorProxyOptions{} return NewMonitorProxy( monitorProxyOptions.WithLogger(log.NewLogger()), ) }, ), ) conn := redisClent.GetCtxRedisConn() assert.IsTypef(t, NewMonitorProxy(), conn, "MonitorProxy type") assert.NotNil(t, conn) err := conn.Close() assert.Nil(t, err) err = redisClent.Close() assert.Nil(t, err) }
explode_data.jsonl/59702
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 220 }
[ 2830, 3393, 1949, 16219, 9701, 1155, 353, 8840, 836, 8, 341, 85273, 12522, 284, 12811, 77946, 16094, 197, 21748, 2959, 3798, 1669, 8423, 3798, 16094, 197, 21748, 5066, 306, 1669, 1532, 2959, 1006, 197, 197, 21748, 2959, 3798, 26124, 16219...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUpdateMetric(t *testing.T) { ctx := context.Background() metric := &Metric{ ID: ltesting.UniqueID(testMetricIDPrefix), Description: "DESC", Filter: "FILTER", } // Updating a non-existent metric creates a new one. if err := client.UpdateMetric(ctx, metric); err != nil { t.Fatal(err) } defer client.DeleteMetric(ctx, metric.ID) got, err := client.Metric(ctx, metric.ID) if err != nil { t.Fatal(err) } if want := metric; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } // Updating an existing metric changes it. metric.Description = "CHANGED" if err := client.UpdateMetric(ctx, metric); err != nil { t.Fatal(err) } got, err = client.Metric(ctx, metric.ID) if err != nil { t.Fatal(err) } if want := metric; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } }
explode_data.jsonl/25534
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 365 }
[ 2830, 3393, 4289, 54310, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 2109, 16340, 1669, 609, 54310, 515, 197, 29580, 25, 688, 326, 8840, 87443, 915, 8623, 54310, 915, 14335, 1326, 197, 47414, 25, 330, 30910, 756, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestPopulateSecretsAsEnvOnlySSM(t *testing.T) { secret1 := apicontainer.Secret{ Provider: "asm", Name: "secret1", Region: "us-west-2", Type: "MOUNT_POINT", ValueFrom: "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName", } secret2 := apicontainer.Secret{ Provider: "asm", Name: "secret2", Region: "us-west-1", ValueFrom: "/test/secretName1", Target: "LOG_DRIVER", } secret3 := apicontainer.Secret{ Provider: "ssm", Name: "secret3", Region: "us-west-2", Type: "ENVIRONMENT_VARIABLE", ValueFrom: "/test/secretName", } container := &apicontainer.Container{ Name: "myName", Image: "image:tag", Secrets: []apicontainer.Secret{secret1, secret2, secret3}, TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), } task := &Task{ Arn: "test", ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), Containers: []*apicontainer.Container{container}, } asmRes := &asmsecret.ASMSecretResource{} asmRes.SetCachedSecretValue(asmSecretKeyWest1, "secretValue1") asmRes.SetCachedSecretValue(secKeyLogDriver, "secretValue2") ssmRes := &ssmsecret.SSMSecretResource{} ssmRes.SetCachedSecretValue(secretKeyWest1, "secretValue3") task.AddResource(ssmsecret.ResourceName, ssmRes) task.AddResource(asmsecret.ResourceName, asmRes) hostConfig := &dockercontainer.HostConfig{} task.PopulateSecrets(hostConfig, container) assert.Equal(t, "secretValue3", container.Environment["secret3"]) assert.Equal(t, 1, len(container.Environment)) }
explode_data.jsonl/37250
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 725 }
[ 2830, 3393, 11598, 6334, 19773, 82, 2121, 14359, 7308, 1220, 44, 1155, 353, 8840, 836, 8, 341, 197, 20474, 16, 1669, 1443, 51160, 1743, 74779, 515, 197, 197, 5179, 25, 220, 330, 10530, 756, 197, 21297, 25, 414, 330, 20474, 16, 756, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMarshal_roundtripFromDoc(t *testing.T) { before := D{ {"foo", "bar"}, {"baz", int64(-27)}, {"bing", A{nil, primitive.Regex{Pattern: "word", Options: "i"}}}, } b, err := Marshal(before) require.NoError(t, err) var after D require.NoError(t, Unmarshal(b, &after)) if !cmp.Equal(after, before) { t.Errorf("Documents to not match. got %v; want %v", after, before) } }
explode_data.jsonl/12831
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 164 }
[ 2830, 3393, 55438, 29896, 32981, 3830, 9550, 1155, 353, 8840, 836, 8, 341, 63234, 1669, 422, 515, 197, 197, 4913, 7975, 497, 330, 2257, 7115, 197, 197, 4913, 42573, 497, 526, 21, 19, 4080, 17, 22, 39781, 197, 197, 4913, 7132, 497, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMuxerDuplicate(t *testing.T) { _, err := MuxerConstructor(func(_ peer.ID, _ peer.ID) mux.Transport { return nil }) if err != nil { t.Fatal(err) } }
explode_data.jsonl/23664
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 70 }
[ 2830, 3393, 44, 2200, 261, 53979, 1155, 353, 8840, 836, 8, 341, 197, 6878, 1848, 1669, 386, 2200, 261, 13288, 18552, 2490, 14397, 9910, 11, 716, 14397, 9910, 8, 59807, 87669, 314, 470, 2092, 2751, 743, 1848, 961, 2092, 341, 197, 3244,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestInjector_Inject(t *testing.T) { // Create our injector injector := NewInjector() // Provided without name. Resolved through the type. If a type is provided again then the old is overwritten. injector.Provide(depA{prefix: "A"}, depB{prefix: "B"}) // Provided with name. Duplicate types allowed. injector.ProvideNamed(depC{prefix: "C"}, "myDepC") injector.ProvideNamed(depD{prefix: "D"}, "myDepD") iNeed := iNeedSomeDeps{} // Inject the deps into the struct which needs it. injector.MustInject(&iNeed) if iNeed.A.WhoAmI() != "A is my name" { t.Fatal("A says something wrong.") } if iNeed.B.WhoAmI() != "B is my name" { t.Fatal("B says something wrong.") } if iNeed.C.WhoAmI() != "C is my name" { t.Fatal("C says something wrong.") } if iNeed.D.WhoAmI() != "D is my name" { t.Fatal("D says something wrong.") } }
explode_data.jsonl/41768
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 332 }
[ 2830, 3393, 61836, 62, 13738, 1155, 353, 8840, 836, 8, 341, 197, 322, 4230, 1039, 62643, 198, 17430, 583, 269, 1669, 1532, 61836, 2822, 197, 322, 53874, 2041, 829, 13, 1800, 8731, 1526, 279, 943, 13, 1416, 264, 943, 374, 3897, 1549, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestClient_testCleanup(t *testing.T) { // Create a temporary dir to store the result file td, err := ioutil.TempDir("", "plugin") if err != nil { t.Fatalf("err: %s", err) } defer os.RemoveAll(td) // Create a path that the helper process will write on cleanup path := filepath.Join(td, "output") // Test the cleanup process := helperProcess("cleanup", path) c := NewClient(&ClientConfig{ Cmd: process, HandshakeConfig: testHandshake, Plugins: testPluginMap, }) // Grab the client so the process starts if _, err := c.Client(); err != nil { c.Kill() t.Fatalf("err: %s", err) } // Kill it gracefully c.Kill() // Test for the file if _, err := os.Stat(path); err != nil { t.Fatalf("err: %s", err) } }
explode_data.jsonl/57834
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 294 }
[ 2830, 3393, 2959, 4452, 67335, 1155, 353, 8840, 836, 8, 341, 197, 322, 4230, 264, 13340, 5419, 311, 3553, 279, 1102, 1034, 198, 76373, 11, 1848, 1669, 43144, 65009, 6184, 19814, 330, 9138, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestScannerTiming(t *testing.T) { defer leaktest.AfterTest(t)() const count = 3 const runTime = 100 * time.Millisecond const maxError = 7500 * time.Microsecond durations := []time.Duration{ 15 * time.Millisecond, 25 * time.Millisecond, } for i, duration := range durations { testutils.SucceedsSoon(t, func() error { ranges := newTestRangeSet(count, t) q := &testQueue{} mc := hlc.NewManualClock(123) clock := hlc.NewClock(mc.UnixNano, time.Nanosecond) s := newReplicaScanner(makeAmbCtx(), clock, duration, 0, 0, ranges) s.AddQueues(q) stopper := stop.NewStopper() s.Start(stopper) time.Sleep(runTime) stopper.Stop(context.TODO()) avg := s.avgScan() log.Infof(context.Background(), "%d: average scan: %s", i, avg) if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() || duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() { return errors.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError) } return nil }) } }
explode_data.jsonl/78117
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 443 }
[ 2830, 3393, 31002, 62805, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 741, 4777, 1760, 284, 220, 18, 198, 4777, 1598, 1462, 284, 220, 16, 15, 15, 353, 882, 71482, 198, 4777, 1932, 1454, 284, 220, 22, 20, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStreamWriter3(t *testing.T) { normalModeOpts := DefaultOptions managedModeOpts := DefaultOptions managedModeOpts.managedTxns = true for _, opts := range []*Options{&normalModeOpts, &managedModeOpts} { runBadgerTest(t, opts, func(t *testing.T, db *DB) { // write entries using stream writer noOfKeys := 1000 valueSize := 128 // insert keys which are even value := make([]byte, valueSize) y.Check2(rand.Read(value)) list := &pb.KVList{} counter := 0 for i := 0; i < noOfKeys; i++ { key := make([]byte, 8) binary.BigEndian.PutUint64(key, uint64(counter)) list.Kv = append(list.Kv, &pb.KV{ Key: key, Value: value, Version: 20, }) counter = counter + 2 } sw := db.NewStreamWriter() require.NoError(t, sw.Prepare(), "sw.Prepare() failed") require.NoError(t, sw.Write(list), "sw.Write() failed") // get max version of sw, will be used in transactions for managed mode maxVs := sw.maxVersion require.NoError(t, sw.Flush(), "sw.Flush() failed") // insert keys which are odd val := make([]byte, valueSize) y.Check2(rand.Read(val)) counter = 1 for i := 0; i < noOfKeys; i++ { txn := db.newTransaction(true, opts.managedTxns) if opts.managedTxns { txn.readTs = math.MaxUint64 txn.commitTs = maxVs } keybyte := make([]byte, 8) keyNo := uint64(counter) binary.BigEndian.PutUint64(keybyte, keyNo) require.NoError(t, txn.SetEntry(NewEntry(keybyte, val)), "error while inserting entries") require.NoError(t, txn.Commit(), "error while commit") counter = counter + 2 } // verify while iteration keys are in sorted order err := db.View(func(txn *Txn) error { keysCount := 0 itrOps := DefaultIteratorOptions it := txn.NewIterator(itrOps) defer it.Close() prev := uint64(0) for it.Rewind(); it.Valid(); it.Next() { item := it.Item() key := item.Key() current := binary.BigEndian.Uint64(key) if prev != 0 && current != (prev+uint64(1)) { t.Fatal("keys should be in increasing order") } keysCount++ prev = current } require.True(t, keysCount == 2*noOfKeys, "count of keys is not matching") return nil }) 
require.Nil(t, err, "error should be nil while iterating") }) } }
explode_data.jsonl/17981
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 995 }
[ 2830, 3393, 93031, 18, 1155, 353, 8840, 836, 8, 341, 197, 8252, 3636, 43451, 1669, 7899, 3798, 198, 197, 25270, 3636, 43451, 1669, 7899, 3798, 198, 197, 25270, 3636, 43451, 99052, 31584, 4412, 284, 830, 271, 2023, 8358, 12185, 1669, 208...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func Test_ORM_CreateManagerService(t *testing.T) { t.Parallel() store, cleanup := cltest.NewStore(t) t.Cleanup(cleanup) orm := feeds.NewORM(store.DB) mgr := &feeds.FeedsManager{ URI: uri, Name: name, PublicKey: publicKey, JobTypes: jobTypes, Network: network, } count, err := orm.CountManagers() require.NoError(t, err) require.Equal(t, int64(0), count) id, err := orm.CreateManager(context.Background(), mgr) require.NoError(t, err) count, err = orm.CountManagers() require.NoError(t, err) require.Equal(t, int64(1), count) assert.NotZero(t, id) }
explode_data.jsonl/71708
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 255 }
[ 2830, 3393, 62, 4365, 34325, 2043, 1860, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 57279, 11, 21290, 1669, 1185, 1944, 7121, 6093, 1155, 340, 3244, 727, 60639, 1337, 60639, 692, 197, 493, 1669, 34396, 7121, 4365, 31200, 225...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPublishVolume(t *testing.T) { // Boilerplate mocking code mockCtrl := gomock.NewController(t) // Set fake values backendUUID := "1234" // Create mocked backend that returns the expected object mockBackend := mockstorage.NewMockBackend(mockCtrl) mockBackend.EXPECT().PublishVolume(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(nil) // Create a mocked persistent store client mockStoreClient := mockpersistentstore.NewMockStoreClient(mockCtrl) // Set the store client behavior we don't care about for this testcase mockStoreClient.EXPECT().GetVolumeTransactions(gomock.Any()).Return([]*storage.VolumeTransaction{}, nil).AnyTimes() mockStoreClient.EXPECT().UpdateVolume(gomock.Any(), gomock.Any()).Times(1).Return(nil) // Create an instance of the orchestrator orchestrator := getOrchestrator(t) orchestrator.storeClient = mockStoreClient // Add the mocked backend to the orchestrator orchestrator.backends[backendUUID] = mockBackend volConfig := tu.GenerateVolumeConfig("fake-volume", 1, "fast", config.File) orchestrator.volumes["fake-volume"] = &storage.Volume{BackendUUID: backendUUID, Config: volConfig} // Run the test err := orchestrator.PublishVolume(ctx(), "fake-volume", &utils.VolumePublishInfo{}) // Verify the results assert.Nilf(t, err, "Error publishing volume; %v", err) }
explode_data.jsonl/62767
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 431 }
[ 2830, 3393, 50145, 18902, 1155, 353, 8840, 836, 8, 341, 197, 322, 45665, 1750, 66483, 2038, 198, 77333, 15001, 1669, 342, 316, 1176, 7121, 2051, 1155, 692, 197, 322, 2573, 12418, 2750, 198, 197, 20942, 24754, 1669, 330, 16, 17, 18, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWrongMSPID(t *testing.T) { localConfig := proto.Clone(insecureConfig).(*cb.Config) policyName := "foo" localConfig.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Policies[policyName] = &cb.ConfigPolicy{ Policy: &cb.Policy{ Type: int32(cb.Policy_SIGNATURE), Value: utils.MarshalOrPanic(cauthdsl.SignedByMspAdmin("MissingOrg")), }, } result, err := Check(localConfig) assert.NoError(t, err, "Simple empty config") assert.Empty(t, result.GeneralErrors) assert.Empty(t, result.ElementErrors) assert.Len(t, result.ElementWarnings, 1) assert.Equal(t, ".groups."+channelconfig.OrdererGroupKey+".policies."+policyName, result.ElementWarnings[0].Path) }
explode_data.jsonl/4591
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 255 }
[ 2830, 3393, 29185, 44, 4592, 915, 1155, 353, 8840, 836, 8, 341, 8854, 2648, 1669, 18433, 64463, 5900, 25132, 2648, 568, 4071, 7221, 10753, 340, 3223, 8018, 675, 1669, 330, 7975, 698, 8854, 2648, 38716, 2808, 59800, 69324, 1676, 19664, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSchemaParser_SimpleFieldWithListArg(t *testing.T) { body := ` type Hello { world(things: [String]): String }` astDoc := parse(t, body) expected := &ast.Document{ Loc: testLoc(1, 49), Definitions: []ast.Node{ &ast.ObjectDefinition{ Loc: testLoc(1, 49), Name: &ast.Name{ Value: "Hello", Loc: testLoc(6, 11), }, Fields: []*ast.FieldDefinition{ { Loc: testLoc(16, 47), Name: &ast.Name{ Value: "world", Loc: testLoc(16, 21), }, Arguments: []*ast.InputValueDefinition{ { Loc: testLoc(22, 38), Name: &ast.Name{ Value: "things", Loc: testLoc(22, 28), }, Type: &ast.List{ Loc: testLoc(30, 38), Type: &ast.Named{ Loc: testLoc(31, 37), Name: &ast.Name{ Value: "String", Loc: testLoc(31, 37), }, }, }, DefaultValue: nil, }, }, Type: &ast.Named{ Loc: testLoc(41, 47), Name: &ast.Name{ Value: "String", Loc: testLoc(41, 47), }, }, }, }, }, }, } if !reflect.DeepEqual(astDoc, expected) { t.Fatalf("unexpected document, expected: %v, got: %v", expected, astDoc) } }
explode_data.jsonl/51227
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 720 }
[ 2830, 3393, 8632, 6570, 1098, 6456, 1877, 2354, 852, 2735, 1155, 353, 8840, 836, 8, 341, 35402, 1669, 22074, 1313, 21927, 341, 220, 1879, 24365, 819, 25, 508, 703, 24320, 923, 198, 31257, 88836, 9550, 1669, 4715, 1155, 11, 2487, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestFunctionsWithAliases(t *testing.T) { cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore()) require.NoError(t, err) scc := &SCC{ BuiltinSCCs: map[string]struct{}{"lscc": {}}, Support: &MockSupport{}, ACLProvider: mockAclProvider, GetMSPIDs: getMSPIDs, BCCSP: cryptoProvider, BuildRegistry: &container.BuildRegistry{}, ChaincodeBuilder: &mock.ChaincodeBuilder{}, } stub := shimtest.NewMockStub("lscc", scc) res := stub.MockInit("1", nil) require.Equal(t, int32(shim.OK), res.Status, res.Message) identityDeserializer := &policymocks.MockIdentityDeserializer{Identity: []byte("Alice"), Msg: []byte("msg1")} policyManagerGetter := &policymocks.MockChannelPolicyManagerGetter{ Managers: map[string]policies.Manager{ "test": &policymocks.MockChannelPolicyManager{MockPolicy: &policymocks.MockPolicy{Deserializer: identityDeserializer}}, }, } scc.PolicyChecker = policy.NewPolicyChecker( policyManagerGetter, identityDeserializer, &policymocks.MockMSPPrincipalGetter{Principal: []byte("Alice")}, ) sProp, _ := protoutil.MockSignedEndorserProposalOrPanic("", &pb.ChaincodeSpec{}, []byte("Alice"), []byte("msg1")) identityDeserializer.Msg = sProp.ProposalBytes sProp.Signature = sProp.ProposalBytes testInvoke := func(function, resource string) { t.Run(function, func(t *testing.T) { res = stub.MockInvokeWithSignedProposal("1", [][]byte{[]byte(function), []byte("testchannel1")}, nil) require.NotEqual(t, int32(shim.OK), res.Status) require.Equal(t, "invalid number of arguments to lscc: 2", res.Message) mockAclProvider.Reset() mockAclProvider.On("CheckACL", resource, "testchannel1", sProp).Return(errors.New("bonanza")) res = stub.MockInvokeWithSignedProposal("1", [][]byte{[]byte(function), []byte("testchannel1"), []byte("chaincode")}, sProp) require.NotEqual(t, int32(shim.OK), res.Status, res.Message) require.Equal(t, fmt.Sprintf("access denied for [%s][testchannel1]: bonanza", function), res.Message) mockAclProvider.Reset() mockAclProvider.On("CheckACL", 
resource, "testchannel1", sProp).Return(nil) res = stub.MockInvokeWithSignedProposal("1", [][]byte{[]byte(function), []byte("testchannel1"), []byte("nonexistentchaincode")}, sProp) require.NotEqual(t, int32(shim.OK), res.Status, res.Message) require.Equal(t, res.Message, "could not find chaincode with name 'nonexistentchaincode'") }) } testInvoke("getid", "lscc/ChaincodeExists") testInvoke("ChaincodeExists", "lscc/ChaincodeExists") testInvoke("getdepspec", "lscc/GetDeploymentSpec") testInvoke("GetDeploymentSpec", "lscc/GetDeploymentSpec") testInvoke("getccdata", "lscc/GetChaincodeData") testInvoke("GetChaincodeData", "lscc/GetChaincodeData") }
explode_data.jsonl/11792
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1086 }
[ 2830, 3393, 5152, 16056, 95209, 1155, 353, 8840, 836, 8, 341, 1444, 9444, 5179, 11, 1848, 1669, 2021, 7121, 3675, 15352, 4449, 2354, 6608, 63373, 58902, 7121, 43344, 1592, 6093, 2398, 17957, 35699, 1155, 11, 1848, 340, 1903, 638, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLeftSplit(t *testing.T) { left, right := splitData(collegeData, 0, 25) for _, point := range left { if point.Values[0] >= 25 { t.Fail() } if len(left) != 3 { t.Fail() } } for _, point := range right { if point.Values[0] < 25 { t.Fail() } if len(right) != 7 { t.Fail() } } }
explode_data.jsonl/10540
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 152 }
[ 2830, 3393, 5415, 20193, 1155, 353, 8840, 836, 8, 341, 35257, 11, 1290, 1669, 6718, 1043, 19611, 4757, 1043, 11, 220, 15, 11, 220, 17, 20, 340, 2023, 8358, 1459, 1669, 2088, 2115, 341, 197, 743, 1459, 35145, 58, 15, 60, 2604, 220, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestGenerateCaptchaJWT(t *testing.T) { g := initTestGoogleCaptcha(t) tests := []struct { name string testFunc func() }{ { name: "test normal generation", testFunc: func() { w := httptest.NewRecorder() err := g.GenerateCaptchaJWT(w) require.Nil(t, err) resp := w.Result() defer resp.Body.Close() jwtToken := resp.Header.Get(testCaptchaHeader) require.NotEmpty(t, jwtToken) t.Logf("jwtToken %s", jwtToken) }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { tt.testFunc() }) } }
explode_data.jsonl/60457
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 277 }
[ 2830, 3393, 31115, 34, 25431, 55172, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 2930, 2271, 14444, 34, 25431, 1155, 340, 78216, 1669, 3056, 1235, 341, 197, 11609, 257, 914, 198, 197, 18185, 9626, 2915, 741, 197, 59403, 197, 197, 515, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMapGetNextID(t *testing.T) { testutils.SkipOnOldKernel(t, "4.13", "bpf_map_get_next_id") var next MapID var err error hash := createHash() defer hash.Close() if next, err = MapGetNextID(MapID(0)); err != nil { t.Fatal("Can't get next ID:", err) } if next == MapID(0) { t.Fatal("Expected next ID other than 0") } // As there can be multiple eBPF maps, we loop over all of them and // make sure, the IDs increase and the last call will return ErrNotExist for { last := next if next, err = MapGetNextID(last); err != nil { if !errors.Is(err, ErrNotExist) { t.Fatal("Expected ErrNotExist, got:", err) } break } if next <= last { t.Fatalf("Expected next ID (%d) to be higher than the last ID (%d)", next, last) } } }
explode_data.jsonl/21676
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 310 }
[ 2830, 3393, 2227, 1949, 5847, 915, 1155, 353, 8840, 836, 8, 341, 18185, 6031, 57776, 1925, 18284, 26343, 1155, 11, 330, 19, 13, 16, 18, 497, 330, 65, 15897, 5376, 3062, 11257, 842, 1138, 2405, 1790, 5027, 915, 198, 2405, 1848, 1465, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestTHeaderProtocolIDPtrMustPanic(t *testing.T) { var invalidProtoID = THeaderProtocolID(-1) if invalidProtoID.Validate() == nil { t.Fatalf("Expected %v to be an invalid THeaderProtocolID, it passes the validation", invalidProtoID) } defer func() { if recovered := recover(); recovered == nil { t.Error("Expected panic on invalid proto id, did not happen.") } }() THeaderProtocolIDPtrMust(invalidProtoID) }
explode_data.jsonl/9729
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 153 }
[ 2830, 3393, 51, 4047, 20689, 915, 5348, 31776, 47, 31270, 1155, 353, 8840, 836, 8, 341, 2405, 8318, 31549, 915, 284, 350, 4047, 20689, 915, 4080, 16, 340, 743, 8318, 31549, 915, 47667, 368, 621, 2092, 341, 197, 3244, 30762, 445, 18896...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMysqlConnLimit(t *testing.T) { requireMysqlVars(t) cfg := mysql.Config{ User: *mysqlUser, Passwd: *mysqlPass, DBName: *mysqlDb, AllowNativePasswords: true, } proxyConnLimitTest(t, *mysqlConnName, "mysql", cfg.FormatDSN(), mysqlPort) }
explode_data.jsonl/47452
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 154 }
[ 2830, 3393, 44, 14869, 9701, 16527, 1155, 353, 8840, 836, 8, 341, 17957, 44, 14869, 28305, 1155, 340, 50286, 1669, 10564, 10753, 515, 197, 31672, 25, 338, 353, 12272, 1474, 345, 197, 10025, 395, 6377, 25, 2290, 353, 12272, 12187, 345, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestApplicationRegistryJobPostsubmit(t *testing.T) { // WHEN jobConfig, err := tester.ReadJobConfig("./../../../../prow/jobs/kyma/components/application-registry/application-registry.yaml") // THEN require.NoError(t, err) assert.Len(t, jobConfig.Postsubmits, 1) kymaPost, ex := jobConfig.Postsubmits["kyma-project/kyma"] assert.True(t, ex) assert.Len(t, kymaPost, 1) actualPost := kymaPost[0] expName := "kyma-components-application-registry" assert.Equal(t, expName, actualPost.Name) assert.Equal(t, []string{"master"}, actualPost.Branches) assert.Equal(t, 10, actualPost.MaxConcurrency) assert.True(t, actualPost.Decorate) assert.Equal(t, "github.com/kyma-project/kyma", actualPost.PathAlias) tester.AssertThatHasExtraRefTestInfra(t, actualPost.JobBase.UtilityConfig) tester.AssertThatHasPresets(t, actualPost.JobBase, tester.PresetDindEnabled, tester.PresetDockerPushRepo, tester.PresetGcrPush, tester.PresetBuildMaster) assert.Equal(t, "^components/application-registry/", actualPost.RunIfChanged) assert.Equal(t, tester.ImageGolangBuildpackLatest, actualPost.Spec.Containers[0].Image) assert.Equal(t, []string{"/home/prow/go/src/github.com/kyma-project/test-infra/prow/scripts/build.sh"}, actualPost.Spec.Containers[0].Command) assert.Equal(t, []string{"/home/prow/go/src/github.com/kyma-project/kyma/components/application-registry"}, actualPost.Spec.Containers[0].Args) }
explode_data.jsonl/39032
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 526 }
[ 2830, 3393, 4988, 15603, 12245, 4133, 5955, 1155, 353, 8840, 836, 8, 341, 197, 322, 33633, 198, 68577, 2648, 11, 1848, 1669, 37111, 6503, 12245, 2648, 13988, 84257, 79, 651, 4437, 5481, 14109, 1600, 64, 20261, 33032, 12, 29172, 33032, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCustomerManager(t *testing.T) { const ( //fill your data here sk = "" cc = "" someAvailableUsername = "" ) //and here var ( testingCustomer = &CustomerRequest{ CompanyName: "", Username: "", Password: "", } ctx = context.Background() ) cli := NewClient(common.NewClient(sk, cc, "", nil, nil)) t.Run("test get customers", func(t *testing.T) { resp, err := cli.GetCustomers(ctx, map[string]string{}) if err != nil { t.Error(err) return } t.Log(resp) }) //works t.Run("test post customer", func(t *testing.T) { params := map[string]string{ "companyName": testingCustomer.CompanyName, } params["username"] = testingCustomer.Username params["password"] = testingCustomer.Password report, err := cli.SaveCustomer(ctx, params) if err != nil { t.Error(err) return } t.Log(report) }) t.Run("test verifyCustomerUser", func(t *testing.T) { isAvailable, err := cli.VerifyCustomerUser(ctx, testingCustomer.Username, testingCustomer.Password) if err != nil { t.Error(err) return } t.Log(isAvailable) }) t.Run("test validation of the username", func(t *testing.T) { isAvailable, err := cli.ValidateCustomerUsername(ctx, someAvailableUsername) if err != nil { t.Error(err) return } t.Log(isAvailable) }) }
explode_data.jsonl/66214
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 565 }
[ 2830, 3393, 12792, 2043, 1155, 353, 8840, 836, 8, 341, 4777, 2399, 197, 197, 322, 7559, 697, 821, 1588, 198, 197, 1903, 74, 503, 284, 8389, 197, 63517, 503, 284, 8389, 197, 1903, 635, 16485, 11115, 284, 8389, 197, 340, 197, 322, 437...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRTCPReceiverReorderedPackets(t *testing.T) { v := uint32(0x65f83afb) rr := New(&v, 90000) srPkt := rtcp.SenderReport{ SSRC: 0xba9da416, NTPTime: 0xe363887a17ced916, RTPTime: 1287981738, PacketCount: 714, OctetCount: 859127, } ts := time.Date(2008, 0o5, 20, 22, 15, 20, 0, time.UTC) rr.ProcessPacketRTCP(ts, &srPkt) rtpPkt := rtp.Packet{ Header: rtp.Header{ Version: 2, Marker: true, PayloadType: 96, SequenceNumber: 0x43a7, Timestamp: 0xafb45733, SSRC: 0xba9da416, }, Payload: []byte("\x00\x00"), } ts = time.Date(2008, 0o5, 20, 22, 15, 20, 0, time.UTC) rr.ProcessPacketRTP(ts, &rtpPkt) rtpPkt = rtp.Packet{ Header: rtp.Header{ Version: 2, Marker: true, PayloadType: 96, SequenceNumber: 0x43a6, Timestamp: 0xafb45733, SSRC: 0xba9da416, }, Payload: []byte("\x00\x00"), } ts = time.Date(2008, 0o5, 20, 22, 15, 20, 0, time.UTC) rr.ProcessPacketRTP(ts, &rtpPkt) expectedPkt := rtcp.ReceiverReport{ SSRC: 0x65f83afb, Reports: []rtcp.ReceptionReport{ { SSRC: 0xba9da416, LastSequenceNumber: 0x43a7, LastSenderReport: 0x887a17ce, Delay: 1 * 65536, }, }, } ts = time.Date(2008, 0o5, 20, 22, 15, 21, 0, time.UTC) require.Equal(t, &expectedPkt, rr.Report(ts)) }
explode_data.jsonl/72793
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 774 }
[ 2830, 3393, 5350, 7123, 25436, 693, 10544, 47, 18382, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 2622, 18, 17, 7, 15, 87, 21, 20, 69, 23, 18, 96834, 340, 197, 634, 1669, 1532, 2099, 85, 11, 220, 24, 15, 15, 15, 15, 692, 1903, 8...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDockerLogger_Success(t *testing.T) { ctu.DockerCompatible(t) t.Parallel() require := require.New(t) containerImage, containerImageName, containerImageTag := testContainerDetails() client, err := docker.NewClientFromEnv() if err != nil { t.Skip("docker unavailable:", err) } if img, err := client.InspectImage(containerImage); err != nil || img == nil { t.Log("image not found locally, downloading...") err = client.PullImage(docker.PullImageOptions{ Repository: containerImageName, Tag: containerImageTag, }, docker.AuthConfiguration{}) require.NoError(err, "failed to pull image") } containerConf := docker.CreateContainerOptions{ Config: &docker.Config{ Cmd: []string{ "sh", "-c", "touch ~/docklog; tail -f ~/docklog", }, Image: containerImage, }, Context: context.Background(), } container, err := client.CreateContainer(containerConf) require.NoError(err) defer client.RemoveContainer(docker.RemoveContainerOptions{ ID: container.ID, Force: true, }) err = client.StartContainer(container.ID, nil) require.NoError(err) testutil.WaitForResult(func() (bool, error) { container, err = client.InspectContainer(container.ID) if err != nil { return false, err } if !container.State.Running { return false, fmt.Errorf("container not running") } return true, nil }, func(err error) { require.NoError(err) }) stdout := &noopCloser{bytes.NewBuffer(nil)} stderr := &noopCloser{bytes.NewBuffer(nil)} dl := NewDockerLogger(testlog.HCLogger(t)).(*dockerLogger) dl.stdout = stdout dl.stderr = stderr require.NoError(dl.Start(&StartOpts{ ContainerID: container.ID, })) echoToContainer(t, client, container.ID, "abc") echoToContainer(t, client, container.ID, "123") testutil.WaitForResult(func() (bool, error) { act := stdout.String() if "abc\n123\n" != act { return false, fmt.Errorf("expected abc\\n123\\n for stdout but got %s", act) } return true, nil }, func(err error) { require.NoError(err) }) }
explode_data.jsonl/76333
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 747 }
[ 2830, 3393, 35, 13659, 7395, 87161, 1155, 353, 8840, 836, 8, 341, 89216, 84, 909, 13659, 29161, 1155, 692, 3244, 41288, 7957, 741, 17957, 1669, 1373, 7121, 1155, 692, 53290, 1906, 11, 5476, 1906, 675, 11, 5476, 1906, 5668, 1669, 1273, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestDefaultClientset(t *testing.T) { c, err := New(zap.NewNop(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{}, []Association{}, Excludes{}, nil, nil, nil) assert.Error(t, err) assert.Equal(t, "invalid authType for kubernetes: ", err.Error()) assert.Nil(t, c) c, err = New(zap.NewNop(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{}, []Association{}, Excludes{}, newFakeAPIClientset, nil, nil) assert.NoError(t, err) assert.NotNil(t, c) }
explode_data.jsonl/56837
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 191 }
[ 2830, 3393, 3675, 2959, 746, 1155, 353, 8840, 836, 8, 341, 1444, 11, 1848, 1669, 1532, 13174, 391, 7121, 45, 453, 1507, 595, 23, 82, 1676, 24922, 2648, 22655, 94506, 26008, 22655, 45012, 22655, 3056, 63461, 22655, 1374, 7396, 22655, 209...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReconcile_CloudEvents(t *testing.T) { names.TestingSeed() prs := []*v1beta1.PipelineRun{ parse.MustParsePipelineRun(t, ` metadata: name: test-pipelinerun namespace: foo selfLink: /pipeline/1234 spec: pipelineRef: name: test-pipeline `), } ps := []*v1beta1.Pipeline{ parse.MustParsePipeline(t, ` metadata: name: test-pipeline namespace: foo spec: tasks: - name: test-1 taskRef: name: test-task `), } ts := []*v1beta1.Task{ parse.MustParseTask(t, ` metadata: name: test-task namespace: foo spec: steps: - name: simple-step image: foo command: ["/mycmd"] env: - name: foo value: bar `), } cms := []*corev1.ConfigMap{ { ObjectMeta: metav1.ObjectMeta{Name: config.GetDefaultsConfigName(), Namespace: system.Namespace()}, Data: map[string]string{ "default-cloud-events-sink": "http://synk:8080", }, }, } t.Logf("config maps: %s", cms) d := test.Data{ PipelineRuns: prs, Pipelines: ps, Tasks: ts, ConfigMaps: cms, } prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ "Normal Started", "Normal Running Tasks Completed: 0", } reconciledRun, clients := prt.reconcileRun("foo", "test-pipelinerun", wantEvents, false) // This PipelineRun is in progress now and the status should reflect that condition := reconciledRun.Status.GetCondition(apis.ConditionSucceeded) if condition == nil || condition.Status != corev1.ConditionUnknown { t.Errorf("Expected PipelineRun status to be in progress, but was %v", condition) } if condition != nil && condition.Reason != v1beta1.PipelineRunReasonRunning.String() { t.Errorf("Expected reason %q but was %s", v1beta1.PipelineRunReasonRunning.String(), condition.Reason) } verifyTaskRunStatusesCount(t, cms[0].Data[embeddedStatusFeatureFlag], reconciledRun.Status, 1) wantCloudEvents := []string{ `(?s)dev.tekton.event.pipelinerun.started.v1.*test-pipelinerun`, `(?s)dev.tekton.event.pipelinerun.running.v1.*test-pipelinerun`, } ceClient := clients.CloudEvents.(cloudevent.FakeClient) err := eventstest.CheckEventsUnordered(t, ceClient.Events, 
"reconcile-cloud-events", wantCloudEvents) if !(err == nil) { t.Errorf(err.Error()) } }
explode_data.jsonl/27327
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 922 }
[ 2830, 3393, 693, 40446, 457, 920, 52178, 7900, 1155, 353, 8840, 836, 8, 341, 93940, 8787, 287, 41471, 2822, 25653, 82, 1669, 29838, 85, 16, 19127, 16, 1069, 8790, 6727, 515, 197, 75115, 50463, 14463, 34656, 6727, 1155, 11, 22074, 17637,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestCancelParallelQuery(t *testing.T) { defer leaktest.AfterTest(t)() const queryToBlock = "INSERT INTO nums VALUES (1) RETURNING NOTHING;" const queryToCancel = "INSERT INTO nums2 VALUES (2) RETURNING NOTHING;" const sqlToRun = "BEGIN TRANSACTION; " + queryToBlock + queryToCancel + " COMMIT;" // conn1 is used for the txn above. conn2 is solely for the CANCEL statement. var conn1 *gosql.DB var conn2 *gosql.DB // Up to two goroutines could generate errors (one for each query). errChan := make(chan error, 1) errChan2 := make(chan error, 1) sem := make(chan struct{}) tc := serverutils.StartTestCluster(t, 2, /* numNodes */ base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ UseDatabase: "test", Knobs: base.TestingKnobs{ SQLExecutor: &sql.ExecutorTestingKnobs{ BeforeExecute: func(ctx context.Context, stmt string, _ /* isParallel */ bool) { // if queryToBlock if strings.Contains(stmt, "(1)") { // Block start of execution until queryToCancel has been canceled <-sem } }, AfterExecute: func(ctx context.Context, stmt string, err error) { // if queryToBlock if strings.Contains(stmt, "(1)") { // Ensure queryToBlock errored out with the cancellation error. if err == nil { errChan <- errors.New("didn't get an error from query that should have been indirectly canceled") } else if !sqlbase.IsQueryCanceledError(err) { errChan <- err } close(errChan) } else if strings.Contains(stmt, "(2)") { // if queryToCancel // This query should have finished successfully; if not, // report that error. if err != nil { errChan2 <- err } // Cancel this query, even though it has already completed execution. // The other query (queryToBlock) should return a cancellation error. 
const cancelQuery = "CANCEL QUERIES SELECT query_id FROM [SHOW CLUSTER QUERIES] WHERE node_id = 1 AND query LIKE '%INSERT INTO nums2 VALUES (2%'" if _, err := conn2.Exec(cancelQuery); err != nil { errChan2 <- err } close(errChan2) // Unblock queryToBlock sem <- struct{}{} close(sem) } }, }, }, }, }) defer tc.Stopper().Stop(context.TODO()) conn1 = tc.ServerConn(0) conn2 = tc.ServerConn(1) sqlutils.CreateTable(t, conn1, "nums", "num INT", 0, nil) sqlutils.CreateTable(t, conn1, "nums2", "num INT", 0, nil) // Start the txn. Both queries should run in parallel - and queryToBlock // should error out. _, err := conn1.Exec(sqlToRun) if err != nil && !isClientsideQueryCanceledErr(err) { t.Fatal(err) } else if err == nil { t.Fatal("didn't get an error from txn that should have been canceled") } // Ensure both channels are closed. if err := <-errChan2; err != nil { t.Fatal(err) } if err := <-errChan; err != nil { t.Fatal(err) } }
explode_data.jsonl/54859
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1242 }
[ 2830, 3393, 9269, 16547, 2859, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 741, 4777, 3239, 1249, 4713, 284, 330, 12698, 12496, 10307, 14710, 320, 16, 8, 30880, 1718, 86081, 41655, 4777, 3239, 1249, 9269, 284...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestEDSServiceResolutionUpdate(t *testing.T) { s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{}) addEdsCluster(s, "edsdns.svc.cluster.local", "http", "10.0.0.53", 8080) addEdsCluster(s, "other.local", "http", "1.1.1.1", 8080) adscConn := s.Connect(nil, nil, watchAll) // Validate that endpoints are pushed correctly. testEndpoints("10.0.0.53", "outbound|8080||edsdns.svc.cluster.local", adscConn, t) // Now update the service resolution to DNSLB with a DNS endpoint. updateServiceResolution(s) if _, err := adscConn.Wait(5*time.Second, v3.EndpointType); err != nil { t.Fatal(err) } // Validate that endpoints are skipped. lbe := adscConn.GetEndpoints()["outbound|8080||edsdns.svc.cluster.local"] if lbe != nil && len(lbe.Endpoints) > 0 { t.Fatalf("endpoints not expected for %s, but got %v", "edsdns.svc.cluster.local", adscConn.EndpointsJSON()) } }
explode_data.jsonl/7951
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 354 }
[ 2830, 3393, 1479, 1220, 1017, 38106, 4289, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 856, 5356, 7121, 52317, 67400, 5475, 1155, 11, 856, 5356, 991, 726, 3798, 37790, 12718, 2715, 82, 28678, 1141, 11, 330, 6767, 45226, 514, 7362, 40501, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRobotStartAutoRun(t *testing.T) { adaptor1 := newTestAdaptor("Connection1", "/dev/null") driver1 := newTestDriver(adaptor1, "Device1", "0") //work := func() {} r := NewRobot("autorun", []Connection{adaptor1}, []Device{driver1}, //work, ) go func() { gobottest.Assert(t, r.Start(), nil) }() time.Sleep(10 * time.Millisecond) gobottest.Assert(t, r.Running(), true) // stop it gobottest.Assert(t, r.Stop(), nil) gobottest.Assert(t, r.Running(), false) }
explode_data.jsonl/24445
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 198 }
[ 2830, 3393, 43374, 3479, 13253, 6727, 1155, 353, 8840, 836, 8, 341, 98780, 32657, 16, 1669, 501, 2271, 2589, 32657, 445, 4526, 16, 497, 3521, 3583, 19293, 1138, 33652, 16, 1669, 501, 2271, 11349, 43779, 32657, 16, 11, 330, 6985, 16, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAssumeRole(t *testing.T) { creds := &sts.Credentials{ AccessKeyId: aws.String("foo"), SecretAccessKey: aws.String("bar"), SessionToken: aws.String("my-token"), } cases := []struct { name string ambient bool role string expErr bool expCreds *sts.Credentials expRegion string key string secret string region string mockSTS *mockSTS }{ { name: "should assume role w/ ambient creds", role: "my-role", key: "key", secret: "secret", region: "", ambient: true, expErr: false, expCreds: creds, expRegion: "", mockSTS: &mockSTS{ AssumeRoleFn: func(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { return &sts.AssumeRoleOutput{ Credentials: creds, }, nil }, }, }, { name: "should assume role w/o ambient", ambient: false, role: "my-role", key: "key", secret: "secret", region: "eu-central-1", expErr: false, expCreds: creds, mockSTS: &mockSTS{ AssumeRoleFn: func(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { return &sts.AssumeRoleOutput{ Credentials: creds, }, nil }, }, }, { name: "no role set: do NOT assume role and use provided credentials", ambient: true, role: "", key: "my-explicit-key", secret: "my-explicit-secret", region: "eu-central-1", expErr: false, expCreds: &sts.Credentials{ AccessKeyId: aws.String("my-explicit-key"), // from <key> above SecretAccessKey: aws.String("my-explicit-secret"), // from <secret> above }, mockSTS: &mockSTS{ AssumeRoleFn: func(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { return &sts.AssumeRoleOutput{ Credentials: creds, }, nil }, }, }, { // AssumeRole() error should be forwarded by provider name: "error assuming role w/ ambient", ambient: true, role: "my-role", key: "key", secret: "secret", region: "eu-central-1", expErr: true, expCreds: nil, mockSTS: &mockSTS{ AssumeRoleFn: func(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { return nil, fmt.Errorf("error assuming mock role") }, }, }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { provider, err := 
makeMockSessionProvider(func(sess *session.Session) stsiface.STSAPI { return c.mockSTS }, c.key, c.secret, c.region, c.role, c.ambient) assert.NoError(t, err) sess, err := provider.GetSession() if c.expErr { assert.NotNil(t, err) } else { sessCreds, _ := sess.Config.Credentials.Get() assert.Equal(t, c.mockSTS.assumedRole, c.role) assert.Equal(t, *c.expCreds.SecretAccessKey, sessCreds.SecretAccessKey) assert.Equal(t, *c.expCreds.AccessKeyId, sessCreds.AccessKeyID) assert.Equal(t, c.region, *sess.Config.Region) } }) } }
explode_data.jsonl/65006
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1439 }
[ 2830, 3393, 5615, 3885, 9030, 1155, 353, 8840, 836, 8, 341, 197, 85734, 1669, 609, 36279, 727, 15735, 515, 197, 197, 6054, 81343, 25, 257, 31521, 6431, 445, 7975, 4461, 197, 7568, 50856, 6054, 1592, 25, 31521, 6431, 445, 2257, 4461, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSubOperationExecution(t *testing.T) { // ARRANGE cdata := map[uint64]map[string][]byte{ subOperationRootID: {"subOperationRootID": []byte(`{"title":"title_1","content":"hello world"}`)}, } inContainers := []*Container{{Data: cdata[subOperationRootID], ID: subOperationRootID}} _, runner, output, input, tracker := build(t, inContainers, SwitcherStateOn, SwitcherStateOn, nil, nil) // ACT if err := Run(context.Background(), 0, runner); err != nil { t.Fatalf("run failed: %v", err) } // ASSERT assert.Equalf(t, 2, len(output.elements), "output elements number mismatch") assert.Truef(t, output.isShutDown, "output expected to be shut down after run") assert.Truef(t, input.isShutDown, "input expected to be shut down after run") if assert.Equalf(t, 1, len(tracker.containers), "tracker containers number mismatch") { cont := input.containers[0] assert.Equalf(t, subOperationRootID, cont.ID, "input container id mismatch") } if assert.Equalf(t, 1, len(tracker.issues), "tracker issues number mismatch") { issue := tracker.issues[0] assert.Equalf(t, StepExecutor, issue.Step, "issue step mismatch") assert.Equalf(t, subOperationFailedID, issue.Container.ID, "issue container id mismatch") } if assert.Equalf(t, 2, len(output.elements), "output elements number mismatch") { _, ex := output.elements[fmt.Sprintf("%d_doc", subOperationRootID)] assert.Truef(t, ex, "element %s is expected to present in the output", subOperationRootID) data, ex := output.elements[fmt.Sprintf("%d_doc", subOperationCreateID)] if assert.Truef(t, ex, "element %s is expected to present in the output", subOperationCreateID) { var element elementMock if err := json.Unmarshal(data, &element); err != nil { t.Errorf("failed to unmarshal data to element: %v", err) } else { assert.Equalf(t, subOperationCreateTitle, element.Title, "subOperationCreateID element title mismatch") } } } assert.Truef(t, tracker.isShutDown, "tracker expected to be shut down after run") }
explode_data.jsonl/33797
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 690 }
[ 2830, 3393, 3136, 8432, 20294, 1155, 353, 8840, 836, 8, 341, 197, 322, 82079, 11503, 198, 1444, 691, 1669, 2415, 58, 2496, 21, 19, 60, 2186, 14032, 45725, 3782, 515, 197, 28624, 8432, 8439, 915, 25, 5212, 1966, 8432, 8439, 915, 788, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestRejectOldCluster(t *testing.T) { defer testutil.AfterTest(t) // 2 endpoints to test multi-endpoint Status clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true}) defer clus.Terminate(t) cfg := clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}, DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, RejectOldCluster: true, } cli, err := clientv3.New(cfg) if err != nil { t.Fatal(err) } cli.Close() }
explode_data.jsonl/30356
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 232 }
[ 2830, 3393, 78413, 18284, 28678, 1155, 353, 8840, 836, 8, 341, 16867, 1273, 1314, 36892, 2271, 1155, 340, 197, 322, 220, 17, 36342, 311, 1273, 7299, 13068, 2768, 8104, 198, 197, 4163, 1669, 17590, 7121, 28678, 53, 18, 1155, 11, 609, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestFileLogrotate(t *testing.T) { dir, err := ioutil.TempDir("", "tlog_rotate_") if err != nil { t.Fatalf("create tmp dir: %v", err) } defer func() { if !t.Failed() { os.RemoveAll(dir) return } t.Logf("dir: %v", dir) }() fname := fmt.Sprintf("file.%d.log", os.Getpid()) f := CreateLogrotate(filepath.Join(dir, fname)) defer f.Close() for i := 0; i < 3; i++ { _, err = fmt.Fprintf(f, "some info %v\n", i) assert.NoError(t, err) err = os.Rename( filepath.Join(dir, fname), filepath.Join(dir, fmt.Sprintf("file_moved_%d.%d.log", i, os.Getpid())), ) require.NoError(t, err) _, err = fmt.Fprintf(f, "after move %v\n", i) assert.NoError(t, err) err = f.Rotate() assert.NoError(t, err) } fs, err := ioutil.ReadDir(dir) if err != nil { t.Fatalf("list dir: %v", err) } assert.Len(t, fs, 4) for _, f := range fs { b, err := ioutil.ReadFile(path.Join(dir, f.Name())) if err != nil { t.Fatalf("read file: %v", err) } switch { case strings.HasPrefix(f.Name(), "file."): assert.Equal(t, "", string(b)) case strings.HasPrefix(f.Name(), "file_moved_"): var n int fmt.Sscanf(f.Name(), "file_moved_%d", &n) assert.Equal(t, fmt.Sprintf("some info %v\nafter move %v\n", n, n), string(b)) } } }
explode_data.jsonl/3486
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 616 }
[ 2830, 3393, 1703, 2201, 16213, 1155, 353, 8840, 836, 8, 341, 48532, 11, 1848, 1669, 43144, 65009, 6184, 19814, 330, 83, 839, 60834, 62, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 30762, 445, 3182, 4174, 5419, 25, 1018, 85, 497, 1848,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestEndpointTemplateMalformed(t *testing.T) { _, buildErr := Builder(openrtb_ext.BidderTappx, config.Adapter{ Endpoint: "{{Malformed}}"}) assert.Error(t, buildErr) }
explode_data.jsonl/51704
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 69 }
[ 2830, 3393, 27380, 7275, 29600, 10155, 1155, 353, 8840, 836, 8, 341, 197, 6878, 1936, 7747, 1669, 20626, 30981, 3342, 65, 9927, 1785, 307, 1107, 51, 676, 87, 11, 2193, 34190, 515, 197, 197, 27380, 25, 47219, 29600, 10155, 23386, 8824, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestMocker_CreateWebhook(t *testing.T) { t.Run("success", func(t *testing.T) { m, s := NewSession(t) data := api.CreateWebhookData{Name: "abc"} expect := discord.Webhook{ ID: 123, Name: "abc", ChannelID: 456, User: discord.User{ID: 789}, } m.CreateWebhook(data, expect) actual, err := s.CreateWebhook(expect.ChannelID, data) require.NoError(t, err) assert.Equal(t, expect, *actual) }) t.Run("failure", func(t *testing.T) { tMock := new(testing.T) m, s := NewSession(tMock) expect := discord.Webhook{ ID: 123, Name: "abc", ChannelID: 456, User: discord.User{ID: 789}, } m.CreateWebhook(api.CreateWebhookData{Name: "abc"}, expect) actual, err := s.CreateWebhook(expect.ChannelID, api.CreateWebhookData{ Name: "cba", }) require.NoError(t, err) assert.Equal(t, expect, *actual) assert.True(t, tMock.Failed()) }) }
explode_data.jsonl/49374
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 430 }
[ 2830, 3393, 11571, 261, 34325, 5981, 20873, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 5630, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 2109, 11, 274, 1669, 1532, 5283, 1155, 692, 197, 8924, 1669, 6330, 7251, 5981, 20873, 1043...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_NewFile(t *testing.T) { file, err := os.Create("/tmp/file") if err != nil { log.Println(err) } defer file.Close() fileDl, err := NewFileDl("https://d10.baidupcs.com/file/c98e49c3a3b477332c34cc23ebc88fd7?bkt=p3-1400c98e49c3a3b477332c34cc23ebc88fd7b6fedaef000000341e00&xcode=2d2e45fa551b13775d7ceaea994bffde94d0f479fb285bdc&fid=4185473307-250528-477645975447732&time=1527248264&sign=FDTAXGERQBHSK-DCb740ccc5511e5e8fedcff06b081203-cs9ghSC%2BBq0mvxxH6y2PJPjD3iU%3D&to=d10&size=3415552&sta_dx=3415552&sta_cs=160&sta_ft=dmg&sta_ct=1&sta_mt=1&fm2=MH%2CYangquan%2CAnywhere%2C%2Cshanghai%2Ccmnet&vuk=1751568585&iv=2&newver=1&newfm=1&secfm=1&flow_ver=3&pkey=1400c98e49c3a3b477332c34cc23ebc88fd7b6fedaef000000341e00&expires=8h&rt=sh&r=904924522&mlogid=3358735367544085625&vbdid=457171215&fin=Throng_1.11_xclient.info.dmg&fn=Throng_1.11_xclient.info.dmg&rtype=1&dp-logid=3358735367544085625&dp-callid=0.1.1&hps=1&tsl=0&csl=0&csign=1xTqR5%2B0dDn1R3hqGw3PlqzeuPQ%3D&so=0&ut=1&uter=4&serv=0&uc=2152311193&ic=2834468265&ti=8a6c9448563694cbd6ef8bdcb571c1fbb712ee51eb7fcfa5&by=themis", file, -1) if err != nil { log.Println(err) } var exit = make(chan bool) var resume = make(chan bool) var pause bool var wg sync.WaitGroup wg.Add(1) fileDl.OnStart(func() { fmt.Println("download started") format := "\033[2K\r%v/%v [%s] %v byte/s %v" for { status := fileDl.GetStatus() var i = float64(status.Downloaded) / 50 h := strings.Repeat("=", int(i)) + strings.Repeat(" ", 50-int(i)) select { case <-exit: fmt.Printf(format, status.Downloaded, fileDl.Size, h, 0, "[FINISH]") fmt.Println("\ndownload finished") wg.Done() default: if !pause { time.Sleep(time.Second * 1) fmt.Printf(format, status.Downloaded, fileDl.Size, h, status.Speeds, "[DOWNLOADING]") os.Stdout.Sync() } else { fmt.Printf(format, status.Downloaded, fileDl.Size, h, 0, "[PAUSE]") os.Stdout.Sync() <-resume pause = false } } } }) fileDl.OnPause(func() { pause = true }) fileDl.OnResume(func() { resume <- true }) fileDl.OnFinish(func() { exit <- 
true }) fileDl.OnError(func(errCode int, err error) { log.Println(errCode, err) }) fmt.Printf("%+v\n", fileDl) fileDl.Start() time.Sleep(time.Second * 2) fileDl.Pause() time.Sleep(time.Second * 3) fileDl.Resume() wg.Wait() }
explode_data.jsonl/63469
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1232 }
[ 2830, 3393, 39582, 1703, 1155, 353, 8840, 836, 8, 341, 17661, 11, 1848, 1669, 2643, 7251, 4283, 5173, 23903, 1138, 743, 1848, 961, 2092, 341, 197, 6725, 12419, 3964, 340, 197, 532, 16867, 1034, 10421, 2822, 17661, 35, 75, 11, 1848, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestAddMemberAfterClusterFullRotation(t *testing.T) { defer testutil.AfterTest(t) c := NewCluster(t, 3) c.Launch(t) defer c.Terminate(t) // remove all the previous three members and add in three new members. for i := 0; i < 3; i++ { c.RemoveMember(t, uint64(c.Members[0].s.ID())) c.waitLeader(t, c.Members) c.AddMember(t) c.waitLeader(t, c.Members) } c.AddMember(t) c.waitLeader(t, c.Members) clusterMustProgress(t, c.Members) }
explode_data.jsonl/16298
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 193 }
[ 2830, 3393, 2212, 9366, 6025, 28678, 9432, 18440, 1155, 353, 8840, 836, 8, 341, 16867, 1273, 1314, 36892, 2271, 1155, 340, 1444, 1669, 1532, 28678, 1155, 11, 220, 18, 340, 1444, 1214, 18423, 1155, 340, 16867, 272, 836, 261, 34016, 1155,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMarshalProof(t *testing.T) { for _, test := range []struct { desc string p log.Proof want string }{ { desc: "valid", p: log.Proof{ []byte("one"), []byte("two"), []byte("three"), }, want: "b25l\ndHdv\ndGhyZWU=\n", }, { desc: "valid empty", p: log.Proof{}, want: "", }, { desc: "valid default entry", p: log.Proof{ []byte("one"), []byte{}, []byte("three"), }, want: "b25l\n\ndGhyZWU=\n", }, } { t.Run(test.desc, func(t *testing.T) { got := test.p.Marshal() if got != test.want { t.Fatalf("Got %q, want %q", got, test.want) } }) } }
explode_data.jsonl/61513
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 321 }
[ 2830, 3393, 55438, 31076, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 1273, 1669, 2088, 3056, 1235, 341, 197, 41653, 914, 198, 197, 3223, 262, 1487, 7763, 1055, 198, 197, 50780, 914, 198, 197, 59403, 197, 197, 515, 298, 41653, 25, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestBrokenFile(t *testing.T) { // Test write file with broken file struct. f := File{} t.Run("SaveWithoutName", func(t *testing.T) { assert.EqualError(t, f.Save(), "no path defined for file, consider File.WriteTo or File.Write") }) t.Run("SaveAsEmptyStruct", func(t *testing.T) { // Test write file with broken file struct with given path. assert.NoError(t, f.SaveAs(filepath.Join("test", "BrokenFile.SaveAsEmptyStruct.xlsx"))) }) t.Run("OpenBadWorkbook", func(t *testing.T) { // Test set active sheet without BookViews and Sheets maps in xl/workbook.xml. f3, err := OpenFile(filepath.Join("test", "BadWorkbook.xlsx")) f3.GetActiveSheetIndex() f3.SetActiveSheet(2) assert.NoError(t, err) }) t.Run("OpenNotExistsFile", func(t *testing.T) { // Test open a XLSX file with given illegal path. _, err := OpenFile(filepath.Join("test", "NotExistsFile.xlsx")) if assert.Error(t, err) { assert.True(t, os.IsNotExist(err), "Expected os.IsNotExists(err) == true") } }) }
explode_data.jsonl/36959
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 392 }
[ 2830, 3393, 90526, 1703, 1155, 353, 8840, 836, 8, 341, 197, 322, 3393, 3270, 1034, 448, 10865, 1034, 2036, 624, 1166, 1669, 2887, 31483, 3244, 16708, 445, 8784, 26040, 675, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 6948, 12808, 14...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDdlErrors(t *testing.T) { var err error if err = DB.Close(); err != nil { t.Errorf("Closing DDL test db connection err=%s", err) } defer func() { // Reopen DB connection. if DB, err = OpenTestConnection(); err != nil { t.Fatalf("Failed re-opening db connection: %s", err) } }() if err := DB.Find(&User{}).Error; err == nil { t.Errorf("Expected operation on closed db to produce an error, but err was nil") } }
explode_data.jsonl/28064
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 167 }
[ 2830, 3393, 35, 8736, 13877, 1155, 353, 8840, 836, 8, 341, 2405, 1848, 1465, 271, 743, 1848, 284, 5952, 10421, 2129, 1848, 961, 2092, 341, 197, 3244, 13080, 445, 36294, 422, 16524, 1273, 2927, 3633, 1848, 7846, 82, 497, 1848, 340, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestReconcileWithDifferentServiceAccounts reconciles a PipelineRun whose
// spec provides a default ServiceAccountName plus a per-task override for
// "hello-world-1", then asserts that each created TaskRun carries the
// service account expected for its task ("test-sa-0" vs "test-sa-1").
func TestReconcileWithDifferentServiceAccounts(t *testing.T) { names.TestingSeed() ps := []*v1beta1.Pipeline{{ ObjectMeta: baseObjectMeta("test-pipeline", "foo"), Spec: v1beta1.PipelineSpec{ Tasks: []v1beta1.PipelineTask{ { Name: "hello-world-0", TaskRef: &v1beta1.TaskRef{ Name: "hello-world-task", }, }, { Name: "hello-world-1", TaskRef: &v1beta1.TaskRef{ Name: "hello-world-task", }, }, }, }, }} prs := []*v1beta1.PipelineRun{{ ObjectMeta: baseObjectMeta("test-pipeline-run-different-service-accs", "foo"), Spec: v1beta1.PipelineRunSpec{ PipelineRef: &v1beta1.PipelineRef{Name: "test-pipeline"}, ServiceAccountName: "test-sa-0", ServiceAccountNames: []v1beta1.PipelineRunSpecServiceAccountName{{ TaskName: "hello-world-1", ServiceAccountName: "test-sa-1", }}, }, }} ts := []*v1beta1.Task{ {ObjectMeta: baseObjectMeta("hello-world-task", "foo")}, } d := test.Data{ PipelineRuns: prs, Pipelines: ps, Tasks: ts, } prt := newPipelineRunTest(d, t) defer prt.Cancel() _, clients := prt.reconcileRun("foo", "test-pipeline-run-different-service-accs", []string{}, false) taskRunNames := []string{"test-pipeline-run-different-service-accs-hello-world-0-9l9zj", "test-pipeline-run-different-service-accs-hello-world-1-mz4c7"} expectedTaskRuns := []*v1beta1.TaskRun{ { ObjectMeta: taskRunObjectMeta(taskRunNames[0], "foo", "test-pipeline-run-different-service-accs", "test-pipeline", "hello-world-0", false), Spec: v1beta1.TaskRunSpec{ TaskRef: &v1beta1.TaskRef{ Name: "hello-world-task", }, ServiceAccountName: "test-sa-0", Resources: &v1beta1.TaskRunResources{}, Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, { ObjectMeta: taskRunObjectMeta(taskRunNames[1], "foo", "test-pipeline-run-different-service-accs", "test-pipeline", "hello-world-1", false), Spec: v1beta1.TaskRunSpec{ TaskRef: &v1beta1.TaskRef{ Name: "hello-world-task", }, ServiceAccountName: "test-sa-1", Resources: &v1beta1.TaskRunResources{}, Timeout: &metav1.Duration{Duration: 
config.DefaultTimeoutMinutes * time.Minute}, }, }, } for i := range ps[0].Spec.Tasks { // Check that the expected TaskRun was created actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").Get(prt.TestAssets.Ctx, taskRunNames[i], metav1.GetOptions{}) if err != nil { t.Fatalf("Expected a TaskRun to be created, but it wasn't: %s", err) } if d := cmp.Diff(actual, expectedTaskRuns[i], ignoreResourceVersion); d != "" { t.Errorf("expected to see TaskRun %v created. Diff %s", expectedTaskRuns[i], diff.PrintWantGot(d)) } } }
explode_data.jsonl/68277
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1263 }
[ 2830, 3393, 693, 40446, 457, 2354, 69123, 1860, 41369, 1155, 353, 8840, 836, 8, 341, 93940, 8787, 287, 41471, 2822, 35009, 1669, 29838, 85, 16, 19127, 16, 1069, 8790, 90, 515, 197, 23816, 12175, 25, 2331, 1190, 12175, 445, 1944, 2268, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRunnerError(t *testing.T) { store := NewMockStoreIface() ticker := glock.NewMockTicker(time.Second) refreshTicker := glock.NewMockTicker(time.Second * 30) store.ListFunc.SetDefaultReturn([]Migration{ {ID: 1, Progress: 0.5}, }, nil) runner := newRunner(store, refreshTicker, &observation.TestContext) migrator := NewMockMigrator() migrator.ProgressFunc.SetDefaultReturn(0.5, nil) migrator.UpFunc.SetDefaultReturn(errors.New("uh-oh")) if err := runner.Register(1, migrator, MigratorOptions{ticker: ticker}); err != nil { t.Fatalf("unexpected error registering migrator: %s", err) } go runner.Start() tickN(ticker, 1) runner.Stop() if calls := store.AddErrorFunc.history; len(calls) != 1 { t.Fatalf("unexpected number of calls to AddError. want=%d have=%d", 1, len(calls)) } else { if calls[0].Arg1 != 1 { t.Errorf("unexpected migrationId. want=%d have=%d", 1, calls[0].Arg1) } if calls[0].Arg2 != "uh-oh" { t.Errorf("unexpected error message. want=%s have=%s", "uh-oh", calls[0].Arg2) } } }
explode_data.jsonl/11530
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 419 }
[ 2830, 3393, 19486, 1454, 1155, 353, 8840, 836, 8, 341, 57279, 1669, 1532, 11571, 6093, 40, 1564, 741, 3244, 5215, 1669, 342, 1023, 7121, 11571, 87278, 9730, 32435, 340, 197, 17168, 87278, 1669, 342, 1023, 7121, 11571, 87278, 9730, 32435, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestNodeInfoAddChannel(t *testing.T) { nodeInfo := testNodeInfo(t, testNodeID(), "testing") nodeInfo.Channels = []byte{} require.Empty(t, nodeInfo.Channels) nodeInfo.AddChannel(2) require.Contains(t, nodeInfo.Channels, byte(0x02)) // adding the same channel again shouldn't be a problem nodeInfo.AddChannel(2) require.Contains(t, nodeInfo.Channels, byte(0x02)) }
explode_data.jsonl/14844
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 135 }
[ 2830, 3393, 1955, 1731, 2212, 9629, 1155, 353, 8840, 836, 8, 341, 20831, 1731, 1669, 1273, 1955, 1731, 1155, 11, 1273, 1955, 915, 1507, 330, 8840, 1138, 20831, 1731, 6353, 6680, 284, 3056, 3782, 16094, 17957, 11180, 1155, 11, 2436, 1731...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLocalizeDefaultMessage(t *testing.T) { t.Run("fine", func(t *testing.T) { api := &plugintest.API{} p := setupTestPlugin(t, api, &mockstore.Store{}) l := p.getServerLocalizer() m := &i18n.Message{ Other: "test message", } assert.Equal(t, m.Other, p.LocalizeDefaultMessage(l, m)) }) t.Run("empty message", func(t *testing.T) { api := &plugintest.API{} defer api.AssertExpectations(t) p := setupTestPlugin(t, api, &mockstore.Store{}) l := p.getServerLocalizer() m := &i18n.Message{} assert.Equal(t, "", p.LocalizeDefaultMessage(l, m)) }) }
explode_data.jsonl/8756
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 251 }
[ 2830, 3393, 7319, 551, 3675, 2052, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 62057, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 54299, 1669, 609, 47474, 396, 477, 24922, 31483, 197, 3223, 1669, 6505, 2271, 11546, 1155, 11, 633...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValueWrite(t *testing.T) { writer := inspect.NewInspector(new(inspect.TextWriteInspector[json.Writer])) value := defaultIntValue var buffer bytes.Buffer writer.SetWriter(&buffer, 10) value.Inspect(writer) expected := strconv.Itoa(int(defaultIntValue)) writer.Flush() result := string(buffer.Bytes()) if result != expected { t.Fatal("got", result, "expected", expected) } else { t.Log("ok", result) } }
explode_data.jsonl/54479
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 151 }
[ 2830, 3393, 1130, 7985, 1155, 353, 8840, 836, 8, 341, 38959, 1669, 24085, 7121, 46230, 1755, 56337, 987, 1979, 7985, 46230, 58, 2236, 47838, 10907, 16309, 1669, 1638, 1072, 1130, 198, 2405, 4147, 5820, 22622, 198, 38959, 4202, 6492, 2099,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestResolveIdentifyImplicitTeamWithIdentifyFailures exercises implicit-team
// resolution when identify/tracking fails: first a compound assertion whose
// second part cannot resolve, then a tracked proof (rooter) made unreachable.
// It checks that the error is libkb.IdentifiesFailedError in each case, that
// the resolved team metadata is still returned, and that TrackBreaks is
// populated only under the CHAT_CLI identify behavior (not DEFAULT_KBFS).
func TestResolveIdentifyImplicitTeamWithIdentifyFailures(t *testing.T) { tt := newTeamTester(t) defer tt.cleanup() tt.addUser("abc") g := tt.users[0].tc.G tt.addUser("wong") wong := tt.users[1] iTeamNameCreate := strings.Join([]string{tt.users[0].username, wong.username}, ",") t.Logf("make an implicit team") iTeam, _, _, err := teams.LookupOrCreateImplicitTeam(context.TODO(), g, iTeamNameCreate, false /*isPublic*/) require.NoError(t, err) cli, err := client.GetIdentifyClient(g) require.NoError(t, err, "failed to get new identifyclient") iui := newSimpleIdentifyUI() attachIdentifyUI(t, g, iui) t.Logf("try but fail on assertion") res, err := cli.ResolveIdentifyImplicitTeam(context.Background(), keybase1.ResolveIdentifyImplicitTeamArg{ // lookup with a compound assertion, the first part will resolve, the second part will fail Assertions: strings.Join([]string{tt.users[0].username, wong.username + "&&" + wong.username + "@rooter"}, ","), Suffix: "", IsPublic: false, DoIdentifies: true, Create: false, IdentifyBehavior: keybase1.TLFIdentifyBehavior_DEFAULT_KBFS, }) require.Error(t, err) require.IsType(t, libkb.IdentifiesFailedError{}, err, "%v", err) require.Equal(t, res.DisplayName, iTeamNameCreate) require.Equal(t, res.TeamID, iTeam.ID) require.True(t, compareUserVersionSets([]keybase1.UserVersion{tt.users[0].userVersion(), wong.userVersion()}, res.Writers)) require.Nil(t, res.TrackBreaks, "expect no track breaks") t.Logf("prove rooter and track") g.ProofCache.DisableDisk() wong.proveRooter() iui.confirmRes = keybase1.ConfirmResult{IdentityConfirmed: true, RemoteConfirmed: true, AutoConfirmed: true} tt.users[0].track(wong.username) iui.confirmRes = keybase1.ConfirmResult{} t.Logf("make rooter unreachable") g.XAPI = &flakeyRooterAPI{orig: g.XAPI, hardFail: true, G: g} err = g.ProofCache.Reset() require.NoError(t, err) t.Logf("try but fail on tracking (1)") res, err = cli.ResolveIdentifyImplicitTeam(context.Background(), keybase1.ResolveIdentifyImplicitTeamArg{ // lookup 
by username, but the dead rooter proof fails our tracking Assertions: strings.Join([]string{tt.users[0].username, wong.username}, ","), Suffix: "", IsPublic: false, DoIdentifies: true, Create: false, IdentifyBehavior: keybase1.TLFIdentifyBehavior_DEFAULT_KBFS, }) require.Error(t, err) require.IsType(t, libkb.IdentifiesFailedError{}, err, "%v", err) require.Equal(t, res.DisplayName, iTeamNameCreate) require.Equal(t, res.TeamID, iTeam.ID) require.True(t, compareUserVersionSets([]keybase1.UserVersion{tt.users[0].userVersion(), wong.userVersion()}, res.Writers)) require.Nil(t, res.TrackBreaks) // counter-intuitively, there are no track breaks when the error is fatal in this mode. t.Logf("try but fail on tracking (2)") res, err = cli.ResolveIdentifyImplicitTeam(context.Background(), keybase1.ResolveIdentifyImplicitTeamArg{ // lookup by username, but the dead rooter proof fails our tracking Assertions: strings.Join([]string{tt.users[0].username, wong.username}, ","), Suffix: "", IsPublic: false, DoIdentifies: true, Create: false, IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI, // Pass a weird IdentifyBehavior to get TrackBreaks to come out. }) require.Error(t, err) require.IsType(t, libkb.IdentifiesFailedError{}, err, "%v", err) require.Equal(t, res.DisplayName, iTeamNameCreate) require.Equal(t, res.TeamID, iTeam.ID) require.True(t, compareUserVersionSets([]keybase1.UserVersion{tt.users[0].userVersion(), wong.userVersion()}, res.Writers)) // In this mode, in addition to the error TrackBreaks is filled. require.NotNil(t, res.TrackBreaks) require.NotNil(t, res.TrackBreaks[wong.userVersion()]) require.Len(t, res.TrackBreaks[wong.userVersion()].Proofs, 1) require.Equal(t, keybase1.ProofType_ROOTER, res.TrackBreaks[wong.userVersion()].Proofs[0].RemoteProof.ProofType) }
explode_data.jsonl/42664
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1571 }
[ 2830, 3393, 56808, 28301, 1437, 59558, 14597, 2354, 28301, 1437, 19524, 1413, 1155, 353, 8840, 836, 8, 341, 3244, 83, 1669, 501, 14597, 58699, 1155, 340, 16867, 17853, 87689, 2822, 3244, 83, 1364, 1474, 445, 13683, 1138, 3174, 1669, 17853...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetGroupProjects(t *testing.T) { projectID1, err := dao.AddProject(models.Project{ Name: "grouptest01", OwnerID: 1, }) if err != nil { t.Errorf("Error occurred when AddProject: %v", err) } defer dao.DeleteProject(projectID1) projectID2, err := dao.AddProject(models.Project{ Name: "grouptest02", OwnerID: 1, }) if err != nil { t.Errorf("Error occurred when AddProject: %v", err) } groupID, err := AddUserGroup(models.UserGroup{ GroupName: "test_group_03", GroupType: 1, LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com", }) if err != nil { t.Errorf("Error occurred when AddUserGroup: %v", err) } defer DeleteUserGroup(groupID) pmid, err := project.AddProjectMember(models.Member{ ProjectID: projectID1, EntityID: groupID, EntityType: "g", }) defer project.DeleteProjectMemberByID(pmid) pmid2, err := project.AddProjectMember(models.Member{ ProjectID: projectID2, EntityID: groupID, EntityType: "g", }) defer project.DeleteProjectMemberByID(pmid2) if err := dao.DeleteProject(projectID2); err != nil { t.Errorf("Error occurred when DeleteProject: %v", err) } type args struct { query *models.ProjectQueryParam groupIDs []int } member := &models.MemberQuery{ Name: "grouptestu09", } tests := []struct { name string args args wantSize int wantErr bool }{ {"Query with group DN", args{ query: &models.ProjectQueryParam{ Member: member, }, groupIDs: []int{groupID}, }, 1, false}, {"Query without group DN", args{ query: &models.ProjectQueryParam{ Member: member, }, groupIDs: []int{}, }, 0, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := dao.GetGroupProjects(tt.args.groupIDs, tt.args.query) if (err != nil) != tt.wantErr { t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr) return } if len(got) != tt.wantSize { t.Errorf("GetGroupProjects() size: %v, want %v", len(got), tt.wantSize) } }) } }
explode_data.jsonl/73820
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 894 }
[ 2830, 3393, 1949, 2808, 29958, 1155, 353, 8840, 836, 8, 341, 72470, 915, 16, 11, 1848, 1669, 24775, 1904, 7849, 20289, 30944, 515, 197, 21297, 25, 262, 330, 70, 581, 70334, 15, 16, 756, 197, 197, 13801, 915, 25, 220, 16, 345, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetChannelStateChannelNotFound(t *testing.T) { channelId := big.NewInt(42) stateServiceTest.channelServiceMock.Clear() reply, err := stateServiceTest.service.GetChannelState( nil, &ChannelStateRequest{ ChannelId: bigIntToBytes(channelId), Signature: getSignature(bigIntToBytes(channelId), stateServiceTest.signerPrivateKey), }, ) assert.Equal(t, errors.New("channel is not found, channelId: 42"), err) assert.Nil(t, reply) }
explode_data.jsonl/6075
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 158 }
[ 2830, 3393, 1949, 9629, 1397, 9629, 10372, 1155, 353, 8840, 836, 8, 341, 71550, 764, 1669, 2409, 7121, 1072, 7, 19, 17, 340, 24291, 1860, 2271, 16195, 1860, 11571, 13524, 2822, 86149, 11, 1848, 1669, 1584, 1860, 2271, 5736, 2234, 9629, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRecvTimeout(t *testing.T) { p := newTestPeer() msg, err := RecvTimeout(p, time.Millisecond) if err == nil { t.Fatal("Expected timeout error") } go func() { p.Send(&Hello{}) }() msg, err = RecvTimeout(p, time.Millisecond) if err != nil || msg == nil { t.Fatal("Failed to recv message") } p.Close() _, err = RecvTimeout(p, time.Millisecond) if err == nil || err.Error() != "receive channel closed" { t.Fatal("Expected closed channel error") } }
explode_data.jsonl/18474
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 192 }
[ 2830, 3393, 63483, 7636, 1155, 353, 8840, 836, 8, 341, 3223, 1669, 501, 2271, 30888, 741, 21169, 11, 1848, 1669, 4067, 85, 7636, 1295, 11, 882, 71482, 340, 743, 1848, 621, 2092, 341, 197, 3244, 26133, 445, 18896, 9632, 1465, 1138, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUnescape(t *testing.T) { tests := []struct { name string in string want string }{ {"string that needs no escaping", "lorem ipsum", "lorem ipsum"}, {"escaped with backslash", "lorem\\ ipsum", "lorem ipsum"}, {"escaped with quotes", "'lorem ipsum'", "lorem ipsum"}, {"backslash within quotes", "'lorem\\ ipsum'", "lorem\\ ipsum"}, {"different escapes in same", "lorem\\ ipsum 'lorem ipsum'", "lorem ipsum lorem ipsum"}, {"escaped quotes", "lorem\\' ipsum\\'", "lorem' ipsum'"}, {"mixed escapes", "lorem\\' ip'su\\'m", "lorem' ipsu\\m"}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := Unescape(tt.in); got != tt.want { t.Errorf("Unescape() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/11003
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 332 }
[ 2830, 3393, 1806, 12998, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 17430, 256, 914, 198, 197, 50780, 914, 198, 197, 59403, 197, 197, 4913, 917, 429, 3880, 902, 52654, 497, 330, 385, 1826, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLRUCapacityBytes(t *testing.T) { require := require.New(t) const cacheSize = 5 cache, err := New( Capacity(uint64(cacheSize*sha256.Size), true), ) require.NoError(err, "New") entries := makeEntries(cacheSize) for _, ent := range entries { err = cache.Put(ent.key, ent) require.NoError(err, "Put") } hugeEnt := &testEntry{ key: "huge entry - should fail", value: make([]byte, 1024768), } err = cache.Put(hugeEnt.key, hugeEnt) require.Error(err, "Put - huge entry") newEnt := makeEntry("new entry") err = cache.Put(newEnt.key, newEnt) require.NoError(err, "Put - evict") _, ok := cache.Peek(entries[0].key) require.False(ok, "Put - expected entry evicted") }
explode_data.jsonl/69519
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 274 }
[ 2830, 3393, 20117, 5459, 391, 4018, 7078, 1155, 353, 8840, 836, 8, 341, 17957, 1669, 1373, 7121, 1155, 692, 4777, 6500, 1695, 284, 220, 20, 271, 52680, 11, 1848, 1669, 1532, 1006, 197, 6258, 391, 4018, 8488, 21, 19, 31933, 1695, 9, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestBackwardCompatibilityWithChunksStorage(t *testing.T) { for previousImage, flagsFn := range previousVersionImages { t.Run(fmt.Sprintf("Backward compatibility upgrading from %s", previousImage), func(t *testing.T) { flags := ChunksStorageFlags if flagsFn != nil { flags = flagsFn(flags) } runBackwardCompatibilityTestWithChunksStorage(t, previousImage, flags) }) } }
explode_data.jsonl/77205
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 136 }
[ 2830, 3393, 3707, 1606, 85880, 2354, 89681, 5793, 1155, 353, 8840, 836, 8, 341, 2023, 3681, 1906, 11, 8042, 24911, 1669, 2088, 3681, 5637, 14228, 341, 197, 3244, 16708, 28197, 17305, 445, 3707, 1606, 24748, 45935, 504, 1018, 82, 497, 36...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestContext_Session2(t *testing.T) { c := NewContext() c.Input.CruSession = &session.MemSessionStore{} if store, err := c.Session(); store == nil || err != nil { t.FailNow() } }
explode_data.jsonl/39541
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 70 }
[ 2830, 3393, 1972, 84664, 17, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 1532, 1972, 741, 1444, 16130, 727, 2672, 5283, 284, 609, 5920, 1321, 336, 5283, 6093, 31483, 743, 3553, 11, 1848, 1669, 272, 20674, 2129, 3553, 621, 2092, 1369, 18...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
3
// TestCollection_UpdateId covers UpdateId on a seeded collection: updating
// an existing document by its _id succeeds (with update options), while a
// nil id, a non-matching id, or a malformed update document all error.
func TestCollection_UpdateId(t *testing.T) { ast := require.New(t) cli := initClient("test") defer cli.Close(context.Background()) defer cli.DropCollection(context.Background()) cli.EnsureIndexes(context.Background(), []string{"name"}, nil) id1 := primitive.NewObjectID() id2 := primitive.NewObjectID() docs := []interface{}{ bson.D{{Key: "_id", Value: id1}, {Key: "name", Value: "Alice"}}, bson.D{{Key: "_id", Value: id2}, {Key: "name", Value: "Lucas"}}, } _, _ = cli.InsertMany(context.Background(), docs) var err error // update already exist record update1 := bson.M{ operator.Set: bson.M{ "name": "Alice1", "age": 18, }, } opts := options.UpdateOptions{} opts.UpdateOptions = officialOpts.Update().SetBypassDocumentValidation(false) err = cli.UpdateId(context.Background(), id1, update1, opts) ast.NoError(err) // id is nil or not exist update3 := bson.M{ "name": "Geek", "age": 21, } err = cli.UpdateId(context.Background(), nil, update3) ast.Error(err) err = cli.UpdateId(context.Background(), 1, update3) ast.Error(err) err = cli.UpdateId(context.Background(), "not_exist_id", nil) ast.Error(err) err = cli.UpdateId(context.Background(), "not_exist_id", 1) ast.Error(err) }
explode_data.jsonl/18377
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 479 }
[ 2830, 3393, 6482, 47393, 764, 1155, 353, 8840, 836, 8, 341, 88836, 1669, 1373, 7121, 1155, 340, 86448, 1669, 2930, 2959, 445, 1944, 1138, 16867, 21348, 10421, 5378, 19047, 2398, 16867, 21348, 21688, 6482, 5378, 19047, 2398, 86448, 22834, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWithinSlice(t *testing.T) { outer := make([]byte, 0, 50) inner := outer[2:34] if !WithinBytes(outer, inner) { t.Fatal("unexpected outer/inner result") } if WithinBytes(outer, make([]byte, 0, 10)) { t.Fatal("new slice is incorrectly within outer") } }
explode_data.jsonl/9686
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 104 }
[ 2830, 3393, 41961, 33236, 1155, 353, 8840, 836, 8, 341, 197, 2676, 1669, 1281, 10556, 3782, 11, 220, 15, 11, 220, 20, 15, 340, 197, 4382, 1669, 15955, 58, 17, 25, 18, 19, 2533, 743, 753, 41961, 7078, 7, 2676, 11, 9179, 8, 341, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestProcess_OnAnalyzeProjectUsecase_WhenAnalyzingIdentifiers_ShouldReturnAnalysisResults
// wires mock repositories (project, source code, identifier, analysis) into
// the analyze-project usecase and asserts the returned analysis results:
// file/identifier totals, pipeline configuration (splitters/expanders), and
// absence of errors or error samples.
func TestProcess_OnAnalyzeProjectUsecase_WhenAnalyzingIdentifiers_ShouldReturnAnalysisResults(t *testing.T) { project := entity.Project{ ID: uuid.MustParse("f9b76fde-c342-4328-8650-85da8f21e2be"), Reference: "eroatta/test", Metadata: entity.Metadata{ Fullname: "eroatta/test", }, SourceCode: entity.SourceCode{ Hash: "asdf1234asdf", Location: "/tmp/repositories/eroatta/test", Files: []string{"main.go"}, }, } projectRepositoryMock := projectRepositoryMock{ project: project, } sourceCodeRepositoryMock := sourceCodeFileReaderMock{ files: map[string][]byte{ "main.go": []byte("package main"), }, err: nil, } identifierRepositoryMock := identifierRepositoryMock{ err: nil, } analysisRepositoryMock := analysisRepositoryMock{ analysisResults: entity.AnalysisResults{}, getErr: repository.ErrAnalysisNoResults, addErr: nil, } config := &entity.AnalysisConfig{ Miners: []string{}, ExtractorFactory: newExtractorMock, Splitters: []string{"conserv"}, SplittingAlgorithmFactory: splitter.NewSplitterFactory(), Expanders: []string{"mock"}, ExpansionAlgorithmFactory: expanderAbstractFactoryMock{}, } uc := usecase.NewAnalyzeProjectUsecase(projectRepositoryMock, sourceCodeRepositoryMock, identifierRepositoryMock, analysisRepositoryMock, config) projectID, _ := uuid.NewUUID() results, err := uc.Process(context.TODO(), projectID) assert.NoError(t, err) assert.NotEmpty(t, results.ID) assert.Equal(t, "eroatta/test", results.ProjectName) assert.Equal(t, 1, results.FilesTotal) assert.Equal(t, 1, results.FilesValid) assert.Equal(t, 0, results.FilesError) assert.Empty(t, results.FilesErrorSamples) assert.EqualValues(t, []string{}, results.PipelineMiners) assert.EqualValues(t, []string{"conserv"}, results.PipelineSplitters) assert.EqualValues(t, []string{"mock"}, results.PipelineExpanders) assert.Equal(t, 1, results.IdentifiersTotal) assert.Equal(t, 1, results.IdentifiersValid) assert.Equal(t, 0, results.IdentifiersError) assert.Empty(t, results.IdentifiersErrorSamples) }
explode_data.jsonl/64110
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 847 }
[ 2830, 3393, 7423, 35482, 2082, 55856, 7849, 52, 5024, 519, 62, 4498, 73307, 20371, 28301, 11836, 36578, 616, 5598, 26573, 9801, 1155, 353, 8840, 836, 8, 341, 72470, 1669, 5387, 30944, 515, 197, 29580, 25, 286, 16040, 50463, 14463, 445, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestMVCCStatsDelDelGC writes two tombstones for the same key at ts1 and
// ts2, verifies the aggregated MVCC stats (key/value byte counts and
// GCBytesAge accrued by the first tombstone), then garbage-collects both
// versions and checks the stats return to empty. It also guards against a
// historical bug where limiting the number of GC'd keys corrupted stats.
func TestMVCCStatsDelDelGC(t *testing.T) { defer leaktest.AfterTest(t)() engine := createTestEngine() defer engine.Close() ctx := context.Background() aggMS := &enginepb.MVCCStats{} assertEq(t, engine, "initially", aggMS, &enginepb.MVCCStats{}) key := roachpb.Key("a") ts1 := hlc.Timestamp{WallTime: 1E9} ts2 := hlc.Timestamp{WallTime: 2E9} // Write tombstones at ts1 and ts2. if err := MVCCDelete(ctx, engine, aggMS, key, ts1, nil); err != nil { t.Fatal(err) } if err := MVCCDelete(ctx, engine, aggMS, key, ts2, nil); err != nil { t.Fatal(err) } mKeySize := int64(mvccKey(key).EncodedSize()) // 2 vKeySize := mvccVersionTimestampSize // 12 expMS := enginepb.MVCCStats{ LastUpdateNanos: 2E9, KeyBytes: mKeySize + 2*vKeySize, // 26 KeyCount: 1, ValCount: 2, GCBytesAge: 1 * vKeySize, // first tombstone, aged from ts1 to ts2 } assertEq(t, engine, "after two puts", aggMS, &expMS) // Run a GC invocation that clears it all. There used to be a bug here when // we allowed limiting the number of deleted keys. Passing zero (i.e. remove // one key and then bail) would mess up the stats, since the implementation // would assume that the (implicit or explicit) meta entry was going to be // removed, but this is only true when all values actually go away. if err := MVCCGarbageCollect( ctx, engine, aggMS, []roachpb.GCRequest_GCKey{{ Key: key, Timestamp: ts2, }}, ts2, ); err != nil { t.Fatal(err) } expAggMS := enginepb.MVCCStats{ LastUpdateNanos: 2E9, } assertEq(t, engine, "after GC", aggMS, &expAggMS) }
explode_data.jsonl/41644
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 660 }
[ 2830, 3393, 66626, 3706, 16635, 16532, 16532, 22863, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 741, 80118, 1669, 1855, 2271, 4571, 741, 16867, 4712, 10421, 2822, 20985, 1669, 2266, 19047, 741, 197, 15718, 482...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestKnativeProbeHeader(t *testing.T) { req, err := http.NewRequest(http.MethodGet, "http://example.com/", nil) if err != nil { t.Fatal("Error building request:", err) } if h := KnativeProbeHeader(req); h != "" { t.Errorf("KnativeProbeHeader(req)=%v, want empty string", h) } const want = "activator" req.Header.Set(ProbeHeaderName, want) if h := KnativeProbeHeader(req); h != want { t.Errorf("KnativeProbeHeader(req)=%v, want %v", h, want) } req.Header.Set(ProbeHeaderName, "") if h := KnativeProbeHeader(req); h != "" { t.Errorf("KnativeProbeHeader(req)=%v, want empty string", h) } }
explode_data.jsonl/58829
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 244 }
[ 2830, 3393, 42, 29738, 81426, 4047, 1155, 353, 8840, 836, 8, 341, 24395, 11, 1848, 1669, 1758, 75274, 19886, 20798, 1949, 11, 330, 1254, 1110, 8687, 905, 28105, 2092, 340, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 445, 1454, 4752, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestTlfNameChangePrivateWithoutObservation runs a scripted KBFS scenario:
// bob initially cannot read a private TLF named with a social assertion
// ("alice,bob@twitter"); after bob proves the twitter assertion, the TLF
// resolves to the canonical "alice,bob" name and both users can write/read.
func TestTlfNameChangePrivateWithoutObservation(t *testing.T) { test(t, users("alice", "bob"), inPrivateTlf("alice,bob@twitter"), as(bob, expectError(initRoot(), "bob does not have read access to directory /keybase/private/alice,bob@twitter"), ), addNewAssertion("bob", "bob@twitter"), inPrivateTlfNonCanonical("alice,bob@twitter", "alice,bob"), as(alice, mkfile("foo.txt", "hello world"), ), as(bob, read("foo.txt", "hello world"), ), ) }
explode_data.jsonl/41324
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 202 }
[ 2830, 3393, 51, 11008, 675, 4072, 16787, 26040, 37763, 367, 1155, 353, 8840, 836, 8, 341, 18185, 1155, 345, 197, 90896, 445, 63195, 497, 330, 47086, 4461, 197, 17430, 16787, 51, 11008, 445, 63195, 8402, 674, 31, 14679, 4461, 197, 60451,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestDebRules runs the acceptance scenario for the rules.deb package
// definition using its YAML config and dockerfile fixtures.
func TestDebRules(t *testing.T) {
	accept(t, acceptParms{
		Name:       "rules.deb",
		Conf:       "rules.deb.yaml",
		Format:     "deb",
		Dockerfile: "rules.deb.dockerfile",
	})
}
explode_data.jsonl/15464
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 89 }
[ 2830, 3393, 1912, 65, 26008, 1155, 353, 8840, 836, 8, 341, 197, 10330, 1155, 11, 4193, 85440, 515, 197, 21297, 25, 981, 330, 21977, 80731, 756, 197, 197, 15578, 25, 981, 330, 21977, 80731, 33406, 756, 197, 197, 4061, 25, 257, 330, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func Test_adaptStreams(t *testing.T) { t.SkipNow() tests := []struct { name string terraform string expected []kinesis.Stream }{ { name: "basic", terraform: ` resource "" "example" { } `, expected: []kinesis.Stream{}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { modules := testutil.CreateModulesFromSource(test.terraform, ".tf", t) adapted := adaptStreams(modules) testutil.AssertDefsecEqual(t, test.expected, adapted) }) } }
explode_data.jsonl/16194
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 218 }
[ 2830, 3393, 10027, 2689, 73576, 1155, 353, 8840, 836, 8, 341, 3244, 57776, 7039, 741, 78216, 1669, 3056, 1235, 341, 197, 11609, 414, 914, 198, 197, 197, 61385, 914, 198, 197, 42400, 220, 3056, 74, 82789, 33308, 198, 197, 59403, 197, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPrintJsonVersion(t *testing.T) { pkg.Commit = "abc123" pkg.Version = "v1.2.3" m := NewTestMixin(t) opts := version.Options{} opts.RawFormat = string(printer.FormatJson) err := opts.Validate() require.NoError(t, err) m.PrintVersion(opts) gotOutput := m.TestContext.GetOutput() wantOutput := `{ "name": "exec", "version": "v1.2.3", "commit": "abc123", "author": "DeisLabs" } ` if !strings.Contains(gotOutput, wantOutput) { t.Fatalf("invalid output:\nWANT:\t%q\nGOT:\t%q\n", wantOutput, gotOutput) } }
explode_data.jsonl/64488
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 230 }
[ 2830, 3393, 8994, 5014, 5637, 1155, 353, 8840, 836, 8, 341, 3223, 7351, 53036, 284, 330, 13683, 16, 17, 18, 698, 3223, 7351, 35842, 284, 330, 85, 16, 13, 17, 13, 18, 1837, 2109, 1669, 1532, 2271, 38456, 1155, 692, 64734, 1669, 2319,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestAddAudiences(t *testing.T) { type RequestBody struct { AudienceGroupID int `json:"audienceGroupId,omitempty"` UploadDescription string `json:"uploadDescription,omitempty"` Audiences []audience `json:"audiences,omitempty"` } type want struct { URLPath string RequestBody RequestBody Response *BasicResponse Error error } testCases := []struct { Label string AudienceGroupID int UploadDescription string Audiences []string RequestID string AcceptedRequestID string ResponseCode int Response []byte Want want }{ { Label: "Add Audience Fail", AudienceGroupID: 4389303728991, UploadDescription: "audienceGroupNameJob_01", Audiences: []string{ "U4af4980627", "U4af4980628", "U4af4980629", }, RequestID: "12222", ResponseCode: http.StatusBadRequest, Response: []byte(``), Want: want{ URLPath: APIAudienceGroupUpload, RequestBody: RequestBody{ AudienceGroupID: 4389303728991, UploadDescription: "audienceGroupNameJob_01", Audiences: []audience{ { ID: "U4af4980627", }, { ID: "U4af4980628", }, { ID: "U4af4980629", }, }, }, Error: &APIError{ Code: http.StatusBadRequest, }, }, }, { Label: "add Audience no uploadDescription", AudienceGroupID: 4389303728991, Audiences: []string{ "U4af4980627", "U4af4980628", "U4af4980629", }, RequestID: "12222", ResponseCode: http.StatusOK, Response: []byte(``), Want: want{ URLPath: APIAudienceGroupUpload, RequestBody: RequestBody{ AudienceGroupID: 4389303728991, Audiences: []audience{ { ID: "U4af4980627", }, { ID: "U4af4980628", }, { ID: "U4af4980629", }, }, }, Response: &BasicResponse{ RequestID: "12222", }, }, }, { Label: "add Audience", AudienceGroupID: 4389303728991, UploadDescription: "audienceGroupNameJob_01", Audiences: []string{ "U4af4980627", "U4af4980628", "U4af4980629", }, RequestID: "12222", ResponseCode: http.StatusOK, Response: []byte(``), Want: want{ URLPath: APIAudienceGroupUpload, RequestBody: RequestBody{ AudienceGroupID: 4389303728991, UploadDescription: "audienceGroupNameJob_01", Audiences: []audience{ { ID: "U4af4980627", }, { 
ID: "U4af4980628", }, { ID: "U4af4980629", }, }, }, Response: &BasicResponse{ RequestID: "12222", }, }, }, } var currentTestIdx int server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() tc := testCases[currentTestIdx] if r.Method != http.MethodPut { t.Errorf("Method %s; want %s", r.Method, http.MethodPut) } if r.URL.Path != tc.Want.URLPath { t.Errorf("URLPath %s; want %s", r.URL.Path, tc.Want.URLPath) } var result RequestBody if err := json.NewDecoder(r.Body).Decode(&result); err != nil { t.Fatal(err) } if !reflect.DeepEqual(result, tc.Want.RequestBody) { t.Errorf("Request %v; want %v", result, tc.Want.RequestBody) } w.Header().Set("X-Line-Request-Id", tc.RequestID) if tc.AcceptedRequestID != "" { w.Header().Set("X-Line-Accepted-Request-Id", tc.AcceptedRequestID) } w.WriteHeader(tc.ResponseCode) w.Write(tc.Response) })) defer server.Close() dataServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() t.Error("Unexpected data API call") w.WriteHeader(http.StatusNotFound) w.Write([]byte(`{"message":"Not found"}`)) })) defer dataServer.Close() client, err := mockClient(server, dataServer) if err != nil { t.Fatal(err) } var res *BasicResponse for i, tc := range testCases { currentTestIdx = i t.Run(strconv.Itoa(i)+"/"+tc.Label, func(t *testing.T) { var options []IAddAudiencesOption if tc.UploadDescription != "" { options = append(options, WithAddAudiencesCallUploadDescription(tc.UploadDescription)) } timeoutCtx, cancelFn := context.WithTimeout(context.Background(), 1*time.Second) defer cancelFn() res, err = client.AddAudiences(tc.AudienceGroupID, tc.Audiences, options...).WithContext(timeoutCtx).Do() if tc.Want.Error != nil { log.Println(err) log.Println(tc.Want.Error) if !reflect.DeepEqual(err, tc.Want.Error) { t.Errorf("Error %v; want %v", err, tc.Want.Error) } } else { if err != nil { t.Error(err) } } if tc.Want.Response != nil { if 
!reflect.DeepEqual(res, tc.Want.Response) { t.Errorf("Response %v; want %v", res, tc.Want.Response) } } }) } }
explode_data.jsonl/37771
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2309 }
[ 2830, 3393, 2212, 52949, 11975, 1155, 353, 8840, 836, 8, 341, 13158, 6145, 5444, 2036, 341, 197, 22985, 661, 1835, 2808, 915, 256, 526, 286, 1565, 2236, 2974, 7880, 1835, 48410, 20478, 8805, 197, 197, 13844, 5009, 914, 257, 1565, 2236, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestMallocCrashesOnNil(t *testing.T) { t.Parallel() cmd := exec.Command("go", "run", path("malloc.go")) out, err := cmd.CombinedOutput() if err == nil { t.Logf("%#q:\n%s", strings.Join(cmd.Args, " "), out) t.Fatalf("succeeded unexpectedly") } }
explode_data.jsonl/5830
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 111 }
[ 2830, 3393, 53681, 16001, 14051, 1925, 19064, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 25920, 1669, 3883, 12714, 445, 3346, 497, 330, 6108, 497, 1815, 445, 16166, 18002, 5455, 13967, 11, 1848, 1669, 5439, 727, 2855, 1589, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_getWorkflowHandler_AsProvider(t *testing.T) { api, tsURL := newTestServer(t) admin, _ := assets.InsertAdminUser(t, api.mustDB()) localConsumer, err := authentication.LoadConsumerByTypeAndUserID(context.TODO(), api.mustDB(), sdk.ConsumerLocal, admin.ID, authentication.LoadConsumerOptions.WithAuthentifiedUser) require.NoError(t, err) _, jws, err := builtin.NewConsumer(context.TODO(), api.mustDB(), sdk.RandomString(10), sdk.RandomString(10), localConsumer, admin.GetGroupIDs(), sdk.NewAuthConsumerScopeDetails(sdk.AuthConsumerScopeProject)) u, _ := assets.InsertLambdaUser(t, api.mustDB()) pkey := sdk.RandomString(10) proj := assets.InsertTestProject(t, api.mustDB(), api.Cache, pkey, pkey) require.NoError(t, group.InsertLinkGroupUser(context.TODO(), api.mustDB(), &group.LinkGroupUser{ GroupID: proj.ProjectGroups[0].Group.ID, AuthentifiedUserID: u.ID, Admin: true, })) pip := sdk.Pipeline{ ProjectID: proj.ID, ProjectKey: proj.Key, Name: "pip1", } test.NoError(t, pipeline.InsertPipeline(api.mustDB(), &pip)) proj, _ = project.LoadByID(api.mustDB(), proj.ID, project.LoadOptions.WithApplications, project.LoadOptions.WithPipelines, project.LoadOptions.WithEnvironments, project.LoadOptions.WithGroups, ) wf := sdk.Workflow{ Name: "workflow1", ProjectID: proj.ID, ProjectKey: proj.Key, WorkflowData: sdk.WorkflowData{ Node: sdk.Node{ Name: "root", Context: &sdk.NodeContext{ PipelineID: pip.ID, }, }, }, } test.NoError(t, workflow.Insert(context.TODO(), api.mustDB(), api.Cache, *proj, &wf)) sdkclient := cdsclient.NewProviderClient(cdsclient.ProviderConfig{ Host: tsURL, Token: jws, }) w, err := sdkclient.WorkflowLoad(pkey, wf.Name) test.NoError(t, err) t.Logf("%+v", w) /// }
explode_data.jsonl/31070
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 756 }
[ 2830, 3393, 3062, 62768, 3050, 62741, 5179, 1155, 353, 8840, 836, 8, 341, 54299, 11, 10591, 3144, 1669, 501, 2271, 5475, 1155, 692, 64394, 11, 716, 1669, 11770, 23142, 7210, 1474, 1155, 11, 6330, 69419, 3506, 2398, 8854, 29968, 11, 1848...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSTMConflict(t *testing.T) { clus := NewClusterV3(t, &ClusterConfig{Size: 3}) defer clus.Terminate(t) etcdc := clus.RandClient() keys := make([]string, 5) for i := 0; i < len(keys); i++ { keys[i] = fmt.Sprintf("foo-%d", i) if _, err := etcdc.Put(context.TODO(), keys[i], "100"); err != nil { t.Fatalf("could not make key (%v)", err) } } errc := make(chan error) for i := range keys { curEtcdc := clus.RandClient() srcKey := keys[i] applyf := func(stm concurrency.STM) error { src := stm.Get(srcKey) // must be different key to avoid double-adding dstKey := srcKey for dstKey == srcKey { dstKey = keys[rand.Intn(len(keys))] } dst := stm.Get(dstKey) srcV, _ := strconv.ParseInt(src, 10, 64) dstV, _ := strconv.ParseInt(dst, 10, 64) if srcV == 0 { // can't rand.Intn on 0, so skip this transaction return nil } xfer := int64(rand.Intn(int(srcV)) / 2) stm.Put(srcKey, fmt.Sprintf("%d", srcV-xfer)) stm.Put(dstKey, fmt.Sprintf("%d", dstV+xfer)) return nil } go func() { iso := concurrency.WithIsolation(concurrency.RepeatableReads) _, err := concurrency.NewSTM(curEtcdc, applyf, iso) errc <- err }() } // wait for txns for range keys { if err := <-errc; err != nil { t.Fatalf("apply failed (%v)", err) } } // ensure sum matches initial sum sum := 0 for _, oldkey := range keys { rk, err := etcdc.Get(context.TODO(), oldkey) if err != nil { t.Fatalf("couldn't fetch key %s (%v)", oldkey, err) } v, _ := strconv.ParseInt(string(rk.Kvs[0].Value), 10, 64) sum += int(v) } if sum != len(keys)*100 { t.Fatalf("bad sum. got %d, expected %d", sum, len(keys)*100) } }
explode_data.jsonl/51049
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 773 }
[ 2830, 3393, 35849, 57974, 1155, 353, 8840, 836, 8, 341, 197, 4163, 1669, 1532, 28678, 53, 18, 1155, 11, 609, 28678, 2648, 90, 1695, 25, 220, 18, 3518, 16867, 1185, 355, 836, 261, 34016, 1155, 692, 197, 295, 72026, 1669, 1185, 355, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRunCallbacks(t *testing.T) { p := Product{Code: "unique_code", Price: 100} DB.Save(&p) if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 1, 0, 0, 0, 0}) { t.Errorf("Callbacks should be invoked successfully, %v", p.GetCallTimes()) } DB.Where("Code = ?", "unique_code").First(&p) if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 0, 0, 0, 0, 1}) { t.Errorf("After callbacks values are not saved, %v", p.GetCallTimes()) } p.Price = 200 DB.Save(&p) if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 1, 1, 0, 0, 1}) { t.Errorf("After update callbacks should be invoked successfully, %v", p.GetCallTimes()) } var products []Product DB.Find(&products, "code = ?", "unique_code") if products[0].AfterFindCallTimes != 2 { t.Errorf("AfterFind callbacks should work with slice") } DB.Where("Code = ?", "unique_code").First(&p) if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 0, 0, 2}) { t.Errorf("After update callbacks values are not saved, %v", p.GetCallTimes()) } DB.Delete(&p) if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 1, 1, 2}) { t.Errorf("After delete callbacks should be invoked successfully, %v", p.GetCallTimes()) } if DB.Where("Code = ?", "unique_code").First(&p).Error == nil { t.Errorf("Can't find a deleted record") } }
explode_data.jsonl/13358
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 538 }
[ 2830, 3393, 6727, 44461, 1155, 353, 8840, 836, 8, 341, 3223, 1669, 5643, 90, 2078, 25, 330, 9587, 4136, 497, 8483, 25, 220, 16, 15, 15, 532, 45409, 13599, 2099, 79, 692, 743, 753, 34913, 94750, 1295, 2234, 7220, 18889, 1507, 3056, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8