text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestAsyncCommitCalTSFail(t *testing.T) {
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.SafeWindow = time.Second
conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
})
store, clean := createMockStoreAndSetup(t)
defer clean()
tk := createAsyncCommitTestKit(t, store)
tk2 := createAsyncCommitTestKit(t, store)
tk.MustExec("drop table if exists tk")
tk.MustExec("create table tk (c1 int primary key, c2 int)")
tk.MustExec("insert into tk values (1, 1)")
tk.MustExec("set tidb_enable_1pc = true")
tk.MustExec("begin pessimistic")
tk.MustQuery("select * from tk for update").Check(testkit.Rows("1 1"))
require.NoError(t, failpoint.Enable("tikvclient/failCheckSchemaValid", "return"))
require.Error(t, tk.ExecToErr("commit"))
require.NoError(t, failpoint.Disable("tikvclient/failCheckSchemaValid"))
// The lock should not be blocked.
tk2.MustExec("set innodb_lock_wait_timeout = 5")
tk2.MustExec("begin pessimistic")
tk2.MustExec("update tk set c2 = c2 + 1")
tk2.MustExec("commit")
} | explode_data.jsonl/12504 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 394
} | [
2830,
3393,
6525,
33441,
8851,
9951,
19524,
1155,
353,
8840,
836,
8,
341,
16867,
2193,
31129,
460,
9626,
368,
741,
25873,
16689,
11646,
18552,
29879,
353,
1676,
10753,
8,
341,
197,
67850,
836,
72,
82707,
2959,
44119,
33441,
89828,
4267,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMiddlewareShouldInterceptLinkHeader(t *testing.T) {
// given
request, err := http.NewRequest(http.MethodGet, "/index.html", nil)
writer := httptest.NewRecorder()
if err != nil {
t.Fatalf("Could not create HTTP request: %v", err)
}
middleware := Middleware{
Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
w.Header().Add("Link", "</index.css>; rel=preload; as=stylesheet;")
w.Header().Add("Link", "</index2.css>; rel=preload; as=stylesheet;")
w.Header().Add("Link", "")
w.Header().Add("Link", "</index3.css>")
w.Header().Add("Link", "</index4.css>; rel=preload; nopush")
return 0, nil
}),
Rules: []Rule{},
}
pushingWriter := &MockedPusher{ResponseWriter: writer}
// when
_, err2 := middleware.ServeHTTP(pushingWriter, request)
// then
if err2 != nil {
t.Error("Should not return error")
}
expectedPushedResources := map[string]*http.PushOptions{
"/index.css": {
Method: http.MethodGet,
Header: http.Header{},
},
"/index2.css": {
Method: http.MethodGet,
Header: http.Header{},
},
"/index3.css": {
Method: http.MethodGet,
Header: http.Header{},
},
}
comparePushedResources(t, expectedPushedResources, pushingWriter.pushed)
} | explode_data.jsonl/66104 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 495
} | [
2830,
3393,
24684,
14996,
3306,
1484,
3939,
4047,
1155,
353,
8840,
836,
8,
341,
197,
322,
2661,
198,
23555,
11,
1848,
1669,
1758,
75274,
19886,
20798,
1949,
11,
3521,
1252,
2564,
497,
2092,
340,
38959,
1669,
54320,
70334,
7121,
47023,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReportFunctions(t *testing.T) {
t.Run("Failed if ctx.Err() == nil", func(t *testing.T) {
cb := circuitbreaker.New(nil)
cb.FailWithContext(context.Background())
assert.Equal(t, int64(1), cb.Counters().Failures)
})
t.Run("ctx.Err() == context.Canceled", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
cb := circuitbreaker.New(nil)
cb.FailWithContext(ctx)
assert.Equal(t, int64(0), cb.Counters().Failures)
cb = circuitbreaker.New(&circuitbreaker.Options{FailOnContextCancel: true})
cb.FailWithContext(ctx)
assert.Equal(t, int64(1), cb.Counters().Failures)
})
t.Run("ctx.Err() == context.DeadlineExceeded", func(t *testing.T) {
ctx, cancel := context.WithDeadline(context.Background(), time.Time{})
defer cancel()
cb := circuitbreaker.New(nil)
cb.FailWithContext(ctx)
assert.Equal(t, int64(0), cb.Counters().Failures)
cb = circuitbreaker.New(&circuitbreaker.Options{FailOnContextDeadline: true})
cb.FailWithContext(ctx)
assert.Equal(t, int64(1), cb.Counters().Failures)
})
} | explode_data.jsonl/8223 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 420
} | [
2830,
3393,
10361,
25207,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
9408,
421,
5635,
27862,
368,
621,
2092,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
63810,
1669,
16224,
64121,
7121,
27907,
340,
197,
63810,
57243,
91101,
5378,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWaitOnPermit(t *testing.T) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod",
UID: types.UID("pod"),
},
}
tests := []struct {
name string
action func(f framework.Framework)
want *framework.Status
}{
{
name: "Reject Waiting Pod",
action: func(f framework.Framework) {
f.GetWaitingPod(pod.UID).Reject(permitPlugin, "reject message")
},
want: framework.NewStatus(framework.Unschedulable, "reject message").WithFailedPlugin(permitPlugin),
},
{
name: "Allow Waiting Pod",
action: func(f framework.Framework) {
f.GetWaitingPod(pod.UID).Allow(permitPlugin)
},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
testPermitPlugin := &TestPermitPlugin{}
r := make(Registry)
r.Register(permitPlugin,
func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
return testPermitPlugin, nil
})
plugins := &config.Plugins{
Permit: config.PluginSet{Enabled: []config.Plugin{{Name: permitPlugin, Weight: 1}}},
}
f, err := newFrameworkWithQueueSortAndBind(r, plugins, emptyArgs)
if err != nil {
t.Fatalf("Failed to create framework for testing: %v", err)
}
runPermitPluginsStatus := f.RunPermitPlugins(context.Background(), nil, pod, "")
if runPermitPluginsStatus.Code() != framework.Wait {
t.Fatalf("Expected RunPermitPlugins to return status %v, but got %v",
framework.Wait, runPermitPluginsStatus.Code())
}
go tt.action(f)
got := f.WaitOnPermit(context.Background(), pod)
if !reflect.DeepEqual(tt.want, got) {
t.Errorf("Unexpected status: want %v, but got %v", tt.want, got)
}
})
}
} | explode_data.jsonl/35751 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 694
} | [
2830,
3393,
14190,
1925,
3889,
1763,
1155,
353,
8840,
836,
8,
341,
3223,
347,
1669,
609,
85,
16,
88823,
515,
197,
23816,
12175,
25,
77520,
16,
80222,
515,
298,
21297,
25,
330,
39073,
756,
298,
197,
6463,
25,
220,
4494,
5255,
915,
44... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBuilder_BuildQuery(t *testing.T) {
t.Run("with-range-query", func(t *testing.T) {
const expectedQueryStringWithRange = "test_metric{test_label1=\"test_value1\",test_label2=\"test_value2\"}[5s]"
assert.Equal(t, expectedQueryStringWithRange, testBuilderWithRangeQuery.BuildQuery())
})
t.Run("without-range-query", func(t *testing.T) {
const expectedQueryStringWithoutRange = "test_metric{test_label1=\"test_value1\",test_label2=\"test_value2\"}"
assert.Equal(t, expectedQueryStringWithoutRange, testBuilderWithoutRangeQuery.BuildQuery())
})
} | explode_data.jsonl/20479 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 192
} | [
2830,
3393,
3297,
96686,
2859,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
4197,
30508,
65489,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
4777,
3601,
67001,
2354,
6046,
284,
330,
1944,
41294,
90,
1944,
6106,
16,
4070,
1944,
314... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetReplicaStatus(t *testing.T) {
tests := map[string]struct {
expectedOutput string
Volume VolumeInfo
}{
"Fetching ReplicaStatus from openebs.io/replica-status": {
Volume: VolumeInfo{
Volume: v1alpha1.CASVolume{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"openebs.io/replica-status": "running, running, running",
},
},
},
},
expectedOutput: "running, running, running",
},
"Fetching ReplicaStatus from vsm.openebs.io/replica-status": {
Volume: VolumeInfo{
Volume: v1alpha1.CASVolume{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"vsm.openebs.io/replica-status": "running, running, running",
},
},
},
},
expectedOutput: "running, running, running",
},
"Fetching Replica status when both keys are present": {
Volume: VolumeInfo{
Volume: v1alpha1.CASVolume{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"openebs.io/replica-status": "running, running, running",
"vsm.openebs.io/replica-status": "running, running, running",
},
},
},
},
expectedOutput: "running, running, running",
},
"Fetching ReplicaStatus when no key is present": {
Volume: VolumeInfo{
Volume: v1alpha1.CASVolume{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
},
},
},
expectedOutput: "",
},
}
for name, tt := range tests {
t.Run(name, func(t *testing.T) {
got := tt.Volume.GetReplicaStatus()
if got != tt.expectedOutput {
t.Fatalf("Test: %v Expected: %v but got: %v", name, tt.expectedOutput, got)
}
})
}
} | explode_data.jsonl/78053 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 750
} | [
2830,
3393,
1949,
18327,
15317,
2522,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
2415,
14032,
60,
1235,
341,
197,
42400,
5097,
914,
198,
197,
17446,
4661,
260,
20265,
1731,
198,
197,
59403,
197,
197,
1,
52416,
94036,
2522,
504,
1787,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestHeartbeat(t *testing.T) {
f := heartbeatFixture{t: t}
defer f.Cleanup()
f.Bootstrap()
f.Grow()
f.Grow()
leader := f.Leader()
leaderState := f.State(leader)
// Artificially mark all nodes as down
err := leaderState.Cluster.Transaction(func(tx *db.ClusterTx) error {
nodes, err := tx.Nodes()
require.NoError(t, err)
for _, node := range nodes {
err := tx.NodeHeartbeat(node.Address, time.Now().Add(-time.Minute))
require.NoError(t, err)
}
return nil
})
require.NoError(t, err)
// Perform the heartbeat requests.
leader.Cluster = leaderState.Cluster
heartbeat, _ := cluster.HeartbeatTask(leader)
ctx := context.Background()
heartbeat(ctx)
// The heartbeat timestamps of all nodes got updated
err = leaderState.Cluster.Transaction(func(tx *db.ClusterTx) error {
nodes, err := tx.Nodes()
require.NoError(t, err)
offlineThreshold, err := tx.NodeOfflineThreshold()
require.NoError(t, err)
for _, node := range nodes {
assert.False(t, node.IsOffline(offlineThreshold))
}
return nil
})
require.NoError(t, err)
} | explode_data.jsonl/64684 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 404
} | [
2830,
3393,
45384,
22227,
1155,
353,
8840,
836,
8,
341,
1166,
1669,
52105,
18930,
90,
83,
25,
259,
532,
16867,
282,
727,
60639,
2822,
1166,
13,
45511,
741,
1166,
1224,
651,
741,
1166,
1224,
651,
2822,
197,
37391,
1669,
282,
11824,
998... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestSetSheetBackgroundErrors(t *testing.T) {
f, err := OpenFile(filepath.Join("test", "Book1.xlsx"))
if !assert.NoError(t, err) {
t.FailNow()
}
err = f.SetSheetBackground("Sheet2", filepath.Join("test", "not_exists", "not_exists.png"))
if assert.Error(t, err) {
assert.True(t, os.IsNotExist(err), "Expected os.IsNotExists(err) == true")
}
err = f.SetSheetBackground("Sheet2", filepath.Join("test", "Book1.xlsx"))
assert.EqualError(t, err, "unsupported image extension")
} | explode_data.jsonl/36966 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 199
} | [
2830,
3393,
1649,
10541,
8706,
13877,
1155,
353,
8840,
836,
8,
341,
1166,
11,
1848,
1669,
5264,
1703,
34793,
22363,
445,
1944,
497,
330,
7134,
16,
46838,
5455,
743,
753,
2207,
35699,
1155,
11,
1848,
8,
341,
197,
3244,
57243,
7039,
741... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestRouter_Redirect_RegistersInternalRedirections(t *testing.T) {
mainRouter := NewRouter()
_ = mainRouter.Redirect("/from", "/to", http.StatusMovedPermanently)
req, _ := http.NewRequest(http.MethodGet, "/from", nil)
getResponse := httptest.NewRecorder()
mainRouter.ServeHTTP(getResponse, req)
assertEqual(t, http.StatusMovedPermanently, getResponse.Code)
assertStringContains(t, "/to", getResponse.Header().Get("Location"))
} | explode_data.jsonl/31754 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 152
} | [
2830,
3393,
9523,
92940,
1226,
49384,
9303,
11569,
6033,
2866,
82,
1155,
353,
8840,
836,
8,
341,
36641,
9523,
1669,
1532,
9523,
2822,
197,
62,
284,
1887,
9523,
38869,
4283,
1499,
497,
3521,
983,
497,
1758,
10538,
53232,
3889,
1515,
4402... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEnvVarsToMap(t *testing.T) {
vars := []EnvVar{
{
Name: "foo",
Value: "bar",
},
{
Name: "zoo",
Value: "baz",
},
}
varMap := EnvVarsToMap(vars)
if e, a := len(vars), len(varMap); e != a {
t.Errorf("Unexpected map length; expected: %d, got %d", e, a)
}
if a := varMap["foo"]; a != "bar" {
t.Errorf("Unexpected value of key 'foo': %v", a)
}
if a := varMap["zoo"]; a != "baz" {
t.Errorf("Unexpected value of key 'zoo': %v", a)
}
} | explode_data.jsonl/18698 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 237
} | [
2830,
3393,
14359,
28305,
1249,
2227,
1155,
353,
8840,
836,
8,
341,
2405,
82,
1669,
3056,
14359,
3962,
515,
197,
197,
515,
298,
21297,
25,
220,
330,
7975,
756,
298,
47399,
25,
330,
2257,
756,
197,
197,
1583,
197,
197,
515,
298,
2129... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestQuerierWithBoundaryChunks(t *testing.T) {
db, delete := openTestDB(t, nil)
defer func() {
testutil.Ok(t, db.Close())
delete()
}()
app := db.Appender()
blockRange := DefaultOptions.BlockRanges[0]
label := labels.FromStrings("foo", "bar")
for i := int64(0); i < 5; i++ {
_, err := app.Add(label, i*blockRange, 0)
testutil.Ok(t, err)
}
err := app.Commit()
testutil.Ok(t, err)
err = db.compact()
testutil.Ok(t, err)
testutil.Assert(t, len(db.blocks) >= 3, "invalid test, less than three blocks in DB")
q, err := db.Querier(blockRange, 2*blockRange)
testutil.Ok(t, err)
defer q.Close()
// The requested interval covers 2 blocks, so the querier should contain 2 blocks.
count := len(q.(*querier).blocks)
testutil.Assert(t, count == 2, "expected 2 blocks in querier, got %d", count)
} | explode_data.jsonl/64383 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 325
} | [
2830,
3393,
2183,
261,
1268,
2354,
57977,
89681,
1155,
353,
8840,
836,
8,
341,
20939,
11,
3698,
1669,
1787,
2271,
3506,
1155,
11,
2092,
340,
16867,
2915,
368,
341,
197,
18185,
1314,
54282,
1155,
11,
2927,
10421,
2398,
197,
15618,
741,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSchemaWriteNoPrefixNotRequired(t *testing.T) {
conn, cleanup, _ := testserver.NewTestServer(require.New(t), 0, memdb.DisableGC, 0, false, testfixtures.EmptyDatastore)
t.Cleanup(cleanup)
client := v1alpha1.NewSchemaServiceClient(conn)
resp, err := client.WriteSchema(context.Background(), &v1alpha1.WriteSchemaRequest{
Schema: `definition user {}`,
})
require.NoError(t, err)
rev, err := nspkg.DecodeV1Alpha1Revision(resp.ComputedDefinitionsRevision)
require.NoError(t, err)
require.Len(t, rev, 1)
} | explode_data.jsonl/54542 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 192
} | [
2830,
3393,
8632,
7985,
2753,
14335,
2623,
8164,
1155,
353,
8840,
836,
8,
341,
32917,
11,
21290,
11,
716,
1669,
1273,
4030,
7121,
2271,
5475,
23482,
7121,
1155,
701,
220,
15,
11,
1833,
1999,
10166,
480,
22863,
11,
220,
15,
11,
895,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSendAlertRuleDoesntExist(t *testing.T) {
sqsMock := &mockSqs{}
sqsClient = sqsMock
mockRoundTripper := &mockRoundTripper{}
httpClient = &http.Client{Transport: mockRoundTripper}
mockRoundTripper.On("RoundTrip", mock.Anything).Return(generateResponse(testRuleResponse, http.StatusNotFound), nil).Once()
assert.NoError(t, SendAlert(testAlertDedupEvent))
sqsMock.AssertExpectations(t)
mockRoundTripper.AssertExpectations(t)
} | explode_data.jsonl/39038 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 165
} | [
2830,
3393,
11505,
9676,
11337,
21468,
406,
25613,
1155,
353,
8840,
836,
8,
341,
1903,
26358,
11571,
1669,
609,
16712,
50,
26358,
16094,
1903,
26358,
2959,
284,
18031,
82,
11571,
271,
77333,
27497,
21884,
6922,
1669,
609,
16712,
27497,
21... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIsOdd(t *testing.T) {
tests := []struct {
in string //十六进制编码值
expected bool //预期的奇怪
}{
{"0", false},
{"1", true},
{"2", false},
//2 ^ 32—1
{"ffffffff", true},
//2 ^ 64—2
{"fffffffffffffffe", false},
//SECP256K1质数
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", true},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in)
result := f.IsOdd()
if result != test.expected {
t.Errorf("fieldVal.IsOdd #%d wrong result\n"+
"got: %v\nwant: %v", i, result, test.expected)
continue
}
}
} | explode_data.jsonl/360 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 294
} | [
2830,
3393,
3872,
67389,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
258,
981,
914,
442,
102853,
41299,
43316,
112950,
25511,
198,
7325,
1807,
256,
442,
104394,
9370,
106097,
198,
197,
59403,
197,
197,
4913,
15,
497,
895... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestMeshEmitter(t *testing.T) {
assert := asserts.NewTesting(t, asserts.FailStop)
ctx, cancel := context.WithCancel(context.Background())
sigc := make(chan interface{})
behaviorFunc := func(cell mesh.Cell, in mesh.Receptor, out mesh.Emitter) error {
i := 0
for {
select {
case <-cell.Context().Done():
return nil
case evt := <-in.Pull():
i++
if evt.Topic() == "get-i" {
sigc <- i
}
}
}
}
msh := mesh.New(ctx)
emtr, err := msh.Emitter("testing")
assert.ErrorContains(err, "cell 'testing' does not exist")
msh.Go("testing", mesh.BehaviorFunc(behaviorFunc))
emtr, err = msh.Emitter("testing")
assert.NoError(err)
emtr.Emit("one")
emtr.Emit("two")
emtr.Emit("three")
emtr.Emit("get-i")
assert.Wait(sigc, 4, time.Second)
cancel()
} | explode_data.jsonl/18924 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 341
} | [
2830,
3393,
14194,
21971,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
56776,
7121,
16451,
1155,
11,
56776,
57243,
10674,
340,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
84841,
66,
1669,
1281,
35190,
3749,
37790,
197,
55866... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestLoopInTimeout(t *testing.T) {
testAmt := int64(testLoopInRequest.Amount)
t.Run("internal htlc", func(t *testing.T) {
testLoopInTimeout(t, 0)
})
t.Run("external htlc", func(t *testing.T) {
testLoopInTimeout(t, testAmt)
})
t.Run("external amount too high", func(t *testing.T) {
testLoopInTimeout(t, testAmt+1)
})
t.Run("external amount too low", func(t *testing.T) {
testLoopInTimeout(t, testAmt-1)
})
} | explode_data.jsonl/10214 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 181
} | [
2830,
3393,
14620,
641,
7636,
1155,
353,
8840,
836,
8,
341,
18185,
81278,
1669,
526,
21,
19,
8623,
14620,
641,
1900,
62192,
340,
3244,
16708,
445,
10481,
305,
11544,
66,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
18185,
14620,
641,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_Handler_CheckConvert(t *testing.T) {
var handlerType func(*mockContext) error
var converterCalled, convertedHandlerCalled bool
var convert Converter = func(f GenericHandlerFunc) interface{} {
converterCalled = true
return func(ctx *mockContext) error {
convertedHandlerCalled = true
return f(ctx)
}
}
h, err := New(pipesMock, tMock, convert)
handler := h.Handler()
err = handler.(func(*mockContext) error)(&mockContext{})
assert.NoError(t, err)
assert.True(t, converterCalled)
assert.True(t, convertedHandlerCalled)
assert.IsType(t, handler, handlerType)
} | explode_data.jsonl/45770 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 209
} | [
2830,
3393,
41879,
28188,
12012,
1155,
353,
8840,
836,
8,
341,
2405,
7013,
929,
2915,
4071,
16712,
1972,
8,
1465,
271,
2405,
27058,
20960,
11,
16099,
3050,
20960,
1807,
271,
2405,
5508,
39328,
284,
2915,
955,
21281,
3050,
9626,
8,
3749,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEnclosingFunction(t *testing.T) {
tests := []struct {
input string // the input file
substr string // first occurrence of this string denotes interval
fn string // name of expected containing function
}{
// We use distinctive numbers as syntactic landmarks.
// Ordinary function:
{`package main
func f() { println(1003) }`,
"100", "main.f"},
// Methods:
{`package main
type T int
func (t T) f() { println(200) }`,
"200", "(main.T).f"},
// Function literal:
{`package main
func f() { println(func() { print(300) }) }`,
"300", "func@2.24"},
// Doubly nested
{`package main
func f() { println(func() { print(func() { print(350) })})}`,
"350", "func@2.39"},
// Implicit init for package-level var initializer.
{"package main; var a = 400", "400", "main.init"},
// No code for constants:
{"package main; const a = 500", "500", "(none)"},
// Explicit init()
{"package main; func init() { println(600) }", "600", "main.init$1"},
// Multiple explicit init functions:
{`package main
func init() { println("foo") }
func init() { println(800) }`,
"800", "main.init$2"},
// init() containing FuncLit.
{`package main
func init() { println(func(){print(900)}) }`,
"900", "func@2.27"},
}
for _, test := range tests {
imp := importer.New(new(importer.Config)) // (NB: no go/build.Config)
f, start, end := findInterval(t, imp.Fset, test.input, test.substr)
if f == nil {
continue
}
path, exact := importer.PathEnclosingInterval(f, start, end)
if !exact {
t.Errorf("EnclosingFunction(%q) not exact", test.substr)
continue
}
mainInfo := imp.CreatePackage("main", f)
prog := ssa.NewProgram(imp.Fset, 0)
if err := prog.CreatePackages(imp); err != nil {
t.Error(err)
continue
}
pkg := prog.Package(mainInfo.Pkg)
pkg.Build()
name := "(none)"
fn := ssa.EnclosingFunction(pkg, path)
if fn != nil {
name = fn.String()
}
if name != test.fn {
t.Errorf("EnclosingFunction(%q in %q) got %s, want %s",
test.substr, test.input, name, test.fn)
continue
}
// While we're here: test HasEnclosingFunction.
if has := ssa.HasEnclosingFunction(pkg, path); has != (fn != nil) {
t.Errorf("HasEnclosingFunction(%q in %q) got %v, want %v",
test.substr, test.input, has, fn != nil)
continue
}
}
} | explode_data.jsonl/40771 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 957
} | [
2830,
3393,
7408,
17831,
5152,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
22427,
220,
914,
442,
279,
1946,
1034,
198,
197,
28624,
495,
914,
442,
1156,
31559,
315,
419,
914,
71114,
9873,
198,
197,
40095,
257,
914,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestBalanceReconciliation(t *testing.T) {
var (
account = &types.AccountIdentifier{
Address: "blah",
}
subAccountMetadata2 = &types.AccountIdentifier{
Address: "blah",
SubAccount: &types.SubAccountIdentifier{
Address: "stake",
Metadata: map[string]interface{}{
"cool": float64(10),
},
},
}
currency = &types.Currency{
Symbol: "BLAH",
Decimals: 2,
}
currency2 = &types.Currency{
Symbol: "BLAH2",
Decimals: 4,
}
genesisBlock = &types.BlockIdentifier{
Hash: "0",
Index: 0,
}
newBlock = &types.BlockIdentifier{
Hash: "kdasdj",
Index: 123890,
}
)
ctx := context.Background()
newDir, err := utils.CreateTempDir()
assert.NoError(t, err)
defer utils.RemoveTempDir(newDir)
database, err := newTestBadgerDatabase(ctx, newDir)
assert.NoError(t, err)
defer database.Close(ctx)
storage := NewBalanceStorage(database)
mockHelper := &mocks.BalanceStorageHelper{}
mockHandler := &mocks.BalanceStorageHandler{}
mockHelper.On("Asserter").Return(baseAsserter())
mockHelper.On("ExemptFunc").Return(exemptFunc())
mockHelper.On("BalanceExemptions").Return([]*types.BalanceExemption{})
t.Run("test estimated before helper/handler", func(t *testing.T) {
coverage, err := storage.EstimatedReconciliationCoverage(ctx)
assert.Equal(t, float64(-1), coverage)
assert.True(t, errors.Is(err, storageErrs.ErrHelperHandlerMissing))
})
storage.Initialize(mockHelper, mockHandler)
t.Run("attempt to store reconciliation for non-existent account", func(t *testing.T) {
err := storage.Reconciled(ctx, account, currency, genesisBlock)
assert.NoError(t, err)
coverage, err := storage.ReconciliationCoverage(ctx, 0)
assert.NoError(t, err)
assert.Equal(t, 0.0, coverage)
})
t.Run("set balance", func(t *testing.T) {
txn := storage.db.Transaction(ctx)
newAccount, err := storage.UpdateBalance(
ctx,
txn,
&parser.BalanceChange{
Account: account,
Currency: currency,
Block: genesisBlock,
Difference: "100",
},
genesisBlock,
)
assert.True(t, newAccount)
assert.NoError(t, err)
assert.NoError(t, txn.Commit(ctx))
coverage, err := storage.ReconciliationCoverage(ctx, 0)
assert.NoError(t, err)
assert.Equal(t, 0.0, coverage)
})
t.Run("store reconciliation", func(t *testing.T) {
err := storage.Reconciled(ctx, account, currency, genesisBlock)
assert.NoError(t, err)
txn := storage.db.Transaction(ctx)
newAccount, err := storage.UpdateBalance(
ctx,
txn,
&parser.BalanceChange{
Account: account,
Currency: currency2,
Block: genesisBlock,
Difference: "200",
},
genesisBlock,
)
assert.True(t, newAccount)
assert.NoError(t, err)
assert.NoError(t, txn.Commit(ctx))
coverage, err := storage.ReconciliationCoverage(ctx, 0)
assert.NoError(t, err)
assert.Equal(t, 0.5, coverage)
coverage, err = storage.ReconciliationCoverage(ctx, 1)
assert.NoError(t, err)
assert.Equal(t, 0.0, coverage)
})
t.Run("update reconciliation", func(t *testing.T) {
err := storage.Reconciled(ctx, account, currency, newBlock)
assert.NoError(t, err)
coverage, err := storage.ReconciliationCoverage(ctx, 0)
assert.NoError(t, err)
assert.Equal(t, 0.5, coverage)
coverage, err = storage.ReconciliationCoverage(ctx, 1)
assert.NoError(t, err)
assert.Equal(t, 0.5, coverage)
})
t.Run("update reconciliation to old block", func(t *testing.T) {
err := storage.Reconciled(ctx, account, currency, genesisBlock)
assert.NoError(t, err)
coverage, err := storage.ReconciliationCoverage(ctx, 0)
assert.NoError(t, err)
assert.Equal(t, 0.5, coverage)
// We should skip update so this stays 0.5
coverage, err = storage.ReconciliationCoverage(ctx, 1)
assert.NoError(t, err)
assert.Equal(t, 0.5, coverage)
})
t.Run("add unreconciled", func(t *testing.T) {
txn := storage.db.Transaction(ctx)
newAccount, err := storage.UpdateBalance(
ctx,
txn,
&parser.BalanceChange{
Account: subAccountMetadata2,
Currency: currency2,
Block: newBlock,
Difference: "200",
},
newBlock,
)
assert.True(t, newAccount)
assert.NoError(t, err)
assert.NoError(t, txn.Commit(ctx))
coverage, err := storage.ReconciliationCoverage(ctx, 1)
assert.NoError(t, err)
assert.Equal(t, float64(1)/float64(3), coverage)
})
t.Run("test estimated no reconciliations", func(t *testing.T) {
mockHelper.On("AccountsReconciled", ctx, mock.Anything).Return(big.NewInt(0), nil).Once()
mockHelper.On("AccountsSeen", ctx, mock.Anything).Return(big.NewInt(0), nil).Once()
coverage, err := storage.EstimatedReconciliationCoverage(ctx)
assert.Equal(t, float64(0), coverage)
assert.NoError(t, err)
})
t.Run("test estimated some reconciliations", func(t *testing.T) {
mockHelper.On("AccountsReconciled", ctx, mock.Anything).Return(big.NewInt(1), nil).Once()
mockHelper.On("AccountsSeen", ctx, mock.Anything).Return(big.NewInt(2), nil).Once()
coverage, err := storage.EstimatedReconciliationCoverage(ctx)
assert.Equal(t, float64(0.5), coverage)
assert.NoError(t, err)
})
mockHelper.AssertExpectations(t)
mockHandler.AssertExpectations(t)
} | explode_data.jsonl/14456 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2124
} | [
2830,
3393,
21190,
693,
98240,
1155,
353,
8840,
836,
8,
341,
2405,
2399,
197,
86866,
284,
609,
9242,
30877,
8714,
515,
298,
98090,
25,
330,
70614,
756,
197,
197,
532,
197,
28624,
7365,
14610,
17,
284,
609,
9242,
30877,
8714,
515,
298,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSubValues(t *testing.T) {
m := map[string]interface{}{
"mock": map[string]interface{}{
"somekey": "${mock.flat.otherkey.value}",
"flat": map[string]interface{}{
"otherkey": map[string]interface{}{
"value": "mockReplaceValue",
},
},
},
}
subValues(m, m, nil)
testValue := m["mock"].(map[string]interface{})["somekey"]
assert.Equal(t, "mockReplaceValue", testValue)
} | explode_data.jsonl/4146 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 177
} | [
2830,
3393,
3136,
6227,
1155,
353,
8840,
836,
8,
341,
2109,
1669,
2415,
14032,
31344,
67066,
197,
197,
1,
16712,
788,
2415,
14032,
31344,
67066,
298,
197,
1,
14689,
792,
788,
10857,
16712,
46405,
47781,
792,
2824,
24375,
298,
197,
1,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTxExecutorPrepare(t *testing.T) {
txe, tsv, db := newTestTxExecutor(t)
defer db.Close()
defer tsv.StopService()
txid := newTxForPrep(tsv)
err := txe.Prepare(txid, "aa")
require.NoError(t, err)
err = txe.RollbackPrepared("aa", 1)
require.NoError(t, err)
// A retry should still succeed.
err = txe.RollbackPrepared("aa", 1)
require.NoError(t, err)
// A retry with no original id should also succeed.
err = txe.RollbackPrepared("aa", 0)
require.NoError(t, err)
} | explode_data.jsonl/25160 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 199
} | [
2830,
3393,
31584,
25255,
50590,
1155,
353,
8840,
836,
8,
341,
3244,
8371,
11,
259,
3492,
11,
2927,
1669,
501,
2271,
31584,
25255,
1155,
340,
16867,
2927,
10421,
741,
16867,
259,
3492,
30213,
1860,
741,
46237,
307,
1669,
501,
31584,
246... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStoreRecoverWithExpiration(t *testing.T) {
s := newStore()
s.clock = newFakeClock()
fc := newFakeClock()
var eidx uint64 = 4
s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent})
s.Create("/foo/x", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
s.Create("/foo/y", false, "baz", false, TTLOptionSet{ExpireTime: fc.Now().Add(5 * time.Millisecond)})
b, err := s.Save()
testutil.AssertNil(t, err)
time.Sleep(10 * time.Millisecond)
s2 := newStore()
s2.clock = fc
s2.Recovery(b)
fc.Advance(600 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
e, err := s.Get("/foo/x", false, false)
testutil.AssertNil(t, err)
testutil.AssertEqual(t, e.EtcdIndex, eidx)
testutil.AssertEqual(t, *e.Node.Value, "bar")
e, err = s.Get("/foo/y", false, false)
testutil.AssertNotNil(t, err)
testutil.AssertNil(t, e)
} | explode_data.jsonl/44127 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 368
} | [
2830,
3393,
6093,
693,
3688,
2354,
66301,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
501,
6093,
741,
1903,
50546,
284,
501,
52317,
26104,
2822,
1166,
66,
1669,
501,
52317,
26104,
2822,
2405,
384,
6361,
2622,
21,
19,
284,
220,
19,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReloadCACert(t *testing.T) {
testCases := map[string]struct {
gracePeriodRatio float32
minGracePeriod time.Duration
k8sCaCertFile string
dnsNames []string
secretNames []string
serviceNamespaces []string
expectFaill bool
expectChanged bool
}{
"reload from valid CA cert path": {
gracePeriodRatio: 0.6,
dnsNames: []string{"foo"},
secretNames: []string{"istio.webhook.foo"},
serviceNamespaces: []string{"foo.ns"},
k8sCaCertFile: "./test-data/example-ca-cert.pem",
expectFaill: false,
expectChanged: false,
},
}
for _, tc := range testCases {
client := fake.NewSimpleClientset()
wc, err := NewWebhookController(tc.gracePeriodRatio, tc.minGracePeriod,
client.CoreV1(), client.AdmissionregistrationV1beta1(), client.CertificatesV1beta1(),
tc.k8sCaCertFile, tc.secretNames, tc.dnsNames, tc.serviceNamespaces)
if err != nil {
t.Errorf("failed at creating webhook controller: %v", err)
continue
}
changed, err := reloadCACert(wc)
if tc.expectFaill {
if err == nil {
t.Errorf("should have failed at reloading CA cert")
}
continue
} else if err != nil {
t.Errorf("failed at reloading CA cert: %v", err)
continue
}
if tc.expectChanged {
if !changed {
t.Error("expect changed but not changed")
}
} else {
if changed {
t.Error("expect unchanged but changed")
}
}
}
} | explode_data.jsonl/18246 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 622
} | [
2830,
3393,
50035,
92832,
529,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
2415,
14032,
60,
1235,
341,
197,
90059,
578,
23750,
22777,
220,
2224,
18,
17,
198,
197,
25320,
86543,
23750,
262,
882,
33795,
198,
197,
16463,
23,
82,
22... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func TestRequestBodyTimeoutClosesConnection(t *testing.T) {
if testing.Short() {
t.Skip("skipping in -short mode")
}
defer afterTest(t)
for _, handler := range testHandlerBodyConsumers {
conn := &slowTestConn{
script: []interface{}{
"POST /public HTTP/1.1\r\n" +
"Host: test\r\n" +
"Content-Length: 10000\r\n" +
"\r\n",
"foo bar baz",
600 * time.Millisecond, // Request deadline should hit here
"GET /secret HTTP/1.1\r\n" +
"Host: test\r\n" +
"\r\n",
},
closec: make(chan bool, 1),
}
ls := &oneConnListener{conn}
var numReqs int
s := Server{
Handler: HandlerFunc(func(_ ResponseWriter, req *Request) {
numReqs++
if strings.Contains(req.URL.Path, "secret") {
t.Error("Request for /secret encountered, should not have happened.")
}
handler.f(req.Body)
}),
ReadTimeout: 400 * time.Millisecond,
}
go s.Serve(ls)
<-conn.closec
if numReqs != 1 {
t.Errorf("Handler %v: got %d reqs; want 1", handler.name, numReqs)
}
}
} | explode_data.jsonl/22420 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 462
} | [
2830,
3393,
33334,
7636,
34,
49341,
4526,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
3244,
57776,
445,
4886,
5654,
304,
481,
8676,
3856,
1138,
197,
532,
16867,
1283,
2271,
1155,
340,
2023,
8358,
7013,
1669,
2088,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestDb_RecoverWithLargeJournal(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.put("big1", strings.Repeat("1", 200000))
h.put("big2", strings.Repeat("2", 200000))
h.put("small3", strings.Repeat("3", 10))
h.put("small4", strings.Repeat("4", 10))
h.tablesPerLevel("")
// Make sure that if we re-open with a small write buffer size that
// we flush table files in the middle of a large journal file.
h.o.WriteBuffer = 100000
h.reopenDB()
h.getVal("big1", strings.Repeat("1", 200000))
h.getVal("big2", strings.Repeat("2", 200000))
h.getVal("small3", strings.Repeat("3", 10))
h.getVal("small4", strings.Repeat("4", 10))
v := h.db.s.version()
if v.tLen(0) <= 1 {
t.Errorf("tables-0 less than one")
}
v.release()
} | explode_data.jsonl/6018 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 295
} | [
2830,
3393,
7994,
50693,
3688,
2354,
34253,
43494,
1155,
353,
8840,
836,
8,
341,
9598,
1669,
501,
7994,
74248,
1155,
340,
16867,
305,
4653,
2822,
9598,
3597,
445,
16154,
16,
497,
9069,
2817,
10979,
445,
16,
497,
220,
17,
15,
15,
15,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestColumnConstValue(t *testing.T) {
var tests = []struct {
inColumn ColumnConst
outValue string
}{
{
inColumn: ColumnConst{
payload: ColumnConstPayload{
Value: "foo",
},
},
outValue: "foo",
},
}
for _, test := range tests {
actual, _ := test.inColumn.Value(nil)
if actual != test.outValue {
t.Fatalf("Expected: %v\nActual: %v", test.outValue, actual)
}
}
} | explode_data.jsonl/60719 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 178
} | [
2830,
3393,
2933,
19167,
1130,
1155,
353,
8840,
836,
8,
341,
2405,
7032,
284,
3056,
1235,
341,
197,
17430,
2933,
9332,
19167,
198,
197,
13967,
1130,
914,
198,
197,
59403,
197,
197,
515,
298,
17430,
2933,
25,
9332,
19167,
515,
571,
762... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func Test_Decode_NotEnoughBytes(t *testing.T) {
dec := simple8b.NewDecoder([]byte{0})
if dec.Next() {
t.Fatalf("Expected Next to return false but it returned true")
}
} | explode_data.jsonl/78363 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 66
} | [
2830,
3393,
78668,
534,
60816,
95801,
7078,
1155,
353,
8840,
836,
8,
341,
197,
8169,
1669,
4285,
23,
65,
7121,
20732,
10556,
3782,
90,
15,
3518,
743,
1622,
18501,
368,
341,
197,
3244,
30762,
445,
18896,
9295,
311,
470,
895,
714,
432,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestBackupRestoreIncrementalTrucateTable(t *testing.T) {
defer leaktest.AfterTest(t)()
const numAccounts = 1
_, _, sqlDB, _, cleanupFn := BackupRestoreTestSetup(t, singleNode, numAccounts, InitNone)
defer cleanupFn()
sqlDB.Exec(t, `CREATE TABLE data.t (s string PRIMARY KEY)`)
full, inc := LocalFoo+"/full", LocalFoo+"/inc"
sqlDB.Exec(t, `INSERT INTO data.t VALUES ('before')`)
sqlDB.Exec(t, `BACKUP DATABASE data TO $1`, full)
sqlDB.Exec(t, `UPDATE data.t SET s = 'after'`)
sqlDB.Exec(t, `TRUNCATE data.t`)
sqlDB.Exec(t, "BACKUP DATABASE data TO $1 INCREMENTAL FROM $2", inc, full)
} | explode_data.jsonl/57612 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 252
} | [
2830,
3393,
56245,
56284,
38311,
278,
1282,
1754,
349,
2556,
1155,
353,
8840,
836,
8,
341,
16867,
23352,
1944,
36892,
2271,
1155,
8,
2822,
4777,
1629,
41369,
284,
220,
16,
198,
197,
6878,
8358,
5704,
3506,
11,
8358,
21290,
24911,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetNonAPIRequestInfo(t *testing.T) {
tests := map[string]struct {
url string
expected bool
}{
"simple groupless": {"/api/version/resource", true},
"simple group": {"/apis/group/version/resource/name/subresource", true},
"more steps": {"/api/version/resource/name/subresource", true},
"group list": {"/apis/batch/v1/job", true},
"group get": {"/apis/batch/v1/job/foo", true},
"group subresource": {"/apis/batch/v1/job/foo/scale", true},
"bad root": {"/not-api/version/resource", false},
"group without enough steps": {"/apis/extensions/v1beta1", false},
"group without enough steps 2": {"/apis/extensions/v1beta1/", false},
"not enough steps": {"/api/version", false},
"one step": {"/api", false},
"zero step": {"/", false},
"empty": {"", false},
}
resolver := newTestRequestInfoResolver()
for testName, tc := range tests {
req, _ := http.NewRequest("GET", tc.url, nil)
apiRequestInfo, err := resolver.NewRequestInfo(req)
if err != nil {
t.Errorf("%s: Unexpected error %v", testName, err)
}
if e, a := tc.expected, apiRequestInfo.IsResourceRequest; e != a {
t.Errorf("%s: expected %v, actual %v", testName, e, a)
}
}
} | explode_data.jsonl/46628 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 581
} | [
2830,
3393,
1949,
8121,
7082,
1900,
1731,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
2415,
14032,
60,
1235,
341,
197,
19320,
414,
914,
198,
197,
42400,
1807,
198,
197,
59403,
197,
197,
1,
22944,
1874,
1717,
788,
220,
314,
3115,
2068,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestGetDistrictFormat(t *testing.T) {
testObjects := []struct {
City string
IdDistrict string
Expected string
}{
{City: "testCity", IdDistrict: "testIdDistrict", Expected: "testCity_district_testIdDistrict"},
{City: "testCity2", IdDistrict: "testIdDistrict2", Expected: "testCity2_district_testIdDistrict2"},
}
for _, testObject := range testObjects {
actual := getFormatDistrict(testObject.City, testObject.IdDistrict)
if actual != testObject.Expected {
t.Errorf("Error actual = %v, expected %v\n", actual, testObject.Expected)
}
}
} | explode_data.jsonl/29029 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 204
} | [
2830,
3393,
1949,
47840,
4061,
1155,
353,
8840,
836,
8,
341,
18185,
11543,
1669,
3056,
1235,
341,
197,
6258,
487,
981,
914,
198,
197,
67211,
47840,
914,
198,
197,
197,
18896,
256,
914,
198,
197,
59403,
197,
197,
90,
12730,
25,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestEventsErrorFromServer(t *testing.T) {
client := &Client{
transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")),
}
_, err := client.Events(context.Background(), types.EventsOptions{})
if err == nil || err.Error() != "Error response from daemon: Server error" {
t.Fatalf("expected a Server Error, got %v", err)
}
} | explode_data.jsonl/29017 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 119
} | [
2830,
3393,
7900,
1454,
3830,
5475,
1155,
353,
8840,
836,
8,
341,
25291,
1669,
609,
2959,
515,
197,
197,
26445,
25,
501,
11571,
2959,
27907,
11,
1465,
11571,
19886,
66760,
11,
330,
5475,
1465,
30154,
197,
532,
197,
6878,
1848,
1669,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestADS1x15StartConnectError(t *testing.T) {
d, adaptor := initTestADS1015DriverWithStubbedAdaptor()
adaptor.Testi2cConnectErr(true)
gobottest.Assert(t, d.Start(), errors.New("Invalid i2c connection"))
} | explode_data.jsonl/42582 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 78
} | [
2830,
3393,
49541,
16,
87,
16,
20,
3479,
14611,
1454,
1155,
353,
8840,
836,
8,
341,
2698,
11,
91941,
1669,
2930,
2271,
49541,
16,
15,
16,
20,
11349,
2354,
33838,
2721,
2589,
32657,
741,
98780,
32657,
8787,
72,
17,
66,
14611,
7747,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestErrorRequiredNotSetWithDefault(t *testing.T) {
type config struct {
IsRequired string `env:"IS_REQUIRED,required" envDefault:"important"`
}
cfg := &config{}
assert.EqualError(t, Parse(cfg), "env: required environment variable \"IS_REQUIRED\" is not set")
} | explode_data.jsonl/78781 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 90
} | [
2830,
3393,
1454,
8164,
2623,
1649,
2354,
3675,
1155,
353,
8840,
836,
8,
341,
13158,
2193,
2036,
341,
197,
197,
58541,
914,
1565,
3160,
2974,
1637,
53912,
11,
6279,
1,
6105,
3675,
2974,
15333,
8805,
197,
630,
50286,
1669,
609,
1676,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInstrumentationLibraryMetrics_InstrumentationLibrary(t *testing.T) {
ms := NewInstrumentationLibraryMetrics()
ms.InitEmpty()
assert.EqualValues(t, true, ms.InstrumentationLibrary().IsNil())
ms.InstrumentationLibrary().InitEmpty()
assert.EqualValues(t, false, ms.InstrumentationLibrary().IsNil())
fillTestInstrumentationLibrary(ms.InstrumentationLibrary())
assert.EqualValues(t, generateTestInstrumentationLibrary(), ms.InstrumentationLibrary())
} | explode_data.jsonl/19503 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 140
} | [
2830,
3393,
56324,
367,
16915,
27328,
25972,
19474,
367,
16915,
1155,
353,
8840,
836,
8,
341,
47691,
1669,
1532,
56324,
367,
16915,
27328,
741,
47691,
26849,
3522,
741,
6948,
12808,
6227,
1155,
11,
830,
11,
9829,
5337,
19474,
367,
16915,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInlineFN(t *testing.T) {
e := Wrap(func() error {
return Wrap(func() error {
return Wrap(func() error {
return Wrap(func() error {
return New("Turtles").WithContext(" all the way down.").WithTrace()
}()).WithContext("%d", 1).WithTrace()
}()).WithContext("%d", 2).WithTrace()
}()).WithContext("%d", 3).WithTrace()
}()).WithContext("%d", 4).WithTrace()
et := e.(ErrMadNet)
fmt.Printf("%s\n", et)
} | explode_data.jsonl/54723 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 183
} | [
2830,
3393,
25324,
41604,
1155,
353,
8840,
836,
8,
341,
7727,
1669,
42187,
18552,
368,
1465,
341,
197,
853,
42187,
18552,
368,
1465,
341,
298,
853,
42187,
18552,
368,
1465,
341,
571,
853,
42187,
18552,
368,
1465,
341,
464,
853,
1532,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBatchObserve(t *testing.T) {
session := createSession(t)
defer session.Close()
if session.cfg.ProtoVersion == 1 {
t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
}
if err := createTable(session, `CREATE TABLE gocql_test.batch_observe_table (id int, other int, PRIMARY KEY (id))`); err != nil {
t.Fatal("create table:", err)
}
type observation struct {
observedErr error
observedKeyspace string
observedStmts []string
}
var observedBatch *observation
batch := session.NewBatch(LoggedBatch)
batch.Observer(funcBatchObserver(func(ctx context.Context, o ObservedBatch) {
if observedBatch != nil {
t.Fatal("batch observe called more than once")
}
observedBatch = &observation{
observedKeyspace: o.Keyspace,
observedStmts: o.Statements,
observedErr: o.Err,
}
}))
for i := 0; i < 100; i++ {
// hard coding 'i' into one of the values for better testing of observation
batch.Query(fmt.Sprintf(`INSERT INTO batch_observe_table (id,other) VALUES (?,%d)`, i), i)
}
if err := session.ExecuteBatch(batch); err != nil {
t.Fatal("execute batch:", err)
}
if observedBatch == nil {
t.Fatal("batch observation has not been called")
}
if len(observedBatch.observedStmts) != 100 {
t.Fatal("expecting 100 observed statements, got", len(observedBatch.observedStmts))
}
if observedBatch.observedErr != nil {
t.Fatal("not expecting to observe an error", observedBatch.observedErr)
}
if observedBatch.observedKeyspace != "gocql_test" {
t.Fatalf("expecting keyspace 'gocql_test', got %q", observedBatch.observedKeyspace)
}
for i, stmt := range observedBatch.observedStmts {
if stmt != fmt.Sprintf(`INSERT INTO batch_observe_table (id,other) VALUES (?,%d)`, i) {
t.Fatal("unexpected query", stmt)
}
}
} | explode_data.jsonl/11169 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 696
} | [
2830,
3393,
21074,
4121,
13267,
1155,
353,
8840,
836,
8,
341,
25054,
1669,
1855,
5283,
1155,
340,
16867,
3797,
10421,
2822,
743,
3797,
30481,
7763,
983,
5637,
621,
220,
16,
341,
197,
3244,
57776,
445,
6618,
44792,
537,
7248,
13,
5209,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGetVersion(t *testing.T) {
v, err := utils.GetLatestVersionFromGithub()
if err != nil {
t.Fatal(err)
}
_, err = semver.NewVersion(v)
if err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/37548 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 84
} | [
2830,
3393,
1949,
5637,
1155,
353,
8840,
836,
8,
341,
5195,
11,
1848,
1669,
12439,
2234,
31992,
5637,
3830,
78717,
741,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
630,
197,
6878,
1848,
284,
5234,
423,
7121,
5637,
374... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 3 |
func TestInstance_Clone(t *testing.T) {
g := NewGomegaWithT(t)
inst := collection.New(basicmeta.K8SCollection1)
inst.Set(data.EntryN1I1V1)
inst.Set(data.EntryN2I2V2)
inst2 := inst.Clone()
g.Expect(inst2.Size()).To(Equal(2))
g.Expect(inst2.Generation()).To(Equal(int64(2)))
var fe []*resource.Instance
inst2.ForEach(func(r *resource.Instance) bool {
fe = append(fe, r)
return true
})
g.Expect(fe).To(HaveLen(2))
inst.Remove(data.EntryN1I1V1.Metadata.FullName)
g.Expect(inst2.Size()).To(Equal(2))
g.Expect(inst2.Generation()).To(Equal(int64(2)))
fe = nil
inst2.ForEach(func(r *resource.Instance) bool {
fe = append(fe, r)
return true
})
g.Expect(fe).To(HaveLen(2))
} | explode_data.jsonl/70419 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 318
} | [
2830,
3393,
2523,
85110,
603,
1155,
353,
8840,
836,
8,
341,
3174,
1669,
1532,
38,
32696,
2354,
51,
1155,
692,
88656,
1669,
4426,
7121,
1883,
5971,
5490,
11352,
23,
3540,
1908,
16,
340,
88656,
4202,
2592,
22330,
45,
16,
40,
16,
53,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestImageSize1(t *testing.T) {
src, _ := NewImage(1, 1, FilterNearest)
dst, _ := NewImage(1, 1, FilterNearest)
src.Fill(color.White)
dst.DrawImage(src, nil)
got := src.At(0, 0).(color.RGBA)
want := color.RGBA{0xff, 0xff, 0xff, 0xff}
if !sameColors(got, want, 1) {
t.Errorf("got: %#v, want: %#v", got, want)
}
} | explode_data.jsonl/10904 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 153
} | [
2830,
3393,
1906,
1695,
16,
1155,
353,
8840,
836,
8,
341,
41144,
11,
716,
1669,
1532,
1906,
7,
16,
11,
220,
16,
11,
12339,
8813,
15432,
340,
52051,
11,
716,
1669,
1532,
1906,
7,
16,
11,
220,
16,
11,
12339,
8813,
15432,
340,
41144,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestNamedTypeField(t *testing.T) {
type foo struct {
ID
}
f := &foo{ID: 5}
g := &foo{ID: 10}
_, err := starlight.Eval([]byte(`f.ID = g.ID`), map[string]interface{}{"f": f, "g": g}, nil)
if err != nil {
t.Fatal(err)
}
if f.ID != ID(10) {
t.Fatalf("expected %v, but got %v", ID(10), f.ID)
}
} | explode_data.jsonl/47074 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 155
} | [
2830,
3393,
15810,
929,
1877,
1155,
353,
8840,
836,
8,
341,
13158,
15229,
2036,
341,
197,
29580,
198,
197,
532,
1166,
1669,
609,
7975,
90,
915,
25,
220,
20,
532,
3174,
1669,
609,
7975,
90,
915,
25,
220,
16,
15,
532,
197,
6878,
184... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func Test_doFailToWriteSequenceNumber(t *testing.T) {
ctx := log.NewSyncLogger(log.NewLogfmtLogger(os.Stdout))
mm := createMockVMExtensionEnvironmentManager()
mm.setSequenceNumberError = extensionerrors.ErrMustRunAsAdmin
ii, _ := GetInitializationInfo("yaba", "5.0", true, testEnableCallback)
ext, _ := getVMExtensionInternal(ctx, ii, mm)
// We log but continue if we fail to write the sequence number
oldArgs := os.Args
defer putBackArgs(oldArgs)
os.Args = make([]string, 2)
os.Args[0] = "dontcare"
os.Args[1] = "enable"
ext.Do(ctx)
} | explode_data.jsonl/18591 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 202
} | [
2830,
3393,
26309,
19524,
1249,
7985,
14076,
2833,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
1487,
7121,
12154,
7395,
12531,
7121,
2201,
12501,
7395,
9638,
83225,
1171,
2109,
76,
1669,
1855,
11571,
11187,
12049,
12723,
2043,
741,
2109,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReplaceHook_UpdateHook(t *testing.T) {
controller := gomock.NewController(t)
defer controller.Finish()
hooks := []*scm.Hook{
{
ID: "1",
Target: "https://drone.company.com/hook",
},
}
hookInput := &scm.HookInput{
Target: "https://drone.company.com/hook",
}
remote := mockscm.NewMockRepositoryService(controller)
remote.EXPECT().ListHooks(gomock.Any(), "octocat/hello-world", gomock.Any()).Return(hooks, nil, nil)
remote.EXPECT().DeleteHook(gomock.Any(), "octocat/hello-world", "1").Return(nil, nil)
remote.EXPECT().CreateHook(gomock.Any(), "octocat/hello-world", hookInput).Return(nil, nil, nil)
client := new(scm.Client)
client.Repositories = remote
err := replaceHook(context.Background(), client, "octocat/hello-world", hookInput)
if err != nil {
t.Error(err)
}
} | explode_data.jsonl/40651 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 321
} | [
2830,
3393,
23107,
31679,
47393,
31679,
1155,
353,
8840,
836,
8,
341,
61615,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
16867,
6461,
991,
18176,
2822,
9598,
14685,
1669,
29838,
2388,
76,
3839,
1941,
515,
197,
197,
515,
298,
29580,
25,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestIncreaseWindowOnRead(t *testing.T) {
// This test ensures that the endpoint sends an ack,
// after read() when the window grows by more than 1 MSS.
c := context.New(t, defaultMTU)
defer c.Cleanup()
const rcvBuf = 65535 * 10
c.CreateConnected(context.TestInitialSequenceNumber, 30000, rcvBuf)
// Write chunks of ~30000 bytes. It's important that two
// payloads make it equal or longer than MSS.
remain := rcvBuf * 2
sent := 0
data := make([]byte, defaultMTU/2)
iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
for remain > len(data) {
c.SendPacket(data, &context.Headers{
SrcPort: context.TestPort,
DstPort: c.Port,
Flags: header.TCPFlagAck,
SeqNum: iss.Add(seqnum.Size(sent)),
AckNum: c.IRS.Add(1),
RcvWnd: 30000,
})
sent += len(data)
remain -= len(data)
pkt := c.GetPacket()
checker.IPv4(t, pkt,
checker.PayloadLen(header.TCPMinimumSize),
checker.TCP(
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(uint32(iss)+uint32(sent)),
checker.TCPFlags(header.TCPFlagAck),
),
)
// Break once the window drops below defaultMTU/2
if wnd := header.TCP(header.IPv4(pkt).Payload()).WindowSize(); wnd < defaultMTU/2 {
break
}
}
// We now have < 1 MSS in the buffer space. Read at least > 2 MSS
// worth of data as receive buffer space
w := tcpip.LimitedWriter{
W: ioutil.Discard,
// defaultMTU is a good enough estimate for the MSS used for this
// connection.
N: defaultMTU * 2,
}
for w.N != 0 {
_, err := c.EP.Read(&w, tcpip.ReadOptions{})
if err != nil {
t.Fatalf("Read failed: %s", err)
}
}
// After reading > MSS worth of data, we surely crossed MSS. See the ack:
checker.IPv4(t, c.GetPacket(),
checker.PayloadLen(header.TCPMinimumSize),
checker.TCP(
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(uint32(iss)+uint32(sent)),
checker.TCPWindow(uint16(0xffff)),
checker.TCPFlags(header.TCPFlagAck),
),
)
} | explode_data.jsonl/76028 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 869
} | [
2830,
3393,
69556,
4267,
1925,
4418,
1155,
353,
8840,
836,
8,
341,
197,
322,
1096,
1273,
25351,
429,
279,
14887,
21308,
458,
10725,
345,
197,
322,
1283,
1349,
368,
979,
279,
3241,
27715,
553,
803,
1091,
220,
16,
91649,
624,
1444,
1669... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestCopyDataFrom(t *testing.T) {
config := NewConfig()
config.CopyDataFrom = "copy_data_from"
mysqld, err := NewMysqld(config)
if err != nil {
t.Errorf("Failed to start mysqld: %s", err)
return
}
defer mysqld.Stop()
db, err := sql.Open("mysql", mysqld.Datasource("test", "", "", 0))
if err != nil {
t.Errorf("Failed to connect to database: %s", err)
return
}
rows, err := db.Query("select id,str from test.hello order by id")
if err != nil {
t.Errorf("Failed to fetch data: %s", err)
return
}
var id int
var str string
rows.Next()
rows.Scan(&id, &str)
if id != 1 || str != "hello" {
t.Errorf("Data do not match, got (id:%d str:%s)", id, str)
return
}
rows.Next()
rows.Scan(&id, &str)
if id != 2 || str != "ciao" {
t.Errorf("Data do not match, got (id:%d str:%s)", id, str)
return
}
} | explode_data.jsonl/77525 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 357
} | [
2830,
3393,
12106,
1043,
3830,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
1532,
2648,
741,
25873,
31770,
1043,
3830,
284,
330,
8560,
1769,
5673,
1837,
2109,
1047,
80,
507,
11,
1848,
1669,
1532,
44,
1047,
80,
507,
8754,
340,
743,
1848,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestConnMuxClose(t *testing.T) {
c, err := redisx.DialTest()
if err != nil {
t.Fatalf("error connection to database, %v", err)
}
m := redisx.NewConnMux(c)
defer m.Close()
c1 := m.Get()
c2 := m.Get()
if err := c1.Send("ECHO", "hello"); err != nil {
t.Fatal(err)
}
if err := c1.Close(); err != nil {
t.Fatal(err)
}
if err := c2.Send("ECHO", "world"); err != nil {
t.Fatal(err)
}
if err := c2.Flush(); err != nil {
t.Fatal(err)
}
s, err := redis.String(c2.Receive())
if err != nil {
t.Fatal(err)
}
if s != "world" {
t.Fatalf("echo returned %q, want %q", s, "world")
}
c2.Close()
} | explode_data.jsonl/81834 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 297
} | [
2830,
3393,
9701,
44,
2200,
7925,
1155,
353,
8840,
836,
8,
341,
1444,
11,
1848,
1669,
20870,
87,
98462,
2271,
741,
743,
1848,
961,
2092,
341,
197,
3244,
30762,
445,
841,
3633,
311,
4625,
11,
1018,
85,
497,
1848,
340,
197,
532,
2109,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestMagneticFluxDensityFormat(t *testing.T) {
for _, test := range []struct {
value MagneticFluxDensity
format string
want string
}{
{1.23456789, "%v", "1.23456789 T"},
{1.23456789, "%.1v", "1 T"},
{1.23456789, "%20.1v", " 1 T"},
{1.23456789, "%20v", " 1.23456789 T"},
{1.23456789, "%1v", "1.23456789 T"},
{1.23456789, "%#v", "unit.MagneticFluxDensity(1.23456789)"},
{1.23456789, "%s", "%!s(unit.MagneticFluxDensity=1.23456789 T)"},
} {
got := fmt.Sprintf(test.format, test.value)
if got != test.want {
t.Errorf("Format %q %v: got: %q want: %q", test.format, float64(test.value), got, test.want)
}
}
} | explode_data.jsonl/29067 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 339
} | [
2830,
3393,
44,
38000,
3882,
2200,
66719,
4061,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
1273,
1669,
2088,
3056,
1235,
341,
197,
16309,
220,
62655,
3882,
2200,
66719,
198,
197,
59416,
914,
198,
197,
50780,
256,
914,
198,
197,
59403,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestJoin(t *testing.T) {
t.Parallel()
testJoin(t, "", "")
testJoin(t, "", "", "")
testJoin(t, ".", ".", ".")
testJoin(t, ".", "", ".", "")
testJoin(t, "foo/bar", "foo", "./bar")
testJoin(t, "foo", "foo", "./bar", "..")
testJoin(t, "/foo/bar", "/foo", "./bar")
testJoin(t, "/foo", "/foo", "./bar", "..")
testJoin(t, "bar", ".", "bar")
} | explode_data.jsonl/11901 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 165
} | [
2830,
3393,
12292,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
18185,
12292,
1155,
11,
7342,
14676,
18185,
12292,
1155,
11,
7342,
7342,
14676,
18185,
12292,
1155,
11,
68514,
68514,
5933,
1138,
18185,
12292,
1155,
11,
68514,
7342... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUserEntityUpdateSUCCESS(t *testing.T) {
resp, _ := sendPut("http://localhost:8080/TESTING/YourAccount&q=Update", UserEntityUpdateSUCCESS, auth.Header.Get("Authorization"))
response := responseToString(resp)
compareResults(t, response, HyperText.CustomResponses["success-update"])
} | explode_data.jsonl/59342 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 94
} | [
2830,
3393,
1474,
3030,
4289,
39308,
1155,
353,
8840,
836,
8,
341,
34653,
11,
716,
1669,
3624,
19103,
445,
1254,
1110,
8301,
25,
23,
15,
23,
15,
14,
10033,
1718,
14,
7771,
7365,
62735,
28,
4289,
497,
2657,
3030,
4289,
39308,
11,
416... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGenerate(t *testing.T) {
expected := `apiVersion: v1
data:
domain: test-domain
namespace: test-namespace
orgID: test-orgID
projectID: test-projectID
region: test-region
zone: test-zone
kind: ConfigMap
metadata:
annotations:
config.kubernetes.io/local-config: "true"
name: gcloud-config.kpt.dev
`
exec.GetGcloudContextFn = func() (map[string]string, error) {
return map[string]string{
"domain": "test-domain",
"namespace": "test-namespace",
"orgID": "test-orgID",
"projectID": "test-projectID",
"region": "test-region",
"zone": "test-zone",
}, nil
}
gen := GcloudConfigGenerator{}
outputs, err := gen.Generate([]*yaml.RNode{})
if err != nil {
t.Fatalf("unexpected error %v", err)
}
if len(outputs) != 1 {
t.Fatalf("expect to generate 1 rnode, got %v", len(outputs))
}
a, _ := outputs[0].MarshalJSON()
actual, _ := yaml2.JSONToYAML(a)
if string(actual) != expected {
t.Fatalf("expect %v, got %v", expected, string(actual))
}
} | explode_data.jsonl/63540 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 421
} | [
2830,
3393,
31115,
1155,
353,
8840,
836,
8,
341,
42400,
1669,
1565,
2068,
5637,
25,
348,
16,
198,
691,
510,
220,
7947,
25,
1273,
72794,
198,
220,
4473,
25,
1273,
12,
2231,
198,
220,
1240,
915,
25,
1273,
71184,
915,
198,
220,
2390,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLoadCEKV(t *testing.T) {
certFile, err := os.Open("../test/always-encrypted_pub.pem")
if err != nil {
t.Fatal(err)
}
certBytes, err := ioutil.ReadAll(certFile)
if err != nil {
t.Fatal(err)
}
pemB, _ := pem.Decode(certBytes)
cert, err := x509.ParseCertificate(pemB.Bytes)
if err != nil {
t.Fatal(nil)
}
cekvFile, err := os.Open("../test/cekv.key")
if err != nil {
t.Fatal(err)
}
cekvBytes, err := ioutil.ReadAll(cekvFile)
cekv := LoadCEKV(cekvBytes)
assert.Equal(t, 1, cekv.Version)
assert.True(t, cekv.Verify(cert))
} | explode_data.jsonl/655 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 266
} | [
2830,
3393,
5879,
2104,
82707,
1155,
353,
8840,
836,
8,
341,
1444,
529,
1703,
11,
1848,
1669,
2643,
12953,
17409,
1944,
14,
32122,
12,
36444,
34014,
49373,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
630,
1444,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestNullIDResilience(t *testing.T) {
db := dbtesting.GetDB(t)
sr := New(store.New(db))
s, err := graphqlbackend.NewSchema(db, sr, nil, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}
ctx := backend.WithAuthzBypass(context.Background())
ids := []graphql.ID{
marshalBatchChangeID(0),
marshalChangesetID(0),
marshalBatchSpecRandID(""),
marshalChangesetSpecRandID(""),
marshalBatchChangesCredentialID(0),
}
for _, id := range ids {
var response struct{ Node struct{ ID string } }
query := fmt.Sprintf(`query { node(id: %q) { id } }`, id)
apitest.MustExec(ctx, t, s, nil, &response, query)
if have, want := response.Node.ID, ""; have != want {
t.Fatalf("node has wrong ID. have=%q, want=%q", have, want)
}
}
mutations := []string{
fmt.Sprintf(`mutation { closeBatchChange(batchChange: %q) { id } }`, marshalBatchChangeID(0)),
fmt.Sprintf(`mutation { deleteBatchChange(batchChange: %q) { alwaysNil } }`, marshalBatchChangeID(0)),
fmt.Sprintf(`mutation { syncChangeset(changeset: %q) { alwaysNil } }`, marshalChangesetID(0)),
fmt.Sprintf(`mutation { reenqueueChangeset(changeset: %q) { id } }`, marshalChangesetID(0)),
fmt.Sprintf(`mutation { applyBatchChange(batchSpec: %q) { id } }`, marshalBatchSpecRandID("")),
fmt.Sprintf(`mutation { createBatchChange(batchSpec: %q) { id } }`, marshalBatchSpecRandID("")),
fmt.Sprintf(`mutation { moveBatchChange(batchChange: %q, newName: "foobar") { id } }`, marshalBatchChangeID(0)),
fmt.Sprintf(`mutation { createBatchChangesCredential(externalServiceKind: GITHUB, externalServiceURL: "http://test", credential: "123123", user: %q) { id } }`, graphqlbackend.MarshalUserID(0)),
fmt.Sprintf(`mutation { deleteBatchChangesCredential(batchChangesCredential: %q) { alwaysNil } }`, marshalBatchChangesCredentialID(0)),
}
for _, m := range mutations {
var response struct{}
errs := apitest.Exec(ctx, t, s, nil, &response, m)
if len(errs) == 0 {
t.Fatalf("expected errors but none returned (mutation: %q)", m)
}
if have, want := errs[0].Error(), fmt.Sprintf("graphql: %s", ErrIDIsZero{}); have != want {
t.Fatalf("wrong errors. have=%s, want=%s (mutation: %q)", have, want, m)
}
}
} | explode_data.jsonl/13585 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 878
} | [
2830,
3393,
3280,
915,
1061,
321,
1835,
1155,
353,
8840,
836,
8,
341,
20939,
1669,
2927,
8840,
2234,
3506,
1155,
340,
1903,
81,
1669,
1532,
31200,
7121,
9791,
4390,
1903,
11,
1848,
1669,
48865,
20942,
7121,
8632,
9791,
11,
18962,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestFuturesBasisData(t *testing.T) {
t.Parallel()
_, err := b.GetFuturesBasisData(context.Background(), "BTCUSD", "CURRENT_QUARTER", "5m", 0, time.Time{}, time.Time{})
if err != nil {
t.Error(err)
}
_, err = b.GetFuturesBasisData(context.Background(), "BTCUSD", "CURRENT_QUARTER", "5m", 0, time.Unix(1577836800, 0), time.Unix(1580515200, 0))
if err != nil {
t.Error(err)
}
} | explode_data.jsonl/76617 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 171
} | [
2830,
3393,
37,
74606,
33,
10718,
1043,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
197,
6878,
1848,
1669,
293,
2234,
37,
74606,
33,
10718,
1043,
5378,
19047,
1507,
330,
59118,
26749,
497,
330,
44690,
16332,
2992,
640,
497,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSendVerificationEmail(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
pass, resp := Client.SendVerificationEmail(th.BasicUser.Email)
CheckNoError(t, resp)
if !pass {
t.Fatal("should have passed")
}
_, resp = Client.SendVerificationEmail("")
CheckBadRequestStatus(t, resp)
// Even non-existent emails should return 200 OK
_, resp = Client.SendVerificationEmail(th.GenerateTestEmail())
CheckNoError(t, resp)
Client.Logout()
_, resp = Client.SendVerificationEmail(th.BasicUser.Email)
CheckNoError(t, resp)
} | explode_data.jsonl/21553 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 197
} | [
2830,
3393,
11505,
62339,
4781,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1005,
3803,
15944,
741,
16867,
270,
836,
682,
4454,
741,
71724,
1669,
270,
11716,
271,
41431,
11,
9039,
1669,
8423,
20176,
62339,
4781,
24365,
48868,
1474,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestSpanEventSlice(t *testing.T) {
es := NewSpanEventSlice()
assert.EqualValues(t, 0, es.Len())
es = newSpanEventSlice(&[]*otlptrace.Span_Event{})
assert.EqualValues(t, 0, es.Len())
es.EnsureCapacity(7)
emptyVal := newSpanEvent(&otlptrace.Span_Event{})
testVal := generateTestSpanEvent()
assert.EqualValues(t, 7, cap(*es.orig))
for i := 0; i < es.Len(); i++ {
el := es.AppendEmpty()
assert.EqualValues(t, emptyVal, el)
fillTestSpanEvent(el)
assert.EqualValues(t, testVal, el)
}
} | explode_data.jsonl/63278 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 218
} | [
2830,
3393,
12485,
1556,
33236,
1155,
353,
8840,
836,
8,
341,
78966,
1669,
1532,
12485,
1556,
33236,
741,
6948,
12808,
6227,
1155,
11,
220,
15,
11,
1531,
65819,
2398,
78966,
284,
501,
12485,
1556,
33236,
2099,
1294,
9,
354,
75,
3505,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestBlobClientWriteFile(t *testing.T) {
localNodeID := roachpb.NodeID(1)
remoteNodeID := roachpb.NodeID(2)
localExternalDir, remoteExternalDir, stopper, cleanUpFn := createTestResources(t)
defer cleanUpFn()
clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
rpcContext := rpc.NewInsecureTestingContext(clock, stopper)
rpcContext.TestingAllowNamedRPCToAnonymousServer = true
blobClientFactory := setUpService(t, rpcContext, localNodeID, remoteNodeID, localExternalDir, remoteExternalDir)
for _, tc := range []struct {
name string
nodeID roachpb.NodeID
filename string
fileContent string
destinationNodeDir string
}{
{
"write-remote-file",
remoteNodeID,
"test/remote.csv",
"remotefile",
remoteExternalDir,
},
{
"write-local-file",
localNodeID,
"test/local.csv",
"localfile",
localExternalDir,
},
} {
t.Run(tc.name, func(t *testing.T) {
ctx := context.TODO()
blobClient, err := blobClientFactory(ctx, tc.nodeID)
if err != nil {
t.Fatal(err)
}
byteContent := []byte(tc.fileContent)
err = blobClient.WriteFile(ctx, tc.filename, bytes.NewReader(byteContent))
if err != nil {
t.Fatal(err)
}
// Check that file is now in correct node
content, err := ioutil.ReadFile(filepath.Join(tc.destinationNodeDir, tc.filename))
if err != nil {
t.Fatal(err, "unable to read fetched file")
}
if !bytes.Equal(content, byteContent) {
t.Fatal(fmt.Sprintf(`fetched file content incorrect, expected %s, got %s`, tc.fileContent, content))
}
})
}
} | explode_data.jsonl/82491 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 672
} | [
2830,
3393,
37985,
2959,
7985,
1703,
1155,
353,
8840,
836,
8,
341,
8854,
1955,
915,
1669,
926,
610,
16650,
21714,
915,
7,
16,
340,
197,
18147,
1955,
915,
1669,
926,
610,
16650,
21714,
915,
7,
17,
340,
8854,
25913,
6184,
11,
8699,
25... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestUptimeMonitorMonitorToBaseMonitorMapper(t *testing.T) {
uptimeMonitorObject := UptimeMonitorMonitor{Name: "Test Monitor",
PK: 124,
MspAddress: "https://stakater.com",
MspInterval: 5,
CheckType: "HTTP"}
monitorObject := UptimeMonitorMonitorToBaseMonitorMapper(uptimeMonitorObject)
providerConfig, _ := monitorObject.Config.(*endpointmonitorv1alpha1.UptimeConfig)
if monitorObject.ID != strconv.Itoa(uptimeMonitorObject.PK) ||
monitorObject.Name != uptimeMonitorObject.Name ||
monitorObject.URL != uptimeMonitorObject.MspAddress ||
5 != providerConfig.Interval ||
"HTTP" != providerConfig.CheckType {
t.Error("Correct: \n",
uptimeMonitorObject.Name,
uptimeMonitorObject.PK,
uptimeMonitorObject.MspAddress,
uptimeMonitorObject.MspInterval,
uptimeMonitorObject.CheckType)
t.Error("Parsed: \n", monitorObject.Name,
monitorObject.ID,
monitorObject.URL,
providerConfig.Interval,
providerConfig.CheckType,
)
t.Error("Mapper did not map the values correctly")
}
} | explode_data.jsonl/37446 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 376
} | [
2830,
3393,
52,
28941,
30098,
30098,
1249,
3978,
30098,
10989,
1155,
353,
8840,
836,
8,
341,
197,
74659,
30098,
1190,
1669,
547,
28941,
30098,
30098,
63121,
25,
330,
2271,
23519,
756,
197,
10025,
42,
25,
688,
220,
16,
17,
19,
345,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestBitwiseOrVsComprehension(t *testing.T) {
x := VarTerm("x")
y := VarTerm("y")
z := VarTerm("z")
a := VarTerm("a")
b := VarTerm("b")
tests := []struct {
note string
input string
exp *Term
}{
{
note: "array containing bitwise or",
input: "[x|y,z]",
exp: ArrayTerm(Or.Call(x, y), z),
},
{
note: "array containing bitwise or - last element",
input: "[z,x|y]",
exp: ArrayTerm(z, Or.Call(x, y)),
},
{
note: "array containing bitwise or - middle",
input: "[z,x|y,a]",
exp: ArrayTerm(z, Or.Call(x, y), a),
},
{
note: "array containing single bitwise or",
input: "[x|y,]",
exp: ArrayTerm(Or.Call(x, y)),
},
{
note: "set containing bitwise or",
input: "{x|y,z}",
exp: SetTerm(Or.Call(x, y), z),
},
{
note: "set containing bitwise or - last element",
input: "{z,x|y}",
exp: SetTerm(z, Or.Call(x, y)),
},
{
note: "set containing bitwise or - middle",
input: "{z,x|y,a}",
exp: SetTerm(z, Or.Call(x, y), a),
},
{
note: "set containing single bitwise or",
input: "{x|y,}",
exp: SetTerm(Or.Call(x, y)),
},
{
note: "object containing bitwise or",
input: "{x:y|z,a:b}",
exp: ObjectTerm([2]*Term{x, Or.Call(y, z)}, [2]*Term{a, b}),
},
{
note: "object containing single bitwise or",
input: "{x:y|z,}",
exp: ObjectTerm([2]*Term{x, Or.Call(y, z)}),
},
}
for _, tc := range tests {
t.Run(tc.note, func(t *testing.T) {
term, err := ParseTerm(tc.input)
if err != nil {
t.Fatal(err)
}
if !term.Equal(tc.exp) {
t.Fatalf("Expected %v but got %v", tc.exp, term)
}
})
}
} | explode_data.jsonl/50480 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 835
} | [
2830,
3393,
8344,
4482,
2195,
51737,
1092,
30782,
2645,
1155,
353,
8840,
836,
8,
1476,
10225,
1669,
8735,
17249,
445,
87,
1138,
14522,
1669,
8735,
17249,
445,
88,
1138,
20832,
1669,
8735,
17249,
445,
89,
1138,
11323,
1669,
8735,
17249,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func Test_connCheck(t *testing.T) {
// tests with real conns
ts := httptest.NewServer(nil)
defer ts.Close()
t.Run("good conn", func(t *testing.T) {
conn, err := net.DialTimeout(ts.Listener.Addr().Network(), ts.Listener.Addr().String(), time.Second)
if err != nil {
t.Fatalf(err.Error())
}
defer conn.Close()
if err = connCheck(conn); err != nil {
t.Fatalf(err.Error())
}
conn.Close()
if err = connCheck(conn); err == nil {
t.Fatalf("expect has error")
}
})
t.Run("bad conn 2", func(t *testing.T) {
conn, err := net.DialTimeout(ts.Listener.Addr().Network(), ts.Listener.Addr().String(), time.Second)
if err != nil {
t.Fatalf(err.Error())
}
defer conn.Close()
ts.Close()
if err = connCheck(conn); err == nil {
t.Fatalf("expect has err")
}
})
} | explode_data.jsonl/79286 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 335
} | [
2830,
3393,
17241,
3973,
1155,
353,
8840,
836,
8,
341,
197,
322,
7032,
448,
1931,
390,
4412,
198,
57441,
1669,
54320,
70334,
7121,
5475,
27907,
340,
16867,
10591,
10421,
2822,
3244,
16708,
445,
18536,
4534,
497,
2915,
1155,
353,
8840,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func Test_RAM_ListRolesWithRPCrequest(t *testing.T) {
client, err := ram.NewClientWithAccessKey(os.Getenv("REGION_ID"), os.Getenv("ACCESS_KEY_ID"), os.Getenv("ACCESS_KEY_SECRET"))
assert.Nil(t, err)
request := ram.CreateListRolesRequest()
request.Scheme = "HTTPS"
response, err := client.ListRoles(request)
assert.Nil(t, err)
assert.True(t, response.IsSuccess())
assert.Equal(t, 36, len(response.RequestId))
} | explode_data.jsonl/56875 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 161
} | [
2830,
3393,
62393,
27104,
25116,
2354,
29528,
2035,
1155,
353,
8840,
836,
8,
341,
25291,
11,
1848,
1669,
17823,
7121,
2959,
2354,
6054,
1592,
9638,
64883,
445,
77431,
3450,
3975,
2643,
64883,
445,
55749,
6600,
3450,
3975,
2643,
64883,
445... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCircuitBreaker_isHalfOpen(t *testing.T) {
type fields struct {
status status
nextRetry int64
}
tests := []struct {
name string
fields fields
want bool
}{
{name: "closed", fields: fields{status: close, nextRetry: tsFuture}, want: false},
{name: "open", fields: fields{status: open, nextRetry: time.Now().Add(1 * time.Hour).UnixNano()}, want: false},
{name: "half open", fields: fields{status: open, nextRetry: time.Now().Add(-1 * time.Minute).UnixNano()}, want: true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cb := &CircuitBreaker{
status: tt.fields.status,
nextRetry: tt.fields.nextRetry,
}
assert.Equal(t, tt.want, cb.isHalfOpen())
})
}
} | explode_data.jsonl/24251 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 304
} | [
2830,
3393,
34,
37268,
22524,
261,
6892,
42627,
5002,
1155,
353,
8840,
836,
8,
341,
13158,
5043,
2036,
341,
197,
23847,
262,
2639,
198,
197,
28144,
51560,
526,
21,
19,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
256,
914,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestConvertUnknownBody(t *testing.T) {
unknownType := map[string]int{"0": 0, "1": 1}
require.Equal(t, fmt.Sprintf("%v", unknownType), anyToBody(unknownType).StringVal())
} | explode_data.jsonl/61128 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 66
} | [
2830,
3393,
12012,
13790,
5444,
1155,
353,
8840,
836,
8,
341,
197,
16088,
929,
1669,
2415,
14032,
63025,
4913,
15,
788,
220,
15,
11,
330,
16,
788,
220,
16,
532,
17957,
12808,
1155,
11,
8879,
17305,
4430,
85,
497,
9788,
929,
701,
894... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestApplyTemplateSpecific(t *testing.T) {
o := graphite.Options{
Separator: "_",
Templates: []string{
"current.* measurement.measurement",
"current.*.* measurement.measurement.service",
},
}
p, err := graphite.NewParserWithOptions(o)
if err != nil {
t.Fatalf("unexpected error creating parser, got %v", err)
}
measurement, tags, _, _ := p.ApplyTemplate("current.users.facebook")
if measurement != "current_users" {
t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
measurement, "current_users")
}
service, ok := tags["service"]
if !ok {
t.Error("Expected for template to apply a 'service' tag, but not found")
}
if service != "facebook" {
t.Errorf("Expected service='facebook' tag, got service='%s'", service)
}
} | explode_data.jsonl/32192 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 276
} | [
2830,
3393,
28497,
7275,
47514,
1155,
353,
8840,
836,
8,
341,
22229,
1669,
94173,
22179,
515,
197,
7568,
91640,
25,
9000,
756,
197,
10261,
76793,
25,
3056,
917,
515,
298,
197,
1,
3231,
4908,
18662,
17326,
24359,
756,
298,
197,
1,
3231... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestValidateHTTPFaultInjectionDelay(t *testing.T) {
testCases := []struct {
name string
in *networking.HTTPFaultInjection_Delay
valid bool
}{
{name: "nil", in: nil, valid: true},
{name: "valid fixed", in: &networking.HTTPFaultInjection_Delay{
Percent: 20,
HttpDelayType: &networking.HTTPFaultInjection_Delay_FixedDelay{
FixedDelay: &types.Duration{Seconds: 3},
},
}, valid: true},
{name: "valid default", in: &networking.HTTPFaultInjection_Delay{
HttpDelayType: &networking.HTTPFaultInjection_Delay_FixedDelay{
FixedDelay: &types.Duration{Seconds: 3},
},
}, valid: true},
{name: "invalid percent", in: &networking.HTTPFaultInjection_Delay{
Percent: 101,
HttpDelayType: &networking.HTTPFaultInjection_Delay_FixedDelay{
FixedDelay: &types.Duration{Seconds: 3},
},
}, valid: false},
{name: "invalid delay", in: &networking.HTTPFaultInjection_Delay{
Percent: 20,
HttpDelayType: &networking.HTTPFaultInjection_Delay_FixedDelay{
FixedDelay: &types.Duration{Seconds: 3, Nanos: 42},
},
}, valid: false},
{name: "valid fractional percentage", in: &networking.HTTPFaultInjection_Delay{
Percentage: &networking.Percent{
Value: 0.001,
},
HttpDelayType: &networking.HTTPFaultInjection_Delay_FixedDelay{
FixedDelay: &types.Duration{Seconds: 3},
},
}, valid: true},
{name: "invalid fractional percentage", in: &networking.HTTPFaultInjection_Delay{
Percentage: &networking.Percent{
Value: -10.0,
},
HttpDelayType: &networking.HTTPFaultInjection_Delay_FixedDelay{
FixedDelay: &types.Duration{Seconds: 3},
},
}, valid: false},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if got := validateHTTPFaultInjectionDelay(tc.in); (got == nil) != tc.valid {
t.Errorf("got valid=%v, want valid=%v: %v",
got == nil, tc.valid, got)
}
})
}
} | explode_data.jsonl/56913 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 791
} | [
2830,
3393,
17926,
9230,
58780,
36653,
20039,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
11609,
220,
914,
198,
197,
17430,
262,
353,
17511,
287,
27358,
58780,
36653,
92726,
198,
197,
56322,
1807,
198,
197,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestProto(t *testing.T) {
m := &commonv1pb.InvokeRequest{
Method: "invoketest",
ContentType: "application/json",
Data: &anypb.Any{Value: []byte("test")},
}
pb := internalv1pb.InternalInvokeRequest{
Ver: internalv1pb.APIVersion_V1,
Message: m,
}
ir, err := InternalInvokeRequest(&pb)
assert.NoError(t, err)
req2 := ir.Proto()
assert.Equal(t, "application/json", req2.GetMessage().ContentType)
assert.Equal(t, []byte("test"), req2.GetMessage().Data.Value)
} | explode_data.jsonl/46244 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 210
} | [
2830,
3393,
31549,
1155,
353,
8840,
836,
8,
341,
2109,
1669,
609,
5464,
85,
16,
16650,
32784,
1900,
515,
197,
84589,
25,
414,
330,
14057,
562,
57824,
756,
197,
197,
29504,
25,
330,
5132,
8931,
756,
197,
40927,
25,
286,
609,
276,
108... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSplitRegistryURL(t *testing.T) {
tt := []struct {
in string
outProtocol string
outRegistry string
outError error
}{
{in: "https://my.registry.com", outProtocol: "https", outRegistry: "my.registry.com", outError: nil},
{in: "https://", outProtocol: "", outRegistry: "", outError: fmt.Errorf("Failed to split registry url 'https://'")},
{in: "my.registry.com", outProtocol: "", outRegistry: "", outError: fmt.Errorf("Failed to split registry url 'my.registry.com'")},
{in: "", outProtocol: "", outRegistry: "", outError: fmt.Errorf("Failed to split registry url ''")},
{in: "https://https://my.registry.com", outProtocol: "", outRegistry: "", outError: fmt.Errorf("Failed to split registry url 'https://https://my.registry.com'")},
}
for _, test := range tt {
p, r, err := splitRegistryURL(test.in)
assert.Equal(t, test.outProtocol, p, "Protocol value unexpected")
assert.Equal(t, test.outRegistry, r, "Registry value unexpected")
assert.Equal(t, test.outError, err, "Error value not as expected")
}
} | explode_data.jsonl/37084 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 398
} | [
2830,
3393,
20193,
15603,
3144,
1155,
353,
8840,
836,
8,
341,
3244,
83,
1669,
3056,
1235,
341,
197,
17430,
688,
914,
198,
197,
13967,
20689,
914,
198,
197,
13967,
15603,
914,
198,
197,
13967,
1454,
262,
1465,
198,
197,
59403,
197,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestLoadKeyPair(t *testing.T) {
t.Parallel()
// Load expected tls.Config.
expectedTLSConfig := getExpectedTLSConfig(t)
// Write key pair and CAs files from bytes.
path := t.TempDir() + "username"
certPath, keyPath, caPath := path+".crt", path+".key", path+".cas"
err := os.WriteFile(certPath, tlsCert, 0600)
require.NoError(t, err)
err = os.WriteFile(keyPath, keyPEM, 0600)
require.NoError(t, err)
err = os.WriteFile(caPath, tlsCACert, 0600)
require.NoError(t, err)
// Load key pair from disk.
creds := LoadKeyPair(certPath, keyPath, caPath)
// Build tls.Config and compare to expected tls.Config.
tlsConfig, err := creds.TLSConfig()
require.NoError(t, err)
requireEqualTLSConfig(t, expectedTLSConfig, tlsConfig)
// Load invalid keypairs.
invalidIdentityCreds := LoadKeyPair("invalid_path", "invalid_path", "invalid_path")
_, err = invalidIdentityCreds.TLSConfig()
require.Error(t, err)
} | explode_data.jsonl/55481 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 354
} | [
2830,
3393,
5879,
1592,
12443,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
197,
322,
8893,
3601,
55026,
10753,
624,
42400,
45439,
2648,
1669,
633,
18896,
45439,
2648,
1155,
692,
197,
322,
9645,
1376,
6716,
323,
356,
2121,
354... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBadAddresses(t *testing.T) {
events := new(testBadAddrServer)
err := Run(events, "tulip://howdy")
assert.Error(t, err)
err = Run(events, "howdy")
assert.Error(t, err)
err = Run(events, "tcp://")
assert.NoError(t, err)
} | explode_data.jsonl/52040 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
17082,
52290,
1155,
353,
8840,
836,
8,
341,
90873,
1669,
501,
8623,
17082,
13986,
5475,
340,
9859,
1669,
6452,
50496,
11,
330,
83,
360,
573,
1110,
5158,
10258,
1138,
6948,
6141,
1155,
11,
1848,
340,
9859,
284,
6452,
50496,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStringEscaping(t *testing.T) {
workflow, _ := fixture(t, "valid/escaping.workflow")
assert.Equal(t, `./x " y \ z`, workflow.Actions[0].Uses.String())
} | explode_data.jsonl/55661 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 64
} | [
2830,
3393,
703,
36121,
14216,
1155,
353,
8840,
836,
8,
341,
197,
56249,
11,
716,
1669,
12507,
1155,
11,
330,
1891,
14,
42480,
72774,
1138,
6948,
12808,
1155,
11,
1565,
1725,
87,
330,
379,
1124,
1147,
7808,
28288,
72044,
58,
15,
936,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestUpdateStatus(t *testing.T) {
validator := NewValidator(sdk.ValAddress(pk1.Address().Bytes()), pk1, Description{})
validator, _ = validator.AddTokensFromDel(sdk.NewInt(100))
require.Equal(t, sdk.Unbonded, validator.Status)
require.Equal(t, int64(100), validator.Tokens.Int64())
// Unbonded to Bonded
validator = validator.UpdateStatus(sdk.Bonded)
require.Equal(t, sdk.Bonded, validator.Status)
// Bonded to Unbonding
validator = validator.UpdateStatus(sdk.Unbonding)
require.Equal(t, sdk.Unbonding, validator.Status)
// Unbonding to Bonded
validator = validator.UpdateStatus(sdk.Bonded)
require.Equal(t, sdk.Bonded, validator.Status)
} | explode_data.jsonl/743 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 257
} | [
2830,
3393,
4289,
2522,
1155,
353,
8840,
836,
8,
341,
197,
16112,
1669,
1532,
14256,
1141,
7584,
77819,
4286,
39928,
16,
26979,
1005,
7078,
11858,
22458,
16,
11,
7662,
37790,
197,
16112,
11,
716,
284,
22935,
1904,
29300,
3830,
16532,
11... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAttach(t *testing.T) {
th.SetupHTTP()
defer th.TeardownHTTP()
MockAttachResponse(t)
options := &volumeactions.AttachOpts{
MountPoint: "/mnt",
Mode: "rw",
InstanceUUID: "50902f4f-a974-46a0-85e9-7efc5e22dfdd",
}
err := volumeactions.Attach(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c", options).ExtractErr()
th.AssertNoErr(t, err)
} | explode_data.jsonl/20624 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 175
} | [
2830,
3393,
30485,
1155,
353,
8840,
836,
8,
341,
70479,
39820,
9230,
741,
16867,
270,
94849,
37496,
9230,
2822,
9209,
1176,
30485,
2582,
1155,
692,
35500,
1669,
609,
25060,
4020,
88284,
43451,
515,
197,
9209,
629,
2609,
25,
256,
3521,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestXML(t *testing.T) {
allXMLTimeCreated, _ := time.Parse(time.RFC3339Nano, "2016-01-28T20:33:27.990735300Z")
var tests = []struct {
xml string
event Event
}{
{
xml: allXML,
event: Event{
Provider: Provider{
Name: "Microsoft-Windows-WinRM",
GUID: "{a7975c8f-ac13-49f1-87da-5a984a4ab417}",
EventSourceName: "Service Control Manager",
},
EventIdentifier: EventIdentifier{ID: 91},
LevelRaw: 4,
TaskRaw: 9,
TimeCreated: TimeCreated{allXMLTimeCreated},
RecordID: 100,
Correlation: Correlation{"{A066CCF1-8AB3-459B-B62F-F79F957A5036}", "{85FC0930-9C49-42DA-804B-A7368104BD1B}"},
Execution: Execution{ProcessID: 920, ThreadID: 1152},
Channel: "Microsoft-Windows-WinRM/Operational",
Computer: "vagrant-2012-r2",
User: SID{Identifier: "S-1-5-21-3541430928-2051711210-1391384369-1001"},
EventData: EventData{
Pairs: []KeyValue{
{"param1", "winlogbeat"},
{"param2", "running"},
{"Binary", "770069006E006C006F00670062006500610074002F0034000000"},
},
},
UserData: UserData{
Name: xml.Name{
Local: "EventXML",
Space: "Event_NS",
},
Pairs: []KeyValue{
{"ServerName", `\\VAGRANT-2012-R2`},
{"UserName", "vagrant"},
},
},
Message: "Creating WSMan shell on server with ResourceUri: %1",
Level: "Information",
Task: "Request handling",
Opcode: "Info",
Keywords: []string{"Server"},
RenderErrorCode: 15005,
RenderErrorDataItemName: "shellId",
},
},
{
xml: `
<Event>
<UserData>
<Operation_ClientFailure xmlns='http://manifests.microsoft.com/win/2006/windows/WMI'>
<Id>{00000000-0000-0000-0000-000000000000}</Id>
</Operation_ClientFailure>
</UserData>
</Event>
`,
event: Event{
UserData: UserData{
Name: xml.Name{
Local: "Operation_ClientFailure",
Space: "http://manifests.microsoft.com/win/2006/windows/WMI",
},
Pairs: []KeyValue{
{"Id", "{00000000-0000-0000-0000-000000000000}"},
},
},
},
},
}
for _, test := range tests {
event, err := UnmarshalEventXML([]byte(test.xml))
if err != nil {
t.Error(err)
continue
}
assert.Equal(t, test.event, event)
if testing.Verbose() {
json, err := json.MarshalIndent(event, "", " ")
if err != nil {
t.Error(err)
}
fmt.Println(string(json))
}
}
} | explode_data.jsonl/34647 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1310
} | [
2830,
3393,
10609,
1155,
353,
8840,
836,
8,
341,
50960,
10609,
1462,
11694,
11,
716,
1669,
882,
8937,
9730,
2013,
6754,
18,
18,
18,
24,
83819,
11,
330,
17,
15,
16,
21,
12,
15,
16,
12,
17,
23,
51,
17,
15,
25,
18,
18,
25,
17,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestValidateUEIMSI(t *testing.T) {
err := validateUEIMSI("")
assert.Exactly(t, errors.New("Invalid Argument: IMSI must be between 5 and 15 digits long"), err)
err = validateUEIMSI("0123")
assert.Exactly(t, errors.New("Invalid Argument: IMSI must be between 5 and 15 digits long"), err)
err = validateUEIMSI("0123456789012345")
assert.Exactly(t, errors.New("Invalid Argument: IMSI must be between 5 and 15 digits long"), err)
err = validateUEIMSI("0ABCDEF")
assert.Exactly(t, errors.New("Invalid Argument: IMSI must only be digits"), err)
err = validateUEIMSI("ABCDEF0")
assert.Exactly(t, errors.New("Invalid Argument: IMSI must only be digits"), err)
err = validateUEIMSI("0123456789")
assert.NoError(t, err)
} | explode_data.jsonl/28136 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 246
} | [
2830,
3393,
17926,
2230,
1791,
13817,
1155,
353,
8840,
836,
8,
341,
9859,
1669,
9593,
2230,
1791,
13817,
31764,
6948,
5121,
32739,
1155,
11,
5975,
7121,
445,
7928,
13818,
25,
87277,
40,
1969,
387,
1948,
220,
20,
323,
220,
16,
20,
1850... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCmdInitToken(t *testing.T) {
if *kubeadmCmdSkip {
t.Log("kubeadm cmd tests being skipped")
t.Skip()
}
initTest := []struct {
name string
args string
expected bool
}{
/*
{
name: "invalid token size",
args: "--token=abcd:1234567890abcd",
expected: false,
},
{
name: "invalid token non-lowercase",
args: "--token=Abcdef:1234567890abcdef",
expected: false,
},
*/
{
name: "valid token is accepted",
args: "--token=abcdef.0123456789abcdef",
expected: true,
},
}
for _, rt := range initTest {
t.Run(rt.name, func(t *testing.T) {
_, _, err := runKubeadmInit(rt.args)
if (err == nil) != rt.expected {
t.Fatalf(dedent.Dedent(`
CmdInitToken test case %q failed with an error: %v
command 'kubeadm init %s'
expected: %t
err: %t
`),
rt.name,
err,
rt.args,
rt.expected,
(err == nil),
)
}
})
}
} | explode_data.jsonl/74 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 500
} | [
2830,
3393,
15613,
3803,
3323,
1155,
353,
8840,
836,
8,
341,
743,
353,
74,
392,
3149,
76,
15613,
35134,
341,
197,
3244,
5247,
445,
74,
392,
3149,
76,
5439,
7032,
1660,
35157,
1138,
197,
3244,
57776,
741,
197,
630,
28248,
2271,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestSearchMatrix(t *testing.T) {
Convey("test search", t, func() {
data := [][]int{
[]int{1, 3, 5, 7},
[]int{10, 11, 16, 20},
[]int{23, 30, 34, 50},
}
dut := searchMatrix(data, 3)
So(dut, ShouldEqual, true)
})
} | explode_data.jsonl/31863 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 117
} | [
2830,
3393,
5890,
6689,
1155,
353,
8840,
836,
8,
1476,
93070,
5617,
445,
1944,
2711,
497,
259,
11,
2915,
368,
341,
197,
8924,
1669,
52931,
396,
515,
298,
197,
1294,
396,
90,
16,
11,
220,
18,
11,
220,
20,
11,
220,
22,
1583,
298,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRequestOrdersSubResource(t *testing.T) {
r := (&Request{
baseURL: &url.URL{},
pathPrefix: "/test/",
}).Name("bar").Resource("baz").Namespace("foo").Suffix("test").SubResource("a", "b")
if s := r.URL().String(); s != "/test/namespaces/foo/baz/bar/a/b/test" {
t.Errorf("namespace should be in order in path: %s", s)
}
} | explode_data.jsonl/13253 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 140
} | [
2830,
3393,
1900,
24898,
3136,
4783,
1155,
353,
8840,
836,
8,
341,
7000,
1669,
15899,
1900,
515,
197,
24195,
3144,
25,
262,
609,
1085,
20893,
38837,
197,
26781,
14335,
25,
3521,
1944,
35075,
197,
16630,
675,
445,
2257,
1827,
4783,
445,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestAdminStrategy(t *testing.T) {
conf, reg := internal.NewFastRegistryWithMocks(t)
initViper()
_ = testhelpers.NewRecoveryUIFlowEchoServer(t, reg)
_ = testhelpers.NewSettingsUIFlowEchoServer(t, reg)
_ = testhelpers.NewLoginUIFlowEchoServer(t, reg)
_ = testhelpers.NewErrorTestServer(t, reg)
publicTS, adminTS := testhelpers.NewKratosServer(t, reg)
adminSDK := testhelpers.NewSDKClient(adminTS)
checkLink := func(t *testing.T, l *admin.CreateRecoveryLinkOK, isBefore time.Time) {
require.Contains(t, *l.Payload.RecoveryLink, publicTS.URL+link.RouteRecovery)
rl := urlx.ParseOrPanic(*l.Payload.RecoveryLink)
assert.NotEmpty(t, rl.Query().Get("token"))
assert.NotEmpty(t, rl.Query().Get("flow"))
require.True(t, time.Time(l.Payload.ExpiresAt).Before(isBefore))
}
t.Run("description=should not be able to recover an account that does not exist", func(t *testing.T) {
_, err := adminSDK.Admin.CreateRecoveryLink(admin.NewCreateRecoveryLinkParams().WithBody(
&models.CreateRecoveryLink{IdentityID: models.UUID(x.NewUUID().String())}))
require.IsType(t, err, new(admin.CreateRecoveryLinkNotFound), "%T", err)
})
t.Run("description=should not be able to recover an account that does not have a recovery email", func(t *testing.T) {
id := identity.Identity{Traits: identity.Traits(`{}`)}
require.NoError(t, reg.IdentityManager().Create(context.Background(),
&id, identity.ManagerAllowWriteProtectedTraits))
_, err := adminSDK.Admin.CreateRecoveryLink(admin.NewCreateRecoveryLinkParams().WithBody(
&models.CreateRecoveryLink{IdentityID: models.UUID(id.ID.String())}))
require.IsType(t, err, new(admin.CreateRecoveryLinkBadRequest), "%T", err)
})
t.Run("description=should create a valid recovery link and set the expiry time and not be able to recover the account", func(t *testing.T) {
id := identity.Identity{Traits: identity.Traits(`{"email":"recover.expired@ory.sh"}`)}
require.NoError(t, reg.IdentityManager().Create(context.Background(),
&id, identity.ManagerAllowWriteProtectedTraits))
rl, err := adminSDK.Admin.CreateRecoveryLink(admin.NewCreateRecoveryLinkParams().
WithBody(&models.CreateRecoveryLink{
IdentityID: models.UUID(id.ID.String()),
ExpiresIn: "100ms",
}))
require.NoError(t, err)
time.Sleep(time.Millisecond * 100)
checkLink(t, rl, time.Now().Add(conf.SelfServiceFlowRecoveryRequestLifespan()))
res, err := publicTS.Client().Get(*rl.Payload.RecoveryLink)
require.NoError(t, err)
require.Equal(t, http.StatusOK, res.StatusCode)
// We end up here because the link is expired.
assert.Contains(t, res.Request.URL.Path, "/recover")
})
t.Run("description=should create a valid recovery link and set the expiry time as well and recover the account", func(t *testing.T) {
id := identity.Identity{Traits: identity.Traits(`{"email":"recoverme@ory.sh"}`)}
require.NoError(t, reg.IdentityManager().Create(context.Background(),
&id, identity.ManagerAllowWriteProtectedTraits))
rl, err := adminSDK.Admin.CreateRecoveryLink(admin.NewCreateRecoveryLinkParams().
WithBody(&models.CreateRecoveryLink{IdentityID: models.UUID(id.ID.String())}))
require.NoError(t, err)
checkLink(t, rl, time.Now().Add(conf.SelfServiceFlowRecoveryRequestLifespan()+time.Second))
res, err := publicTS.Client().Get(*rl.Payload.RecoveryLink)
require.NoError(t, err)
assert.Contains(t, res.Request.URL.String(), conf.SelfServiceFlowSettingsUI().String())
assert.Equal(t, http.StatusOK, res.StatusCode)
testhelpers.LogJSON(t, rl.Payload)
sr, err := adminSDK.Public.GetSelfServiceSettingsFlow(
sdkp.NewGetSelfServiceSettingsFlowParams().
WithID(res.Request.URL.Query().Get("flow")), nil)
require.NoError(t, err, "%s", res.Request.URL.String())
require.Len(t, sr.Payload.Messages, 1)
assert.Equal(t, "You successfully recovered your account. Please change your password or set up an alternative login method (e.g. social sign in) within the next 60.00 minutes.", sr.Payload.Messages[0].Text)
})
} | explode_data.jsonl/32117 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1442
} | [
2830,
3393,
7210,
19816,
1155,
353,
8840,
836,
8,
341,
67850,
11,
1217,
1669,
5306,
7121,
32174,
15603,
2354,
72577,
1155,
340,
28248,
53,
12858,
2822,
197,
62,
284,
1273,
21723,
7121,
693,
7449,
2275,
18878,
74994,
5475,
1155,
11,
1217... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAccDataSourceGoogleTagsTagKey_default(t *testing.T) {
org := getTestOrgFromEnv(t)
parent := fmt.Sprintf("organizations/%s", org)
shortName := "tf-test-" + randString(t, 10)
vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccDataSourceGoogleTagsTagKeyConfig(parent, shortName),
Check: resource.ComposeTestCheckFunc(
testAccDataSourceGoogleTagsTagKeyCheck("data.google_tags_tag_key.my_tag_key", "google_tags_tag_key.foobar"),
),
},
},
})
} | explode_data.jsonl/82678 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 228
} | [
2830,
3393,
14603,
17173,
14444,
15930,
5668,
1592,
9993,
1155,
353,
8840,
836,
8,
341,
87625,
1669,
633,
2271,
42437,
3830,
14359,
1155,
692,
24804,
1669,
8879,
17305,
445,
69253,
12627,
82,
497,
1240,
340,
47981,
675,
1669,
330,
8935,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAntiSemiJoinConstFalse(t *testing.T) {
tests := []struct {
sql string
best string
joinType string
}{
{
sql: "select a from t t1 where not exists (select a from t t2 where t1.a = t2.a and t2.b = 1 and t2.b = 2)",
best: "Join{DataScan(t1)->DataScan(t2)}(test.t.a,test.t.a)->Projection",
joinType: "anti semi join",
},
}
s := createPlannerSuite()
ctx := context.Background()
for _, ca := range tests {
comment := fmt.Sprintf("for %s", ca.sql)
stmt, err := s.p.ParseOneStmt(ca.sql, "", "")
require.NoError(t, err, comment)
p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is)
require.NoError(t, err, comment)
p, err = logicalOptimize(context.TODO(), flagDecorrelate|flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan))
require.NoError(t, err, comment)
require.Equal(t, ca.best, ToString(p), comment)
join, _ := p.(LogicalPlan).Children()[0].(*LogicalJoin)
require.Equal(t, ca.joinType, join.JoinType.String(), comment)
}
} | explode_data.jsonl/50205 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 434
} | [
2830,
3393,
32649,
50,
21780,
12292,
19167,
4049,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
30633,
414,
914,
198,
197,
92410,
257,
914,
198,
197,
197,
5987,
929,
914,
198,
197,
59403,
197,
197,
515,
298,
30633,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFloat32_HasAll(t *testing.T) {
testcases := []struct {
name string
s Float32
input []float32
expect bool
}{
{
name: "test Float32 HasAll, set has all input elements",
s: map[float32]struct{}{1: {}, 1.5: {}, 1.2: {}},
input: []float32{1, 1.5},
expect: true,
},
{
name: "test Float32 HasAll, set does not have all input elements",
s: map[float32]struct{}{1: {}, 1.5: {}, 1.2: {}},
input: []float32{1.1, 1.3},
expect: false,
},
{
name: "test Float32 HasAll, set does not have all input elements, but exist elements in set",
s: map[float32]struct{}{1: {}, 1.5: {}, 1.2: {}},
input: []float32{1, 1.3},
expect: false,
},
{
name: "test Float32 HasAll, input empty",
s: map[float32]struct{}{1: {}, 1.5: {}, 1.2: {}},
input: []float32{},
expect: true,
},
}
for _, tc := range testcases {
t.Logf("running scenario: %s", tc.name)
actual := tc.s.HasAll(tc.input...)
if actual != tc.expect {
t.Errorf("expect return: %v, but got: %v", tc.expect, actual)
}
}
} | explode_data.jsonl/60108 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 519
} | [
2830,
3393,
5442,
18,
17,
2039,
300,
2403,
1155,
353,
8840,
836,
8,
341,
18185,
23910,
1669,
3056,
1235,
341,
197,
11609,
256,
914,
198,
197,
1903,
414,
13001,
18,
17,
198,
197,
22427,
220,
3056,
3649,
18,
17,
198,
197,
24952,
1807,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestRollBackRevokeCandidate(t *testing.T) {
desc := &contract.TxDesc{
Module: "tdpos",
Method: "nominate_candidate",
Args: map[string]interface{}{
"candidate": "f3prTg9itaZY6m48wXXikXdcxiByW7zgk",
},
}
strDesc, _ := json.Marshal(desc)
U, L, tdpos := commonWork(t)
txCons, block := makeTxWithDesc(strDesc, U, L, t)
tdpos.context = &contract.TxContext{}
tdpos.context.UtxoBatch = tdpos.utxoVM.NewBatch()
key := "D_revoke_18786b9f4898a3ef375049efe589c3adaa170339c057edab9bdd863860def5b2"
tdpos.context.UtxoBatch.Put([]byte(key), []byte(txCons.Txid))
tdpos.context.UtxoBatch.Write()
desc2 := &contract.TxDesc{
Module: "tdpos",
Method: "rollback_revoke_candidate",
Tx: txCons,
Args: map[string]interface{}{
"candidate": "f3prTg9itaZY6m48wXXikXdcxiByW7zgk",
"txid": fmt.Sprintf("%x", txCons.Txid),
},
}
rollBackRevokeCandErr := tdpos.rollbackRevokeCandidate(desc2, block)
if rollBackRevokeCandErr != nil {
t.Error("rollbackRevokeCandidate error ", rollBackRevokeCandErr.Error())
}
} | explode_data.jsonl/77203 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 473
} | [
2830,
3393,
32355,
3707,
693,
7621,
63901,
1155,
353,
8840,
836,
8,
341,
41653,
1669,
609,
20257,
81362,
11065,
515,
197,
197,
3332,
25,
330,
1296,
966,
756,
197,
84589,
25,
330,
16687,
3277,
62360,
756,
197,
197,
4117,
25,
2415,
1403... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestMultipleSubnets(t *testing.T) {
validConfigs := []struct {
conf string
subnets api.Subnets
}{
{
conf: `
# You can specify multiple subnets to be created in order to achieve H/A
vpcCIDR: 10.4.3.0/16
subnets:
- availabilityZone: ap-northeast-1a
instanceCIDR: 10.4.3.0/24
- availabilityZone: ap-northeast-1c
instanceCIDR: 10.4.4.0/24
`,
subnets: api.Subnets{
{
InstanceCIDR: "10.4.3.0/24",
AvailabilityZone: "ap-northeast-1a",
Name: "Subnet0",
},
{
InstanceCIDR: "10.4.4.0/24",
AvailabilityZone: "ap-northeast-1c",
Name: "Subnet1",
},
},
},
{
conf: `
# Given AZ/CIDR, missing subnets fall-back to the single subnet with the AZ/CIDR given.
vpcCIDR: 10.4.3.0/16
availabilityZone: ap-northeast-1a
instanceCIDR: 10.4.3.0/24
`,
subnets: api.Subnets{
{
AvailabilityZone: "ap-northeast-1a",
InstanceCIDR: "10.4.3.0/24",
Name: "Subnet0",
},
},
},
{
conf: `
# Given AZ/CIDR, empty subnets fall-back to the single subnet with the AZ/CIDR given.
vpcCIDR: 10.4.3.0/16
availabilityZone: ap-northeast-1a
instanceCIDR: 10.4.3.0/24
subnets: []
`,
subnets: api.Subnets{
{
AvailabilityZone: "ap-northeast-1a",
InstanceCIDR: "10.4.3.0/24",
Name: "Subnet0",
},
},
},
{
conf: `
# Given no AZ/CIDR, empty subnets fall-backs to the single subnet with the default az/cidr.
availabilityZone: "ap-northeast-1a"
subnets: []
`,
subnets: api.Subnets{
{
AvailabilityZone: "ap-northeast-1a",
InstanceCIDR: "10.0.0.0/24",
Name: "Subnet0",
},
},
},
{
conf: `
# Missing subnets field fall-backs to the single subnet with the default az/cidr.
availabilityZone: "ap-northeast-1a"
`,
subnets: api.Subnets{
{
AvailabilityZone: "ap-northeast-1a",
InstanceCIDR: "10.0.0.0/24",
Name: "Subnet0",
},
},
},
}
invalidConfigs := []string{
`
# You can't specify both the top-level availability zone and subnets
# (It doesn't make sense. Which configuration did you want, single or multi AZ one?)
availabilityZone: "ap-northeast-1a"
subnets:
- availabilityZone: "ap-northeast-1b"
instanceCIDR: "10.0.0.0/24"
`,
`
# You can't specify both the top-level instanceCIDR and subnets
# (It doesn't make sense. Which configuration did you want, single or multi AZ one?)
instanceCIDR: "10.0.0.0/24"
subnets:
- availabilityZone: "ap-northeast-1b"
instanceCIDR: "10.0.1.0/24"
`,
`
subnets:
# Missing AZ like this
# - availabilityZone: "ap-northeast-1a"
- instanceCIDR: 10.0.0.0/24
`,
`
subnets:
# Missing AZ like this
# - availabilityZone: "ap-northeast-1a"
- instanceCIDR: 10.0.0.0/24
`,
`
subnets:
# Overlapping subnets
- availabilityZone: "ap-northeast-1a"
instanceCIDR: 10.0.5.0/24
- availabilityZone: "ap-northeast-1b"
instanceCIDR: 10.0.5.0/24
`,
}
for _, conf := range validConfigs {
confBody := minimalConfigYaml + conf.conf
c, err := ClusterFromBytes([]byte(confBody))
if err != nil {
t.Errorf("failed to parse config %s: %v", confBody, err)
continue
}
if !reflect.DeepEqual(c.Subnets, conf.subnets) {
t.Errorf(
"parsed subnets %+v does not match expected subnets %+v in config: %s",
c.Subnets,
conf.subnets,
confBody,
)
}
}
for _, conf := range invalidConfigs {
confBody := minimalConfigYaml + conf
_, err := ClusterFromBytes([]byte(confBody))
if err == nil {
t.Errorf("expected error parsing invalid config:\n%s", confBody)
}
}
} | explode_data.jsonl/4369 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1693
} | [
2830,
3393,
32089,
3136,
52861,
1155,
353,
8840,
836,
8,
1476,
56322,
84905,
1669,
3056,
1235,
341,
197,
67850,
262,
914,
198,
197,
28624,
52861,
6330,
12391,
52861,
198,
197,
59403,
197,
197,
515,
298,
67850,
25,
22074,
2,
1446,
646,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestRepositoryLoadIndex(t *testing.T) {
repodir, cleanup := rtest.Env(t, repoFixture)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
rtest.OK(t, repo.LoadIndex(context.TODO()))
} | explode_data.jsonl/71941 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 80
} | [
2830,
3393,
4624,
5879,
1552,
1155,
353,
8840,
836,
8,
341,
73731,
347,
404,
11,
21290,
1669,
435,
1944,
81214,
1155,
11,
15867,
18930,
340,
16867,
21290,
2822,
17200,
5368,
1669,
12542,
8787,
5002,
7319,
1155,
11,
2064,
347,
404,
340,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestProcessNodesWithRetriesWithExponentialBackoff(t *testing.T) {
require := require.New(t)
cancel, controller := newController()
defer cancel()
require.NotNil(controller)
wf := unmarshalWF(helloWorldWf)
require.NotNil(wf)
woc := newWorkflowOperationCtx(wf, controller)
require.NotNil(woc)
// Verify that there are no nodes in the wf status.
require.Zero(len(woc.wf.Status.Nodes))
// Add the parent node for retries.
nodeName := "test-node"
nodeID := woc.wf.NodeID(nodeName)
node := woc.initializeNode(nodeName, wfv1.NodeTypeRetry, "", &wfv1.WorkflowStep{}, "", wfv1.NodeRunning)
retries := wfv1.RetryStrategy{}
retries.Limit = intstrutil.ParsePtr("2")
retries.RetryPolicy = wfv1.RetryPolicyAlways
retries.Backoff = &wfv1.Backoff{
Duration: "5m",
Factor: intstrutil.ParsePtr("2"),
}
woc.wf.Status.Nodes[nodeID] = *node
require.Equal(wfv1.NodeRunning, node.Phase)
// Ensure there are no child nodes yet.
lastChild := getChildNodeIndex(node, woc.wf.Status.Nodes, -1)
require.Nil(lastChild)
woc.initializeNode("child-node-1", wfv1.NodeTypePod, "", &wfv1.WorkflowStep{}, "", wfv1.NodeFailed)
woc.addChildNode(nodeName, "child-node-1")
n := woc.wf.GetNodeByName(nodeName)
// Last child has failed. processNodesWithRetries() should return false due to the default backoff.
var err error
n, _, err = woc.processNodeRetries(n, retries, &executeTemplateOpts{})
require.NoError(err)
require.Equal(wfv1.NodeRunning, n.Phase)
// First backoff should be between 295 and 300 seconds.
backoff, err := parseRetryMessage(n.Message)
require.NoError(err)
require.LessOrEqual(backoff, 300)
require.Less(295, backoff)
woc.initializeNode("child-node-2", wfv1.NodeTypePod, "", &wfv1.WorkflowStep{}, "", wfv1.NodeError)
woc.addChildNode(nodeName, "child-node-2")
n = woc.wf.GetNodeByName(nodeName)
n, _, err = woc.processNodeRetries(n, retries, &executeTemplateOpts{})
require.NoError(err)
require.Equal(wfv1.NodeRunning, n.Phase)
// Second backoff should be between 595 and 600 seconds.
backoff, err = parseRetryMessage(n.Message)
require.NoError(err)
require.LessOrEqual(backoff, 600)
require.Less(595, backoff)
// Mark lastChild as successful.
lastChild = getChildNodeIndex(n, woc.wf.Status.Nodes, -1)
require.NotNil(lastChild)
woc.markNodePhase(lastChild.Name, wfv1.NodeSucceeded)
n, _, err = woc.processNodeRetries(n, retries, &executeTemplateOpts{})
require.NoError(err)
// The parent node also gets marked as Succeeded.
require.Equal(wfv1.NodeSucceeded, n.Phase)
} | explode_data.jsonl/70954 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 998
} | [
2830,
3393,
7423,
12288,
2354,
12020,
4019,
2354,
840,
59825,
3707,
1847,
1155,
353,
8840,
836,
8,
341,
17957,
1669,
1373,
7121,
1155,
692,
84441,
11,
6461,
1669,
501,
2051,
741,
16867,
9121,
741,
17957,
93882,
40845,
340,
6692,
69,
166... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFixedAddressUpdateParams_WithContext(t *testing.T) {
p := NewFixedAddressUpdateParams()
p = p.WithContext(context.TODO())
require.NotNil(t, p.Context)
assert.Equal(t, context.TODO(), p.Context)
} | explode_data.jsonl/14334 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 79
} | [
2830,
3393,
13520,
4286,
4289,
4870,
62,
91101,
1155,
353,
8840,
836,
8,
341,
3223,
1669,
1532,
13520,
4286,
4289,
4870,
741,
3223,
284,
281,
26124,
1972,
5378,
90988,
2398,
17957,
93882,
1155,
11,
281,
9328,
340,
6948,
12808,
1155,
11,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestSyslogOutput(t *testing.T) {
journalctl, err := exec.LookPath("journalctl")
if err != nil {
t.Skip("unable to locate journalctl -- not running this test")
}
cmd := exec.Command(journalctl, "--system")
if err := cmd.Run(); err != nil {
t.Skip("current user does not have permissions to view system log")
}
rand.Seed(time.Now().UnixNano())
runes := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
randString := func(base string, length int) string {
rs := make([]rune, length)
for i := range rs {
rs[i] = runes[rand.Intn(len(runes))]
}
return base + string(rs)
}
debugStr := randString("DEBUG", 8)
infoStr := randString("INFO", 8)
errorStr := randString("ERROR", 8)
logger := logging.NewStdoutLogger("testPrefix").
WithSyslogOutput().
WithLogLevel(logging.LogLevelDebug)
tests := map[string]struct {
prio syslog.Priority
fn func(string)
fnInput string
fmtFn func(string, ...interface{})
fmtFnFmt string
fmtFnArgs []interface{}
expected *regexp.Regexp
}{
"Debug": {prio: syslog.LOG_DEBUG, fn: logger.Debug, fnInput: debugStr,
expected: regexp.MustCompile(fmt.Sprintf(`\n[A-Za-z]{3} \d{2} \d{2}:\d{2}:\d{2} [\w\.\-]+ [^:]+: \d{2}:\d{2}:\d{2}\.\d{6} [^:]+:\d+: %s\n`, debugStr))},
"Debugf": {prio: syslog.LOG_DEBUG, fmtFn: logger.Debugf, fmtFnFmt: fmt.Sprintf("%s: %%d", debugStr), fmtFnArgs: []interface{}{42},
expected: regexp.MustCompile(fmt.Sprintf(`\n[A-Za-z]{3} \d{2} \d{2}:\d{2}:\d{2} [\w\.\-]+ [^:]+: \d{2}:\d{2}:\d{2}\.\d{6} [^:]+:\d+: %s: 42\n`, debugStr))},
"Info": {prio: syslog.LOG_INFO, fn: logger.Info, fnInput: infoStr,
expected: regexp.MustCompile(fmt.Sprintf(`\n[A-Za-z]{3} \d{2} \d{2}:\d{2}:\d{2} [\w\.\-]+ [^:]+: %s\n`, infoStr))},
"Infof": {prio: syslog.LOG_INFO, fmtFn: logger.Infof, fmtFnFmt: fmt.Sprintf("%s: %%d", infoStr), fmtFnArgs: []interface{}{42},
expected: regexp.MustCompile(fmt.Sprintf(`\n[A-Za-z]{3} \d{2} \d{2}:\d{2}:\d{2} [\w\.\-]+ [^:]+: %s: 42\n`, infoStr))},
"Error": {prio: syslog.LOG_ERR, fn: logger.Error, fnInput: errorStr,
expected: regexp.MustCompile(fmt.Sprintf(`\n[A-Za-z]{3} \d{2} \d{2}:\d{2}:\d{2} [\w\.\-]+ [^:]+: %s\n`, errorStr))},
"Errorf": {prio: syslog.LOG_ERR, fmtFn: logger.Errorf, fmtFnFmt: fmt.Sprintf("%s: %%d", errorStr), fmtFnArgs: []interface{}{42},
expected: regexp.MustCompile(fmt.Sprintf(`\n[A-Za-z]{3} \d{2} \d{2}:\d{2}:\d{2} [\w\.\-]+ [^:]+: %s: 42\n`, errorStr))},
}
jrnlOut := func(t *testing.T, prio int) string {
time.Sleep(10 * time.Millisecond) // Give it time to settle
cmd := exec.Command(journalctl,
fmt.Sprintf("_PID=%d", os.Getpid()),
fmt.Sprintf("PRIORITY=%d", prio),
)
out, err := cmd.Output()
if err != nil {
var stderr string
if ee, ok := err.(*exec.ExitError); ok {
stderr = string(ee.Stderr)
}
t.Fatalf("error: %s\nSTDOUT: %s\nSTDERR: %s", err, out, stderr)
}
return string(out)
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
switch {
case tc.fn != nil:
tc.fn(tc.fnInput)
case tc.fmtFn != nil:
tc.fmtFn(tc.fmtFnFmt, tc.fmtFnArgs...)
default:
t.Fatal("no test function defined")
}
got := jrnlOut(t, int(tc.prio))
if !tc.expected.MatchString(got) {
t.Fatalf("expected %q to match %s", got, tc.expected)
}
})
}
} | explode_data.jsonl/53620 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1609
} | [
2830,
3393,
32792,
839,
5097,
1155,
353,
8840,
836,
8,
341,
12428,
4931,
12373,
11,
1848,
1669,
3883,
36851,
1820,
445,
42518,
12373,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
57776,
445,
45928,
311,
24523,
8322,
12373,
1177,
537,
430... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestWriteAttributeValueJSON feeds one value of every supported scalar
// type through writeAttributeValueJSON and compares the accumulated JSON
// object against a literal expectation (the repeated "a" keys are
// intentional — each call appends another field).
func TestWriteAttributeValueJSON(t *testing.T) {
	buf := &bytes.Buffer{}
	w := jsonFieldsWriter{buf: buf}
	buf.WriteByte('{')
	// One representative per supported type, in the order the expected
	// output lists them.
	values := []interface{}{
		`escape\me!`,
		true,
		false,
		uint8(1),
		uint16(2),
		uint32(3),
		uint64(4),
		uint(5),
		uintptr(6),
		int8(-1),
		int16(-2),
		int32(-3),
		int64(-4),
		int(-5),
		float32(1.5),
		float64(4.56),
	}
	for _, v := range values {
		writeAttributeValueJSON(&w, "a", v)
	}
	buf.WriteByte('}')
	expect := CompactJSONString(`{
	"a":"escape\\me!",
	"a":true,
	"a":false,
	"a":1,
	"a":2,
	"a":3,
	"a":4,
	"a":5,
	"a":6,
	"a":-1,
	"a":-2,
	"a":-3,
	"a":-4,
	"a":-5,
	"a":1.5,
	"a":4.56
	}`)
	got := buf.String()
	if got != expect {
		t.Error(got, expect)
	}
} | explode_data.jsonl/20595 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 506
} | [
2830,
3393,
7985,
78554,
5370,
1155,
353,
8840,
836,
8,
341,
26398,
1669,
609,
9651,
22622,
16094,
6692,
1669,
2951,
8941,
6492,
90,
5909,
25,
6607,
630,
26398,
98026,
33440,
1305,
24945,
78554,
5370,
2099,
86,
11,
330,
64,
497,
1565,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestSpanProcessor_NilEmptyData verifies that the span processor leaves
// empty or degenerate trace payloads untouched: with an Include filter
// strictly matching the "service" service, none of these payloads match,
// so input and output must stay equal after consumption.
func TestSpanProcessor_NilEmptyData(t *testing.T) {
	// Each case pairs an input Traces payload with the payload expected
	// after processing (identical here, since nothing matches the filter).
	type nilEmptyTestCase struct {
		name   string
		input  pdata.Traces
		output pdata.Traces
	}
	// TODO: Add test for "nil" Span. This needs support from data slices to allow to construct that.
	testCases := []nilEmptyTestCase{
		{
			name:   "empty",
			input:  pdata.NewTraces(),
			output: pdata.NewTraces(),
		},
		{
			name:   "one-empty-resource-spans",
			input:  testdata.GenerateTracesOneEmptyResourceSpans(),
			output: testdata.GenerateTracesOneEmptyResourceSpans(),
		},
		{
			name:   "no-libraries",
			input:  testdata.GenerateTracesNoLibraries(),
			output: testdata.GenerateTracesNoLibraries(),
		},
		{
			name:   "one-empty-instrumentation-library",
			input:  testdata.GenerateTracesOneEmptyInstrumentationLibrary(),
			output: testdata.GenerateTracesOneEmptyInstrumentationLibrary(),
		},
	}
	// Build a processor whose Include filter strictly matches the
	// "service" service and which renames spans from the "key" attribute.
	factory := NewFactory()
	cfg := factory.CreateDefaultConfig()
	oCfg := cfg.(*Config)
	oCfg.Include = &filterconfig.MatchProperties{
		Config:   *createMatchConfig(filterset.Strict),
		Services: []string{"service"},
	}
	oCfg.Rename.FromAttributes = []string{"key"}
	tp, err := factory.CreateTracesProcessor(context.Background(), componenttest.NewNopProcessorCreateSettings(), oCfg, consumertest.NewNop())
	require.Nil(t, err)
	require.NotNil(t, tp)
	for i := range testCases {
		tt := testCases[i]
		t.Run(tt.name, func(t *testing.T) {
			// Consuming must succeed and must not mutate the payload.
			assert.NoError(t, tp.ConsumeTraces(context.Background(), tt.input))
			assert.EqualValues(t, tt.output, tt.input)
		})
	}
} | explode_data.jsonl/51036 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 595
} | [
2830,
3393,
12485,
22946,
1604,
321,
3522,
1043,
1155,
353,
8840,
836,
8,
341,
13158,
2092,
3522,
16458,
2036,
341,
197,
11609,
256,
914,
198,
197,
22427,
220,
70311,
8240,
2434,
198,
197,
21170,
70311,
8240,
2434,
198,
197,
532,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestAlertmanagerStoreAPI exercises the Alertmanager per-user config
// store API end-to-end against both the legacy and the bucket alert
// stores: upload a config, wait for it to load, verify its contents,
// send an alert, then delete the config and watch it disappear.
func TestAlertmanagerStoreAPI(t *testing.T) {
	tests := map[string]struct {
		legacyAlertStore bool
	}{
		"legacy alertstore": {legacyAlertStore: true},
		"bucket alertstore": {legacyAlertStore: false},
	}
	for testName, testCfg := range tests {
		t.Run(testName, func(t *testing.T) {
			s, err := e2e.NewScenario(networkName)
			require.NoError(t, err)
			defer s.Close()
			// Back the alertmanager with a Minio (S3-compatible) bucket.
			flags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags(testCfg.legacyAlertStore))
			minio := e2edb.NewMinio(9000, alertsBucketName)
			require.NoError(t, s.StartAndWaitReady(minio))
			am := e2ecortex.NewAlertmanager(
				"alertmanager",
				flags,
				"",
			)
			require.NoError(t, s.StartAndWaitReady(am))
			require.NoError(t, am.WaitSumMetrics(e2e.Equals(1), "alertmanager_cluster_members"))
			c, err := e2ecortex.NewClient("", "", am.HTTPEndpoint(), "", "user-1")
			require.NoError(t, err)
			// No config uploaded yet: fetching must report "not found".
			_, err = c.GetAlertmanagerConfig(context.Background())
			require.Error(t, err)
			require.EqualError(t, err, e2ecortex.ErrNotFound.Error())
			err = c.SetAlertmanagerConfig(context.Background(), cortexAlertmanagerUserConfigYaml, map[string]string{})
			require.NoError(t, err)
			// Wait until the uploaded config has been reloaded for user-1.
			require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_alertmanager_config_last_reload_successful"},
				e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")),
				e2e.WaitMissingMetrics))
			require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Greater(0), []string{"cortex_alertmanager_config_last_reload_successful_seconds"},
				e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")),
				e2e.WaitMissingMetrics))
			cfg, err := c.GetAlertmanagerConfig(context.Background())
			require.NoError(t, err)
			// Ensure the returned status config matches the loaded config
			require.NotNil(t, cfg)
			require.Equal(t, "example_receiver", cfg.Route.Receiver)
			require.Len(t, cfg.Route.GroupByStr, 1)
			require.Equal(t, "example_groupby", cfg.Route.GroupByStr[0])
			require.Len(t, cfg.Receivers, 1)
			require.Equal(t, "example_receiver", cfg.Receivers[0].Name)
			// An alert posted for this user must be counted as received.
			err = c.SendAlertToAlermanager(context.Background(), &model.Alert{Labels: model.LabelSet{"foo": "bar"}})
			require.NoError(t, err)
			require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_alertmanager_alerts_received_total"},
				e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")),
				e2e.WaitMissingMetrics))
			err = c.DeleteAlertmanagerConfig(context.Background())
			require.NoError(t, err)
			// The deleted config is applied asynchronously, so we should wait until the metric
			// disappear for the specific user.
			require.NoError(t, am.WaitRemovedMetric("cortex_alertmanager_config_last_reload_successful", e2e.WithLabelMatchers(
				labels.MustNewMatcher(labels.MatchEqual, "user", "user-1"))))
			require.NoError(t, am.WaitRemovedMetric("cortex_alertmanager_config_last_reload_successful_seconds", e2e.WithLabelMatchers(
				labels.MustNewMatcher(labels.MatchEqual, "user", "user-1"))))
			// After deletion the API reports the config as missing again.
			cfg, err = c.GetAlertmanagerConfig(context.Background())
			require.Error(t, err)
			require.Nil(t, cfg)
			require.EqualError(t, err, "not found")
		})
	}
} | explode_data.jsonl/60598 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1274
} | [
2830,
3393,
9676,
13297,
6093,
7082,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
2415,
14032,
60,
1235,
341,
197,
197,
39884,
9676,
6093,
1807,
198,
197,
59403,
197,
197,
1,
39884,
5115,
4314,
788,
314,
39884,
9676,
6093,
25,
830,
1583... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// Test_multiSigKey_Properties builds a 1-of-1 legacy amino multisig
// public key and checks the keyring info derived from it: name, type,
// hex address, and bech32 rendering all match known-good values.
func Test_multiSigKey_Properties(t *testing.T) {
	secretKey := secp256k1.GenPrivKeyFromSecret([]byte("mySecret"))
	multisigPub := multisig.NewLegacyAminoPubKey(
		1,
		[]cryptotypes.PubKey{secretKey.PubKey()},
	)
	info, err := keyring.NewMultiInfo("myMultisig", multisigPub)
	require.NoError(t, err)
	require.Equal(t, "myMultisig", info.GetName())
	require.Equal(t, keyring.TypeMulti, info.GetType())
	require.Equal(t, "D3923267FA8A3DD367BB768FA8BDC8FF7F89DA3F", info.GetPubKey().Address().String())
	require.Equal(t, "cosmos16wfryel63g7axeamw68630wglalcnk3l0zuadc", sdk.MustBech32ifyAddressBytes("cosmos", info.GetAddress()))
} | explode_data.jsonl/13878 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 262
} | [
2830,
3393,
25133,
47246,
1592,
1088,
9249,
1155,
353,
8840,
836,
8,
341,
20082,
1592,
16,
1669,
511,
4672,
17,
20,
21,
74,
16,
65384,
32124,
1592,
3830,
19773,
10556,
3782,
445,
2408,
19773,
5455,
3223,
74,
1669,
74737,
343,
7121,
77... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestStruct_Map scrubs a map of *Person values with an empty field
// list and expects the call to succeed without error.
func TestStruct_Map(t *testing.T) {
	p1 := newPerson()
	p2 := newPerson()
	p3 := newPerson()
	// Maps carry references, so Scrub can reach the entries directly.
	people := map[string]*Person{
		"one":   &p1,
		"two":   &p2,
		"three": &p3,
	}
	assert.NoError(t, Scrub(people, []string{}))
} | explode_data.jsonl/9425 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
9422,
56992,
1155,
353,
8840,
836,
8,
1476,
197,
603,
1669,
501,
10680,
741,
3244,
1126,
1669,
501,
10680,
741,
197,
27856,
1669,
501,
10680,
2822,
18185,
4353,
1669,
2415,
14032,
8465,
10680,
4913,
603,
788,
609,
603,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestJoinsWithSelect saves a user with two emails, reads name/email
// pairs back through a LEFT JOIN, and checks both addresses come back.
func TestJoinsWithSelect(t *testing.T) {
	type result struct {
		Name  string
		Email string
	}
	u := User{
		Name:   "joins_with_select",
		Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}},
	}
	DB.Save(&u)
	var rows []result
	DB.Table("users").
		Select("name, emails.email").
		Joins("left join emails on emails.user_id = users.id").
		Where("name = ?", "joins_with_select").
		Scan(&rows)
	// Row order is not guaranteed by the query; sort by email first.
	sort.Slice(rows, func(i, j int) bool {
		return rows[i].Email < rows[j].Email
	})
	ok := len(rows) == 2 && rows[0].Email == "join1@example.com" && rows[1].Email == "join2@example.com"
	if !ok {
		t.Errorf("Should find all two emails with Join select")
	}
} | explode_data.jsonl/28054 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 259
} | [
2830,
3393,
22493,
1330,
2354,
3379,
1155,
353,
8840,
836,
8,
341,
13158,
1102,
2036,
341,
197,
21297,
220,
914,
198,
197,
197,
4781,
914,
198,
197,
630,
19060,
1669,
2657,
515,
197,
21297,
25,
256,
330,
7305,
1330,
6615,
13051,
756,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestListFileSharesWithFilter drives fc.FilterAndSort over two sample
// file shares with several parameter sets — pool-id filter, ascending
// and descending name sort, and limit/offset pagination — comparing the
// returned shares (and their order) with the expectation.
func TestListFileSharesWithFilter(t *testing.T) {
	// Each case: shares fed in, query parameters applied, and the shares
	// expected back, in order.
	type test struct {
		input    []*model.FileShareSpec
		param    map[string][]string
		expected []*model.FileShareSpec
	}
	tests := []test{
		// select by storage type
		{
			input: []*model.FileShareSpec{
				&SampleFileShares[0],
				&SampleFileShares[1],
			},
			param: map[string][]string{
				"poolId": {"a5965ebe-dg2c-434t-b28e-f373746a71ca"},
			},
			expected: []*model.FileShareSpec{
				&SampleFileShares[0],
			},
		},
		// sort by name asc
		{
			input: []*model.FileShareSpec{
				&SampleFileShares[0],
				&SampleFileShares[1],
			},
			param: map[string][]string{
				"sortKey": {"name"},
				"sortDir": {"asc"},
			},
			expected: []*model.FileShareSpec{
				&SampleFileShares[0],
				&SampleFileShares[1],
			},
		},
		// sort by name desc
		{
			input: []*model.FileShareSpec{
				&SampleFileShares[0],
				&SampleFileShares[1],
			},
			param: map[string][]string{
				"sortKey": {"name"},
				"sortDir": {"desc"},
			},
			expected: []*model.FileShareSpec{
				&SampleFileShares[1],
				&SampleFileShares[0],
			},
		},
		// limit is 1
		{
			input: []*model.FileShareSpec{
				&SampleFileShares[0],
				&SampleFileShares[1]},
			param: map[string][]string{
				"limit":  {"1"},
				"offset": {"1"},
			},
			expected: []*model.FileShareSpec{
				&SampleFileShares[1],
			},
		},
	}
	for _, testcase := range tests {
		// FilterAndSort returns []interface{}; unwrap each element back
		// to *model.FileShareSpec before comparing.
		ret := fc.FilterAndSort(testcase.input, testcase.param, sortableKeysMap[typeFileShares])
		var res = []*model.FileShareSpec{}
		for _, data := range ret.([]interface{}) {
			res = append(res, data.(*model.FileShareSpec))
		}
		if !reflect.DeepEqual(res, testcase.expected) {
			// On mismatch, dereference both sides so the failure output
			// shows struct contents rather than pointer addresses.
			var expected []model.FileShareSpec
			for _, value := range testcase.expected {
				expected = append(expected, *value)
			}
			var got []model.FileShareSpec
			for _, value := range res {
				got = append(got, *value)
			}
			t.Errorf("Expected %+v\n", expected)
			t.Errorf("Got %+v\n", got)
		}
	}
} | explode_data.jsonl/50713 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 880
} | [
2830,
3393,
852,
1703,
73015,
2354,
5632,
1155,
353,
8840,
836,
8,
341,
13158,
1273,
2036,
341,
197,
22427,
262,
29838,
2528,
8576,
12115,
8327,
198,
197,
36037,
262,
2415,
14032,
45725,
917,
198,
197,
42400,
29838,
2528,
8576,
12115,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestExecute wires a Plugin to a fake gRPC connection and a buffered
// writer, then runs Execute on an empty job and expects no error.
func TestExecute(t *testing.T) {
	gaia.Cfg = &gaia.Config{}
	gaia.Cfg.Logger = hclog.New(&hclog.LoggerOptions{
		Level:  hclog.Trace,
		Output: hclog.DefaultOutput,
		Name:   "Gaia",
	})
	out := new(bytes.Buffer)
	plug := &Plugin{pluginConn: new(fakePluginGRPC)}
	plug.writer = bufio.NewWriter(out)
	if err := plug.Execute(&gaia.Job{}); err != nil {
		t.Fatal(err)
	}
} | explode_data.jsonl/25827 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 169
} | [
2830,
3393,
17174,
1155,
353,
8840,
836,
8,
341,
3174,
64,
685,
727,
4817,
284,
609,
6743,
685,
10753,
16094,
3174,
64,
685,
727,
4817,
12750,
284,
50394,
839,
7121,
2099,
38052,
839,
12750,
3798,
515,
197,
197,
4449,
25,
220,
50394,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestVolume applies the VOLUME builder instruction with a single path
// and verifies runConfig.Volumes holds exactly that one entry.
func TestVolume(t *testing.T) {
	b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
	const exposedVolume = "/foo"
	if err := volume(b, []string{exposedVolume}, nil, ""); err != nil {
		t.Fatalf("Error should be empty, got: %s", err.Error())
	}
	vols := b.runConfig.Volumes
	if vols == nil {
		t.Fatalf("Volumes should be set")
	}
	if len(vols) != 1 {
		t.Fatalf("Volumes should contain only 1 element. Got %s", vols)
	}
	_, present := vols[exposedVolume]
	if !present {
		t.Fatalf("Volume %s should be present. Got %s", exposedVolume, vols)
	}
} | explode_data.jsonl/28286 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 232
} | [
2830,
3393,
18902,
1155,
353,
8840,
836,
8,
341,
2233,
1669,
609,
3297,
90,
11161,
25,
609,
33,
9195,
22655,
1598,
2648,
25,
609,
3586,
10753,
22655,
11156,
33441,
25,
830,
630,
8122,
3865,
18902,
1669,
3521,
7975,
1837,
743,
1848,
16... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestShippingService is an integration test against a running
// shippingservice gRPC endpoint: it requests a shipping quote for a
// two-item cart, then places a ship order for the same cart and logs
// the tracking id.
func TestShippingService(t *testing.T) {
	// Connecting
	//url := "127.0.0.1:50051"
	url := "shippingservice:50051"
	conn := connect(t, url)
	defer conn.Close()
	// Creating context
	ctx, cn := context.WithTimeout(context.Background(), timeout)
	defer cn()
	// Creating the stub
	client := hipstershop.NewShippingServiceClient(conn)
	// GetQuote
	t.Run("GetQuote", func(t *testing.T) {
		// Quote for two products shipped to zip code 94203.
		quoteRes, err := client.GetQuote(ctx, &hipstershop.GetQuoteRequest{
			Address: &hipstershop.Address{
				ZipCode: 94203,
			},
			Items: []*hipstershop.CartItem{
				{
					ProductId: "OLJCESPC7Z",
					Quantity:  1,
				},
				{
					ProductId: "1YMWWN1N4O",
					Quantity:  2,
				},
			},
		})
		if err != nil {
			t.Fatalf("error getting quote - %s", err)
		}
		t.Logf("%v", quoteRes.CostUsd)
	})
	// ShipOrder
	t.Run("ShipOrder", func(t *testing.T) {
		// Ship the same cart to the same address as in GetQuote.
		shipRes, err := client.ShipOrder(ctx, &hipstershop.ShipOrderRequest{
			Address: &hipstershop.Address{
				ZipCode: 94203,
			},
			Items: []*hipstershop.CartItem{
				{
					ProductId: "OLJCESPC7Z",
					Quantity:  1,
				},
				{
					ProductId: "1YMWWN1N4O",
					Quantity:  2,
				},
			}},
		)
		if err != nil {
			t.Fatalf("error shipping order - %s", err)
		}
		t.Logf("Tracking ID: %s", shipRes.TrackingId)
	})
} | explode_data.jsonl/43999 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 590
} | [
2830,
3393,
43356,
1860,
1155,
353,
8840,
836,
8,
341,
197,
322,
83938,
198,
197,
322,
1085,
1669,
330,
16,
17,
22,
13,
15,
13,
15,
13,
16,
25,
20,
15,
15,
20,
16,
698,
19320,
1669,
330,
927,
2807,
819,
1017,
25,
20,
15,
15,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestListNew covers both constructor forms: an empty list, and one
// seeded with two values, checking Size and positional Get (including
// an out-of-range index).
func TestListNew(t *testing.T) {
	empty := New()
	if got := empty.Empty(); got != true {
		t.Errorf("Got %v expected %v", got, true)
	}
	seeded := New(1, "b")
	if got := seeded.Size(); got != 2 {
		t.Errorf("Got %v expected %v", got, 2)
	}
	if got, found := seeded.Get(0); got != 1 || !found {
		t.Errorf("Got %v expected %v", got, 1)
	}
	if got, found := seeded.Get(1); got != "b" || !found {
		t.Errorf("Got %v expected %v", got, "b")
	}
	// Index 2 is out of range: expect the zero value and found == false.
	if got, found := seeded.Get(2); got != nil || found {
		t.Errorf("Got %v expected %v", got, nil)
	}
} | explode_data.jsonl/18281 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 251
} | [
2830,
3393,
852,
3564,
1155,
353,
8840,
836,
8,
341,
14440,
16,
1669,
1532,
2822,
743,
5042,
1130,
1669,
1140,
16,
11180,
2129,
5042,
1130,
961,
830,
341,
197,
3244,
13080,
445,
32462,
1018,
85,
3601,
1018,
85,
497,
5042,
1130,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
// TestFindByIdWithNonInt ensures findById rejects a non-numeric key.
func TestFindByIdWithNonInt(t *testing.T) {
	setupDataFileForTest()
	// "ABCD" is not a valid integer id, so an error is expected.
	if _, err := findById("notes", "ABCD"); err == nil {
		t.Errorf("Not getting an error when 'ABCD' used as key.")
	}
} | explode_data.jsonl/81916 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 71
} | [
2830,
3393,
9885,
2720,
2354,
8121,
1072,
1155,
353,
8840,
836,
8,
1476,
84571,
1043,
1703,
2461,
2271,
741,
197,
6878,
1848,
1669,
59525,
445,
18286,
497,
330,
1867,
6484,
5130,
743,
1848,
621,
2092,
341,
197,
3244,
13080,
445,
2623,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
// TestPDBCreation checks that operating on a workflow carrying a pod
// disruption budget spec creates a PodDisruptionBudget named after the
// workflow, and that the PDB is removed again once the workflow is
// marked successful and re-operated.
func TestPDBCreation(t *testing.T) {
	controller := newController()
	wfcset := controller.wfclientset.ArgoprojV1alpha1().Workflows("")
	wf := unmarshalWF(pdbwf)
	wf, err := wfcset.Create(wf)
	assert.NoError(t, err)
	woc := newWorkflowOperationCtx(wf, controller)
	woc.operate()
	// After the first operate, a PDB named after the workflow must exist.
	pdb, _ := controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets("").Get(woc.wf.Name, metav1.GetOptions{})
	assert.NotNil(t, pdb)
	assert.Equal(t, pdb.Name, wf.Name)
	// Completing the workflow and re-operating must clean the PDB up.
	woc.markWorkflowSuccess()
	woc.operate()
	pdb, _ = controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets("").Get(woc.wf.Name, metav1.GetOptions{})
	assert.Nil(t, pdb)
} | explode_data.jsonl/54395 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 273
} | [
2830,
3393,
47,
3506,
32701,
1155,
353,
8840,
836,
8,
341,
61615,
1669,
501,
2051,
741,
6692,
8316,
746,
1669,
6461,
1418,
69,
2972,
746,
18979,
45926,
73,
53,
16,
7141,
16,
1005,
6776,
38140,
31764,
6692,
69,
1669,
650,
27121,
32131,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestSetClusterInConfig checks that setClusterInConfig succeeds when
// the configured cluster name matches the name passed in.
func TestSetClusterInConfig(t *testing.T) {
	conf := getTestConfig()
	conf.Cluster = clusterName
	a := &ecsAgent{cfg: &conf}
	assert.NoError(t, a.setClusterInConfig(clusterName))
} | explode_data.jsonl/41589 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 75
} | [
2830,
3393,
1649,
28678,
641,
2648,
1155,
353,
8840,
836,
8,
341,
50286,
1669,
633,
2271,
2648,
741,
50286,
72883,
284,
10652,
675,
198,
197,
8092,
1669,
609,
53717,
16810,
90,
14072,
25,
609,
14072,
532,
9859,
1669,
8315,
980,
28678,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestGetCircuitBreaker exercises label-driven circuit-breaker
// extraction: no label yields nil, and a set label yields a
// CircuitBreaker carrying that expression.
func TestGetCircuitBreaker(t *testing.T) {
	cases := []struct {
		desc     string
		labels   map[string]string
		expected *types.CircuitBreaker
	}{
		{
			desc:     "should return nil when no CB label",
			labels:   map[string]string{},
			expected: nil,
		},
		{
			desc: "should return a struct when CB label is set",
			labels: map[string]string{
				TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5",
			},
			expected: &types.CircuitBreaker{
				Expression: "NetworkErrorRatio() > 0.5",
			},
		},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			assert.Equal(t, tc.expected, GetCircuitBreaker(tc.labels))
		})
	}
} | explode_data.jsonl/51858 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 312
} | [
2830,
3393,
1949,
34,
37268,
22524,
261,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
41653,
257,
914,
198,
197,
95143,
256,
2415,
14032,
30953,
198,
197,
42400,
353,
9242,
727,
37268,
22524,
261,
198,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.