text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestLevelDBReceiptsGetReceiptOK(t *testing.T) {
assert := assert.New(t)
conf := &LevelDBReceiptStoreConf{
Path: path.Join(tmpdir, "test7"),
}
r, err := newLevelDBReceipts(conf)
defer r.store.Close()
receipt1 := make(map[string]interface{})
receipt1["_id"] = "r1"
receipt1["prop1"] = "value1"
receipt1["from"] = "addr1"
receipt1["to"] = "addr2"
err = r.AddReceipt("r1", &receipt1)
result, err := r.GetReceipt("r1")
assert.NoError(err)
assert.Equal("r1", (*result)["_id"])
assert.Equal("value1", (*result)["prop1"])
} | explode_data.jsonl/21458 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 246
} | [
2830,
3393,
4449,
3506,
67461,
82,
1949,
67461,
3925,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
67850,
1669,
609,
4449,
3506,
67461,
6093,
15578,
515,
197,
69640,
25,
1815,
22363,
10368,
3741,
11,
330,
1944,
22,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestParseLevel(t *testing.T) {
tests := []struct {
name string
wantOut Level
wantErr bool
}{
{name: "debug", wantOut: DEBUG},
{name: "Info", wantOut: INFO},
{name: "WARN", wantOut: WARN},
{name: "error", wantOut: ERROR},
{name: "panic", wantOut: PANIC},
{name: "FATAL", wantOut: FATAL},
{name: "Off", wantOut: OFF},
{name: "xxxx", wantOut: 0, wantErr: true},
}
for _, tt := range tests {
got, err := ParseLevel(tt.name)
if (err != nil) != tt.wantErr {
t.Errorf("ParseLevel(%q) error = %v, wantErr %v", tt.name, err, tt.wantErr)
return
}
if got != tt.wantOut {
t.Errorf("ParseLevel(%q) output = %v, want %v", tt.name, got, tt.wantOut)
}
}
} | explode_data.jsonl/75478 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 321
} | [
2830,
3393,
14463,
4449,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
262,
914,
198,
197,
50780,
2662,
9395,
198,
197,
50780,
7747,
1807,
198,
197,
59403,
197,
197,
47006,
25,
330,
8349,
497,
1366,
2662,
25,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestGenericArrayScanScannerSliceString(t *testing.T) {
src, expected, nss := `{NULL,"\"",xyz}`,
[]sql.NullString{{}, {String: `"`, Valid: true}, {String: `xyz`, Valid: true}},
[]sql.NullString{{String: ``, Valid: true}, {}, {}}
if err := (GenericArray{&nss}).Scan(src); err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if !reflect.DeepEqual(nss, expected) {
t.Errorf("Expected %v, got %v", expected, nss)
}
} | explode_data.jsonl/5345 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 177
} | [
2830,
3393,
19964,
1857,
26570,
31002,
33236,
703,
1155,
353,
8840,
836,
8,
341,
41144,
11,
3601,
11,
308,
778,
1669,
53692,
4576,
1335,
55853,
28854,
27249,
197,
197,
1294,
3544,
23979,
703,
2979,
2137,
314,
703,
25,
53305,
7808,
7818,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestContainsPath(t *testing.T) {
testContainsPath(t, false, "a.proto", "a.proto")
testContainsPath(t, true, ".", "a.proto")
testContainsPath(t, false, "a.proto", ".")
testContainsPath(t, false, ".", ".")
testContainsPath(t, true, ".", "a/b.proto")
testContainsPath(t, true, ".", "a/b")
testContainsPath(t, false, "a", "ab/c")
testContainsPath(t, true, "a", "a/b/c")
testContainsPath(t, false, "b", "a/b/c")
testContainsPath(t, true, "b", "b/b/c")
testContainsPath(t, true, "b", "b/a/c")
} | explode_data.jsonl/11906 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 233
} | [
2830,
3393,
23805,
1820,
1155,
353,
8840,
836,
8,
341,
18185,
23805,
1820,
1155,
11,
895,
11,
330,
64,
57322,
497,
330,
64,
57322,
1138,
18185,
23805,
1820,
1155,
11,
830,
11,
68514,
330,
64,
57322,
1138,
18185,
23805,
1820,
1155,
11,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSQLite_AutoIncrement(t *testing.T) {
liteRun(t, func(t *liteTest) {
usersT := &schema.Table{
Name: "users",
Columns: []*schema.Column{
{Name: "id", Type: &schema.ColumnType{Type: &schema.IntegerType{T: "integer"}}, Attrs: []schema.Attr{sqlite.AutoIncrement{}}},
},
}
usersT.PrimaryKey = &schema.Index{Parts: []*schema.IndexPart{{C: usersT.Columns[0]}}}
t.migrate(&schema.AddTable{T: usersT})
t.dropTables(usersT.Name)
_, err := t.db.Exec("INSERT INTO users DEFAULT VALUES")
require.NoError(t, err)
var id int
err = t.db.QueryRow("SELECT id FROM users").Scan(&id)
require.NoError(t, err)
require.Equal(t, 1, id)
})
} | explode_data.jsonl/20086 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 290
} | [
2830,
3393,
81772,
1566,
1535,
38311,
1155,
353,
8840,
836,
8,
341,
8810,
632,
6727,
1155,
11,
2915,
1155,
353,
68078,
2271,
8,
341,
197,
90896,
51,
1669,
609,
17349,
18257,
515,
298,
21297,
25,
330,
4218,
756,
298,
197,
13965,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_validateEmailAddress(t *testing.T) {
type args struct {
email string
}
tests := []struct {
name string
args args
wantErr bool
}{
// TODO: Add test cases.
{
name: "Valid email",
args: args{
email: "hello@example.com",
},
wantErr: false,
},
{
name: "Invalid email - no '@'",
args: args{
email: "example.com",
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := validateEmailAddress(tt.args.email); (err != nil) != tt.wantErr {
t.Errorf("validateEmailAddress() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
} | explode_data.jsonl/58086 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 299
} | [
2830,
3393,
42681,
79986,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
57549,
914,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
262,
914,
198,
197,
31215,
262,
2827,
198,
197,
50780,
7747,
1807,
198,
197,
594... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_simplifyPath(t *testing.T) {
type args struct {
path string
}
tests := []struct {
name string
args args
want string
}{
{
"[Test Case 1]",
args{
"/a/./b/../../c/",
},
"/c",
},
{
"[Test Case 2]",
args{
"/home",
},
"/home",
},
{
"[Test Case 3]",
args{
"/",
},
"/",
},
{
"[Test Case 4]",
args{
"/home/../../..",
},
"/",
},
{
"[Test Case 5]",
args{
"///",
},
"/",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := simplifyPath(tt.args.path); got != tt.want {
t.Errorf("simplifyPath() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/38285 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 392
} | [
2830,
3393,
643,
70206,
1820,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
26781,
914,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
31215,
2827,
198,
197,
50780,
914,
198,
197,
59403,
197,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestRunAndMonitorPlugin(t *testing.T) {
// Dead simple plugin works for this test. No need to test daemonset/job specific logic so
// a job plugin is much simpler to test against.
testPlugin := &job.Plugin{
Base: driver.Base{
Definition: manifest.Manifest{
SonobuoyConfig: manifest.SonobuoyConfig{
PluginName: "myPlugin",
},
},
Namespace: "testNS",
},
}
testPluginExpectedResults := []plugin.ExpectedResult{
{ResultType: "myPlugin", NodeName: "global"},
}
healthyPod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"sonobuoy-run": ""}},
}
failingPod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"sonobuoy-run": ""}},
Status: corev1.PodStatus{
Conditions: []corev1.PodCondition{
{Reason: "Unschedulable"},
},
},
}
testCert, err := getTestCert()
if err != nil {
t.Fatalf("Could not generate test cert: %v", err)
}
testTimeout := 1 * time.Minute
sonotime.UseShortAfter()
defer sonotime.ResetAfter()
testCases := []struct {
desc string
expectNumResults int
expectStillRunning bool
forceResults bool
cancelContext bool
plugin plugin.Interface
expectedResults []plugin.ExpectedResult
podList *corev1.PodList
podCreationError error
}{
{
desc: "Continue monitoring if no results/errors",
plugin: testPlugin,
expectedResults: testPluginExpectedResults,
podList: &corev1.PodList{
Items: []corev1.Pod{healthyPod},
},
expectStillRunning: true,
}, {
desc: "Error launching plugin causes exit and plugin result",
plugin: testPlugin,
expectedResults: testPluginExpectedResults,
podList: &corev1.PodList{
Items: []corev1.Pod{healthyPod},
},
podCreationError: errors.New("createPod error"),
expectNumResults: 1,
}, {
desc: "Failing plugin causes exit and plugin result",
plugin: testPlugin,
expectedResults: testPluginExpectedResults,
podList: &corev1.PodList{
Items: []corev1.Pod{failingPod},
},
expectNumResults: 1,
}, {
desc: "Plugin obtaining results in exits",
plugin: testPlugin,
expectedResults: testPluginExpectedResults,
podList: &corev1.PodList{
Items: []corev1.Pod{healthyPod},
},
forceResults: true,
expectNumResults: 1,
}, {
desc: "Context cancellation results in exit",
plugin: testPlugin,
expectedResults: testPluginExpectedResults,
podList: &corev1.PodList{
Items: []corev1.Pod{healthyPod},
},
cancelContext: true,
expectNumResults: 0,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "sonobuoy-test")
if err != nil {
t.Fatalf("Failed to make temp directory: %v", err)
}
defer os.RemoveAll(tmpDir)
a := NewAggregator(tmpDir, tc.expectedResults)
ctx, cancel := context.WithCancel(context.Background())
fclient := fake.NewSimpleClientset()
fclient.PrependReactor("list", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
return true, tc.podList, nil
})
fclient.PrependReactor("create", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, tc.podCreationError
})
doneCh, timeoutCh := make(chan (struct{}), 1), make(chan (struct{}), 1)
if tc.cancelContext {
cancel()
} else {
// Max timeout for test to unblock.
go func() {
time.Sleep(2 * time.Second)
timeoutCh <- struct{}{}
cancel()
}()
}
go func() {
a.RunAndMonitorPlugin(ctx, testTimeout, tc.plugin, fclient, nil, "testname", testCert, &corev1.Pod{}, "")
doneCh <- struct{}{}
}()
if tc.forceResults {
a.resultsMutex.Lock()
a.Results["myPlugin/global"] = &plugin.Result{}
a.resultsMutex.Unlock()
}
// Wait for completion/timeout and see which happens first.
wasStillRunning := false
select {
case <-doneCh:
t.Log("runAndMonitor is done")
case <-timeoutCh:
t.Log("runAndMonitor timed out")
wasStillRunning = true
}
if len(a.Results) != tc.expectNumResults {
t.Errorf("Expected %v results but found %v: %+v", tc.expectNumResults, len(a.Results), a.Results)
}
if wasStillRunning != tc.expectStillRunning {
t.Errorf("Expected wasStillMonitoring %v but found %v", tc.expectStillRunning, wasStillRunning)
}
})
}
} | explode_data.jsonl/8326 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1874
} | [
2830,
3393,
6727,
3036,
30098,
11546,
1155,
353,
8840,
836,
8,
341,
197,
322,
15021,
4285,
9006,
4278,
369,
419,
1273,
13,
2308,
1184,
311,
1273,
39293,
746,
77402,
3151,
12218,
773,
198,
197,
322,
264,
2618,
9006,
374,
1753,
34288,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) {
t.Parallel()
const startingHeight = 10
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
// The first registration for an outpoint in the notifier should request
// a historical spend rescan as it does not have a historical view of
// the chain.
ntfn1 := &chainntnfs.SpendNtfn{
SpendID: 0,
OutPoint: zeroOutPoint,
Event: chainntnfs.NewSpendEvent(nil),
}
historicalDispatch1, err := n.RegisterSpend(ntfn1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
if historicalDispatch1 == nil {
t.Fatal("expected to receive historical dispatch request")
}
// We'll register another spend notification for the same outpoint. This
// should not request a historical spend rescan since the first one is
// still pending.
ntfn2 := &chainntnfs.SpendNtfn{
SpendID: 1,
OutPoint: zeroOutPoint,
Event: chainntnfs.NewSpendEvent(nil),
}
historicalDispatch2, err := n.RegisterSpend(ntfn2)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
if historicalDispatch2 != nil {
t.Fatal("received unexpected historical rescan request")
}
// Finally, we'll mark the ongoing historical rescan as complete and
// register another notification. We should also expect not to see a
// historical rescan request since the confirmation details should be
// cached.
spendDetails := &chainntnfs.SpendDetail{
SpentOutPoint: &ntfn2.OutPoint,
SpenderTxHash: &zeroHash,
SpendingTx: wire.NewMsgTx(2),
SpenderInputIndex: 0,
SpendingHeight: startingHeight - 1,
}
err = n.UpdateSpendDetails(ntfn2.OutPoint, spendDetails)
if err != nil {
t.Fatalf("unable to update spend details: %v", err)
}
ntfn3 := &chainntnfs.SpendNtfn{
SpendID: 2,
OutPoint: zeroOutPoint,
Event: chainntnfs.NewSpendEvent(nil),
}
historicalDispatch3, err := n.RegisterSpend(ntfn3)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
if historicalDispatch3 != nil {
t.Fatal("received unexpected historical rescan request")
}
} | explode_data.jsonl/69556 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 772
} | [
2830,
3393,
31584,
64729,
32089,
48983,
938,
50,
3740,
1061,
66,
596,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
4777,
5916,
3640,
284,
220,
16,
15,
198,
9598,
396,
8233,
1669,
501,
11571,
26987,
8233,
741,
9038,
1669,
878... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestCumulativeWeights(t *testing.T) {
weights := []float64{1, 2, 3, 4, 5, 6, 7}
cum := CumulativeWeights(weights)
assert.Equal(t, []float64{1, 3, 6, 10, 15, 21, 28}, cum)
} | explode_data.jsonl/20514 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 79
} | [
2830,
3393,
85805,
22160,
55630,
1155,
353,
8840,
836,
8,
341,
197,
13327,
1669,
3056,
3649,
21,
19,
90,
16,
11,
220,
17,
11,
220,
18,
11,
220,
19,
11,
220,
20,
11,
220,
21,
11,
220,
22,
532,
1444,
372,
1669,
31578,
22160,
55630... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_Encoding_Entries(t *testing.T) {
record := &WALRecord{
UserID: "123",
RefEntries: RefEntries{
Ref: 456,
Entries: []logproto.Entry{
{
Timestamp: time.Unix(1000, 0),
Line: "first",
},
{
Timestamp: time.Unix(2000, 0),
Line: "second",
},
},
},
}
buf := record.encodeEntries(nil)
var decoded WALRecord
err := decodeWALRecord(buf, &decoded)
require.Nil(t, err)
require.Equal(t, record, &decoded)
} | explode_data.jsonl/45900 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 234
} | [
2830,
3393,
93529,
3700,
62,
24533,
1155,
353,
8840,
836,
8,
341,
71952,
1669,
609,
54,
969,
6471,
515,
197,
31672,
915,
25,
330,
16,
17,
18,
756,
197,
197,
3945,
24533,
25,
8550,
24533,
515,
298,
197,
3945,
25,
220,
19,
20,
21,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUpdatePrivilege(t *testing.T) {
store, clean := realtikvtest.CreateMockStoreAndSetup(t)
defer clean()
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2;")
tk.MustExec("create table t1 (id int);")
tk.MustExec("create table t2 (id int);")
tk.MustExec("insert into t1 values (1);")
tk.MustExec("insert into t2 values (2);")
tk.MustExec("create user xxx;")
tk.MustExec("grant all on test.t1 to xxx;")
tk.MustExec("grant select on test.t2 to xxx;")
tk1 := testkit.NewTestKit(t, store)
tk1.MustExec("use test")
require.True(t, tk1.Session().Auth(&auth.UserIdentity{Username: "xxx", Hostname: "localhost"}, []byte(""), []byte("")))
tk1.MustMatchErrMsg("update t2 set id = 666 where id = 1;", "privilege check.*")
// Cover a bug that t1 and t2 both require update privilege.
// In fact, the privlege check for t1 should be update, and for t2 should be select.
tk1.MustExec("update t1,t2 set t1.id = t2.id;")
// Fix issue 8911
tk.MustExec("create database weperk")
tk.MustExec("use weperk")
tk.MustExec("create table tb_wehub_server (id int, active_count int, used_count int)")
tk.MustExec("create user 'weperk'")
tk.MustExec("grant all privileges on weperk.* to 'weperk'@'%'")
require.True(t, tk1.Session().Auth(&auth.UserIdentity{Username: "weperk", Hostname: "%"}, []byte(""), []byte("")))
tk1.MustExec("use weperk")
tk1.MustExec("update tb_wehub_server a set a.active_count=a.active_count+1,a.used_count=a.used_count+1 where id=1")
tk.MustExec("create database service")
tk.MustExec("create database report")
tk.MustExec(`CREATE TABLE service.t1 (
id int(11) DEFAULT NULL,
a bigint(20) NOT NULL,
b text DEFAULT NULL,
PRIMARY KEY (a)
)`)
tk.MustExec(`CREATE TABLE report.t2 (
a bigint(20) DEFAULT NULL,
c bigint(20) NOT NULL
)`)
tk.MustExec("grant all privileges on service.* to weperk")
tk.MustExec("grant all privileges on report.* to weperk")
tk1.Session().GetSessionVars().CurrentDB = ""
tk1.MustExec(`update service.t1 s,
report.t2 t
set s.a = t.a
WHERE
s.a = t.a
and t.c >= 1 and t.c <= 10000
and s.b !='xx';`)
// Fix issue 10028
tk.MustExec("create database ap")
tk.MustExec("create database tp")
tk.MustExec("grant all privileges on ap.* to xxx")
tk.MustExec("grant select on tp.* to xxx")
tk.MustExec("create table tp.record( id int,name varchar(128),age int)")
tk.MustExec("insert into tp.record (id,name,age) values (1,'john',18),(2,'lary',19),(3,'lily',18)")
tk.MustExec("create table ap.record( id int,name varchar(128),age int)")
tk.MustExec("insert into ap.record(id) values(1)")
require.True(t, tk1.Session().Auth(&auth.UserIdentity{Username: "xxx", Hostname: "localhost"}, []byte(""), []byte("")))
tk1.MustExec("update ap.record t inner join tp.record tt on t.id=tt.id set t.name=tt.name")
} | explode_data.jsonl/5763 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1093
} | [
2830,
3393,
4289,
32124,
42769,
1155,
353,
8840,
836,
8,
341,
57279,
11,
4240,
1669,
1931,
83,
1579,
85,
1944,
7251,
11571,
6093,
3036,
21821,
1155,
340,
16867,
4240,
2822,
3244,
74,
1669,
1273,
8226,
7121,
2271,
7695,
1155,
11,
3553,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEnqueueInFront(t *testing.T) {
finite_tests.EnqueueInFront(t, func(size int) interface {
goqueue.Owner
goqueue.Enqueuer
goqueue.EnqueueInFronter
goqueue.Peeker
} {
return finite.New(size)
})
} | explode_data.jsonl/54510 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 90
} | [
2830,
3393,
1702,
4584,
641,
23395,
1155,
353,
8840,
836,
8,
341,
1166,
15856,
32509,
65625,
641,
23395,
1155,
11,
2915,
6856,
526,
8,
3749,
341,
197,
30680,
4584,
49920,
198,
197,
30680,
4584,
22834,
591,
8801,
198,
197,
30680,
4584,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBadRequest(t *testing.T) {
badRequestUrl := fmt.Sprintf("%s?%s=%s", createUrl(badRequestPath), badRequestMessage, "test")
resp, err := http.Get(badRequestUrl)
AssertThat(t, err, Is{nil})
AssertThat(t, resp.StatusCode, EqualTo{http.StatusBadRequest})
bodyBytes, _ := ioutil.ReadAll(resp.Body)
AssertThat(t, bytes.ContainsAny(bodyBytes, "test"), Is{true})
} | explode_data.jsonl/65287 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 140
} | [
2830,
3393,
46015,
1155,
353,
8840,
836,
8,
341,
2233,
329,
1900,
2864,
1669,
8879,
17305,
4430,
82,
30,
4,
82,
7846,
82,
497,
1855,
2864,
1883,
329,
1900,
1820,
701,
3873,
1900,
2052,
11,
330,
1944,
1138,
34653,
11,
1848,
1669,
175... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSessionSplitFetch(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vnet := getVirtualNetwork()
ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil)
defer ig.Close()
bgen := blocksutil.NewBlockGenerator()
inst := ig.Instances(11)
// Add 10 distinct blocks to each of 10 peers
blks := bgen.Blocks(100)
for i := 0; i < 10; i++ {
if err := inst[i].Blockstore().PutMany(blks[i*10 : (i+1)*10]); err != nil {
t.Fatal(err)
}
}
var cids []cid.Cid
for _, blk := range blks {
cids = append(cids, blk.Cid())
}
// Create a session on the remaining peer and fetch all the blocks 10 at a time
ses := inst[10].Exchange.NewSession(ctx).(*bssession.Session)
ses.SetBaseTickDelay(time.Millisecond * 10)
for i := 0; i < 10; i++ {
ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10])
if err != nil {
t.Fatal(err)
}
var got []blocks.Block
for b := range ch {
got = append(got, b)
}
if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil {
t.Fatal(err)
}
}
} | explode_data.jsonl/2223 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 440
} | [
2830,
3393,
5283,
20193,
20714,
1155,
353,
8840,
836,
8,
341,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
16867,
9121,
2822,
5195,
4711,
1669,
633,
33026,
12320,
741,
197,
343,
1669,
1273,
4851,
7121,
2271,
2523,
12561,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestArgoCDInstanceSelector(t *testing.T) {
t.Run("Selector for a Valid name", func(t *testing.T) {
validName := "argocd-server"
selector, err := argocdInstanceSelector(validName)
assert.NilError(t, err)
assert.Equal(t, selector.String(), "app.kubernetes.io/managed-by=argocd-server")
})
t.Run("Selector for an Invalid name", func(t *testing.T) {
invalidName := "argocd-*/"
selector, err := argocdInstanceSelector(invalidName)
assert.ErrorContains(t, err, `failed to create a requirement for invalid label value: "argocd-*/`)
assert.Equal(t, selector, nil)
})
} | explode_data.jsonl/11935 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 225
} | [
2830,
3393,
2735,
78,
6484,
2523,
5877,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
5877,
369,
264,
7818,
829,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
56322,
675,
1669,
330,
858,
509,
67,
26300,
698,
197,
197,
8925,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReconciler_Batch_Rerun(t *testing.T) {
job := mock.Job()
job.Type = structs.JobTypeBatch
job.TaskGroups[0].Update = nil
// Create 10 allocations from the old job and have them be complete
var allocs []*structs.Allocation
for i := 0; i < 10; i++ {
alloc := mock.Alloc()
alloc.Job = job
alloc.JobID = job.ID
alloc.NodeID = uuid.Generate()
alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
alloc.TaskGroup = job.TaskGroups[0].Name
alloc.ClientStatus = structs.AllocClientStatusComplete
alloc.DesiredStatus = structs.AllocDesiredStatusStop
allocs = append(allocs, alloc)
}
// Create a copy of the job that is "new"
job2 := job.Copy()
job2.CreateIndex++
reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, true, job2.ID, job2, nil, allocs, nil)
r := reconciler.Compute()
// Assert the correct results
assertResults(t, r, &resultExpectation{
createDeployment: nil,
deploymentUpdates: nil,
place: 10,
destructive: 0,
desiredTGUpdates: map[string]*structs.DesiredUpdates{
job.TaskGroups[0].Name: {
Place: 10,
DestructiveUpdate: 0,
Ignore: 10,
},
},
})
assertNamesHaveIndexes(t, intRange(0, 9), placeResultsToNames(r.place))
} | explode_data.jsonl/67278 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 530
} | [
2830,
3393,
693,
40446,
5769,
1668,
754,
2568,
261,
359,
1155,
353,
8840,
836,
8,
341,
68577,
1669,
7860,
45293,
741,
68577,
10184,
284,
62845,
45293,
929,
21074,
198,
68577,
28258,
22173,
58,
15,
936,
4289,
284,
2092,
271,
197,
322,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestValidateDuration(t *testing.T) {
type durationCheck struct {
duration *types.Duration
isValid bool
}
checks := []durationCheck{
{
duration: &types.Duration{Seconds: 1},
isValid: true,
},
{
duration: &types.Duration{Seconds: 1, Nanos: -1},
isValid: false,
},
{
duration: &types.Duration{Seconds: -11, Nanos: -1},
isValid: false,
},
{
duration: &types.Duration{Nanos: 1},
isValid: false,
},
{
duration: &types.Duration{Seconds: 1, Nanos: 1},
isValid: false,
},
}
for _, check := range checks {
if got := ValidateDuration(check.duration); (got == nil) != check.isValid {
t.Errorf("Failed: got valid=%t but wanted valid=%t: %v for %v", got == nil, check.isValid, got, check.duration)
}
}
} | explode_data.jsonl/56895 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 326
} | [
2830,
3393,
17926,
12945,
1155,
353,
8840,
836,
8,
341,
13158,
8090,
3973,
2036,
341,
197,
89300,
353,
9242,
33795,
198,
197,
19907,
4088,
220,
1807,
198,
197,
630,
25157,
82,
1669,
3056,
17021,
3973,
515,
197,
197,
515,
298,
89300,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestResolveGceAddressValue(t *testing.T) {
oldRunner := runner
defer func() { runner = oldRunner }()
runner = exec.TestRunner{Output: []byte(testIpAddress)}
zone := "us-central1-a"
cluster := model.Cluster{
Name: "kcd-clustername",
Provider: model.Provider{
GKE: &model.GkeProvider{
Project: "test-project",
Zone: &zone,
ClusterName: "gke-clustername",
},
},
}
env := &model.Environment{
Cluster: &cluster,
}
address := &model.GceAddressValueRef{
Name: "my-address",
IsGlobal: false,
}
out, err := ResolveGceAddressValue(address, env)
assert.NoError(t, err)
assert.Equal(t, testIpAddress, string(out))
} | explode_data.jsonl/3664 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 282
} | [
2830,
3393,
56808,
38,
346,
4286,
1130,
1155,
353,
8840,
836,
8,
341,
61828,
19486,
1669,
22259,
198,
16867,
2915,
368,
314,
22259,
284,
2310,
19486,
50746,
197,
41736,
284,
3883,
8787,
19486,
90,
5097,
25,
3056,
3782,
8623,
98567,
1056... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLogFiles(t *testing.T) {
cfg := Config{
KernelImagePath: filepath.Join(testDataPath, "vmlinux"), SocketPath: filepath.Join(testDataPath, "socket-path"),
Drives: []models.Drive{
{
DriveID: String("0"),
IsRootDevice: Bool(true),
IsReadOnly: Bool(false),
PathOnHost: String(testRootfs),
},
},
DisableValidation: true,
}
opClient := fctesting.MockClient{
GetMachineConfigurationFn: func(params *ops.GetMachineConfigurationParams) (*ops.GetMachineConfigurationOK, error) {
return &ops.GetMachineConfigurationOK{
Payload: &models.MachineConfiguration{},
}, nil
},
}
ctx := context.Background()
client := NewClient("socket-path", fctesting.NewLogEntry(t), true, WithOpsClient(&opClient))
stdoutPath := filepath.Join(testDataPath, "stdout.log")
stderrPath := filepath.Join(testDataPath, "stderr.log")
stdout, err := os.Create(stdoutPath)
if err != nil {
t.Fatalf("error creating %q: %v", stdoutPath, err)
}
stderr, err := os.Create(stderrPath)
if err != nil {
t.Fatalf("error creating %q: %v", stderrPath, err)
}
fd, err := net.Listen("unix", cfg.SocketPath)
if err != nil {
t.Fatalf("unexpected error during creation of unix socket: %v", err)
}
defer func() {
fd.Close()
}()
defer func() {
os.Remove(stdoutPath)
os.Remove(stderrPath)
}()
cmd := exec.Command("ls")
cmd.Stdout = stdout
cmd.Stderr = stderr
m, err := NewMachine(
ctx,
cfg,
WithClient(client),
WithProcessRunner(cmd),
WithLogger(fctesting.NewLogEntry(t)),
)
if err != nil {
t.Fatalf("failed to create new machine: %v", err)
}
defer m.StopVMM()
if err := m.Handlers.FcInit.Run(ctx, m); err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, err := os.Stat(stdoutPath); os.IsNotExist(err) {
t.Errorf("expected log file to be present")
}
if _, err := os.Stat(stderrPath); os.IsNotExist(err) {
t.Errorf("expected log file to be present")
}
} | explode_data.jsonl/70439 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 785
} | [
2830,
3393,
2201,
10809,
1155,
353,
8840,
836,
8,
341,
50286,
1669,
5532,
515,
197,
197,
26343,
66356,
25,
26054,
22363,
8623,
1043,
1820,
11,
330,
85,
1014,
19559,
3975,
20954,
1820,
25,
26054,
22363,
8623,
1043,
1820,
11,
330,
9556,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPyModuleGetFilenameObject(t *testing.T) {
fmt.Println(assert.CallerInfo()[0])
assert.Nil(t, pymodule.GetFilenameObject(nil))
queue := pyimport.ImportModule("queue")
defer py.DecRef(queue)
queueRefCnt := py.RefCnt(queue)
defer func() { assert.Equal(t, queueRefCnt, py.RefCnt(queue)) }()
name := pymodule.GetFilenameObject(queue)
nameRefCnt := py.RefCnt(name)
defer func() { assert.Equal(t, nameRefCnt, py.RefCnt(name)) }()
assert.NotNil(t, name)
assert.True(t, strings.HasSuffix(pyunicode.AsString(name), "/queue.py"))
} | explode_data.jsonl/33471 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 217
} | [
2830,
3393,
13828,
3332,
1949,
20759,
1190,
1155,
353,
8840,
836,
8,
341,
11009,
12419,
75846,
727,
13956,
1731,
10116,
15,
9604,
6948,
59678,
1155,
11,
45760,
1756,
2234,
20759,
1190,
27907,
4390,
46993,
1669,
4510,
474,
67275,
3332,
445... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestErrWrapPanic checks compilation of the Go+ "!" error-wrap operator:
// `println("Hi")!` must compile to an immediately-invoked closure that
// calls fmt.Println, panics if the returned error is non-nil, and
// otherwise yields the byte count as the value of `ret`.
// NOTE(review): the two raw strings are golden input/output for gopClTest;
// their exact whitespace is part of the expected compiler output.
func TestErrWrapPanic(t *testing.T) {
	gopClTest(t, `
var ret int = println("Hi")!
`, `package main

import fmt "fmt"

var ret int = func() (_gop_ret int) {
	var _gop_err error
	_gop_ret, _gop_err = fmt.Println("Hi")
	if _gop_err != nil {
		panic(_gop_err)
	}
	return
}()
`)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 130
} | [
2830,
3393,
7747,
26787,
47,
31270,
1155,
353,
8840,
836,
8,
341,
3174,
453,
5066,
2271,
1155,
11,
22074,
947,
2112,
526,
284,
13751,
445,
13048,
899,
4894,
7808,
1565,
1722,
1887,
271,
474,
8879,
330,
12501,
1837,
947,
2112,
526,
284... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestValidateS3BucketReplicationRuleId(t *testing.T) {
validId := []string{
"YadaHereAndThere",
"Valid-5Rule_ID",
"This . is also %% valid@!)+*(:ID",
"1234",
strings.Repeat("W", 255),
}
for _, v := range validId {
_, errors := validateS3BucketReplicationRuleId(v, "id")
if len(errors) != 0 {
t.Fatalf("%q should be a valid lifecycle rule id: %q", v, errors)
}
}
invalidId := []string{
// length > 255
strings.Repeat("W", 256),
}
for _, v := range invalidId {
_, errors := validateS3BucketReplicationRuleId(v, "id")
if len(errors) == 0 {
t.Fatalf("%q should be an invalid replication configuration rule id", v)
}
}
} | explode_data.jsonl/78576 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 271
} | [
2830,
3393,
17926,
50,
18,
36018,
18327,
1693,
11337,
764,
1155,
353,
8840,
836,
8,
341,
56322,
764,
1669,
3056,
917,
515,
197,
197,
1,
56,
2584,
8420,
3036,
3862,
756,
197,
197,
1,
4088,
12,
20,
11337,
3450,
756,
197,
197,
21520,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestRoundtripNoCompression(t *testing.T) {
// Make sure that we can use our standard routines for decompressing
// something with '0' level compression.
var buf bytes.Buffer
r1 := report.MakeReport()
r1.WriteBinary(&buf, 0)
r2, err := report.MakeFromBinary(&buf)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(r1, *r2) {
t.Errorf("%v != %v", r1, *r2)
}
} | explode_data.jsonl/17309 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 150
} | [
2830,
3393,
27497,
32981,
2753,
81411,
1155,
353,
8840,
836,
8,
341,
197,
322,
7405,
2704,
429,
582,
646,
990,
1039,
5297,
29497,
369,
28502,
1873,
287,
198,
197,
322,
2494,
448,
364,
15,
6,
2188,
25111,
624,
2405,
6607,
5820,
22622,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestGet_LeadingSlash(t *testing.T) {
want := "http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/identity?audience=http://example.com"
tests := []struct {
name string
suffix string
}{
{
name: "without leading slash",
suffix: "instance/service-accounts/default/identity?audience=http://example.com",
},
{
name: "with leading slash",
suffix: "/instance/service-accounts/default/identity?audience=http://example.com",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ct := &captureTransport{}
c := NewClient(&http.Client{Transport: ct})
c.Get(tc.suffix)
if ct.url != want {
t.Fatalf("got %v, want %v", ct.url, want)
}
})
}
} | explode_data.jsonl/17307 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 312
} | [
2830,
3393,
1949,
62,
69750,
88004,
1155,
353,
8840,
836,
8,
341,
50780,
1669,
330,
1254,
1110,
16,
21,
24,
13,
17,
20,
19,
13,
16,
21,
24,
13,
17,
20,
19,
25093,
8492,
14610,
5457,
16,
14,
4851,
34186,
12,
26206,
28989,
14,
169... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestFormatArgsWithoutParameterTypeName renders application arguments for
// a module whose type carries no parameter-type name, and expects the
// "?, ?" placeholder form in the generated color-markup fragment.
func TestFormatArgsWithoutParameterTypeName(t *testing.T) {
	// given: a module with one app ("test") holding a "User" type
	// that has an empty attribute map.
	m := &sysl.Module{
		Apps: map[string]*sysl.Application{
			"test": {
				Types: map[string]*sysl.Type{
					"User": {
						Attrs: make(map[string]*sysl.Attribute),
					},
				},
			},
		},
	}
	// when: formatting args with an empty parameter-type name
	actual := formatArgs(m, "test", "")
	// then: unknown parameter types render as "?, ?" placeholders
	assert.Equal(t, "<color blue>test.</color> <<color green>?, ?</color>>", actual)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 176
} | [
2830,
3393,
4061,
4117,
26040,
4971,
39429,
1155,
353,
8840,
836,
8,
341,
197,
322,
2661,
198,
2109,
1669,
609,
7791,
75,
26958,
515,
197,
197,
53602,
25,
2415,
14032,
8465,
7791,
75,
17521,
515,
298,
197,
1,
1944,
788,
341,
571,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLinkDestEncode(t *testing.T) {
dest1 := []byte("http://foo.bar/测试")
encoded := html.EncodeDestination(dest1)
dest2 := html.DecodeDestination(encoded)
if !bytes.Equal(dest1, dest2) {
t.Fatalf("Link dest encode failed")
}
} | explode_data.jsonl/10311 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 97
} | [
2830,
3393,
3939,
34830,
32535,
1155,
353,
8840,
836,
8,
341,
49616,
16,
1669,
3056,
3782,
445,
1254,
1110,
7975,
22001,
14,
81705,
1138,
197,
19329,
1669,
5272,
50217,
33605,
27010,
16,
340,
49616,
17,
1669,
5272,
56372,
33605,
76270,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestSetDefaults(t *testing.T) {
b.SetDefaults()
if b.Name != "Bitfinex" || b.Enabled != false ||
b.Verbose != false || b.Websocket != false ||
b.RESTPollingDelay != 10 {
t.Error("Test Failed - Bitfinex SetDefaults values not set correctly")
}
} | explode_data.jsonl/79925 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
1649,
16273,
1155,
353,
8840,
836,
8,
341,
2233,
4202,
16273,
2822,
743,
293,
2967,
961,
330,
8344,
5472,
327,
1,
1369,
293,
13690,
961,
895,
8244,
197,
2233,
42505,
8297,
961,
895,
1369,
293,
6473,
9556,
961,
895,
8244,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestStatusUpdatesWithoutReplicasChange drives the ReplicaSet controller in
// steady state (observed pods == spec replicas) and asserts that a sync does
// NOT issue a status update; only after the ReplicaSet's Generation is bumped
// does the controller PUT status with ObservedGeneration brought up to date.
func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
	// Setup a fake server to listen for requests, and run the ReplicaSet controller in steady state
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "{}",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	// Steady state for the ReplicaSet, no Status.Replicas updates expected
	activePods := 5
	labelMap := map[string]string{"foo": "bar"}
	rs := newReplicaSet(activePods, labelMap)
	manager.rsStore.Store.Add(rs)
	// Status already reflects the active pod count, so nothing should change.
	rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods)}
	newPodList(manager.podStore.Indexer, activePods, api.PodRunning, labelMap, rs, "pod")
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicaSet(getKey(rs, t))
	// Expect zero creates and zero deletes from the steady-state sync.
	validateSyncReplicaSet(t, &fakePodControl, 0, 0)
	if fakeHandler.RequestReceived != nil {
		t.Errorf("Unexpected update when pods and ReplicaSets are in a steady state")
	}
	// This response body is just so we don't err out decoding the http response, all
	// we care about is the request body sent below.
	response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
	fakeHandler.ResponseBody = response
	// Bumping Generation forces a status write on the next sync.
	rs.Generation = rs.Generation + 1
	manager.syncReplicaSet(getKey(rs, t))
	rs.Status.ObservedGeneration = rs.Generation
	updatedRc := runtime.EncodeOrDie(testapi.Extensions.Codec(), rs)
	fakeHandler.ValidateRequest(t, testapi.Extensions.ResourcePath(replicaSetResourceName(), rs.Namespace, rs.Name)+"/status", "PUT", &updatedRc)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 595
} | [
2830,
3393,
2522,
37091,
26040,
18327,
52210,
4072,
1155,
353,
8840,
836,
8,
341,
197,
322,
18626,
264,
12418,
3538,
311,
8844,
369,
7388,
11,
323,
1598,
279,
94036,
1649,
6461,
304,
24020,
1584,
198,
1166,
726,
3050,
1669,
4094,
8840,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// Test_getUserID is a table-driven test for getUserIDFromMessage. A user ID
// is extracted only when the message carries a WebhookLabelKey label whose
// value is a map containing a string "userID"; every malformed shape (nil
// message, nil labels, missing label, non-map label value, missing or
// non-string userID) yields the empty string.
func Test_getUserID(t *testing.T) {
	type args struct {
		msg *types.Message
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{
			name: "test with nil msg",
			args: args{},
			want: "",
		},
		{
			name: "test with nil labels",
			args: args{
				msg: &types.Message{
					Labels: nil,
				},
			},
			want: "",
		},
		{
			name: "test with no WebhookLabelKey label",
			args: args{
				msg: &types.Message{
					Labels: map[types.LabelKey]interface{}{
						"foo": "bar",
					},
				},
			},
			want: "",
		},
		{
			// The label value is a bare string rather than the expected map,
			// so no user ID can be extracted.
			name: "test with WebhookLabelKey label and string value",
			args: args{
				msg: &types.Message{
					Labels: map[types.LabelKey]interface{}{
						types.LabelKey(constant.WebhookLabelKey): "10001",
					},
				},
			},
			want: "",
		},
		{
			name: "test with WebhookLabelKey label and no userID",
			args: args{
				msg: &types.Message{
					Labels: map[types.LabelKey]interface{}{
						types.LabelKey(constant.WebhookLabelKey): map[string]interface{}{
							"foo": "bar",
						},
					},
				},
			},
			want: "",
		},
		{
			// userID present but as an int, not a string — rejected.
			name: "test with not string userID value",
			args: args{
				msg: &types.Message{
					Labels: map[types.LabelKey]interface{}{
						types.LabelKey(constant.WebhookLabelKey): map[string]interface{}{
							"userID": 10001,
						},
					},
				},
			},
			want: "",
		},
		{
			// Happy path: map label with a string userID.
			name: "test with correct",
			args: args{
				msg: &types.Message{
					Labels: map[types.LabelKey]interface{}{
						types.LabelKey(constant.WebhookLabelKey): map[string]interface{}{
							"userID": "10001",
						},
					},
				},
			},
			want: "10001",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := getUserIDFromMessage(tt.args.msg); got != tt.want {
				t.Errorf("getUserIDFromMessage() = %v, want %v", got, tt.want)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 901
} | [
2830,
3393,
3062,
36899,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
21169,
353,
9242,
8472,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
31215,
2827,
198,
197,
50780,
914,
198,
197,
59403,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestAuthor_PendingExtrinsics fetches the pending extrinsics from the test
// author RPC and compares them against a single fully-specified golden
// extrinsic literal (version 0x84, Ed25519 signature, immortal era, nonce 3,
// zero tip, call index 6/0). The literal is intentionally exhaustive so any
// field-level decoding drift fails the comparison.
func TestAuthor_PendingExtrinsics(t *testing.T) {
	res, err := author.PendingExtrinsics()
	assert.NoError(t, err)
	assert.Equal(t, []types.Extrinsic{types.Extrinsic{Version: 0x84, Signature: types.ExtrinsicSignatureV4{Signer: types.Address{IsAccountID: true, AsAccountID: types.AccountID{0xd4, 0x35, 0x93, 0xc7, 0x15, 0xfd, 0xd3, 0x1c, 0x61, 0x14, 0x1a, 0xbd, 0x4, 0xa9, 0x9f, 0xd6, 0x82, 0x2c, 0x85, 0x58, 0x85, 0x4c, 0xcd, 0xe3, 0x9a, 0x56, 0x84, 0xe7, 0xa5, 0x6d, 0xa2, 0x7d}, IsAccountIndex: false, AsAccountIndex: 0x0}, Signature: types.MultiSignature{IsEd25519: true, AsEd25519: types.Signature{0xa0, 0x23, 0xbb, 0xe8, 0x83, 0x40, 0x5b, 0x5f, 0xac, 0x2a, 0xa1, 0x14, 0x9, 0x3f, 0xcf, 0x3d, 0x8, 0x2, 0xd2, 0xf3, 0xd3, 0x71, 0x5e, 0x9, 0x12, 0x9b, 0x0, 0xa4, 0xbf, 0x74, 0x10, 0x48, 0xca, 0xf5, 0x3d, 0x8c, 0x7d, 0x97, 0xe8, 0x72, 0xca, 0xa7, 0x3, 0xe7, 0xd0, 0x4f, 0x17, 0x4a, 0x4e, 0x2e, 0xd4, 0xac, 0xad, 0xee, 0x41, 0x73, 0xa8, 0xb6, 0xba, 0xb7, 0xe4, 0x5c, 0xa, 0x6}, IsSr25519: false, AsSr25519: types.Signature{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, IsEcdsa: false, AsEcdsa: types.Bytes(nil)}, Era: types.ExtrinsicEra{IsImmortalEra: true, IsMortalEra: false, AsMortalEra: types.MortalEra{First: 0x0, Second: 0x0}}, Nonce: types.NewUCompactFromUInt(0x3), Tip: types.NewUCompactFromUInt(0x0)}, Method: types.Call{CallIndex: types.CallIndex{SectionIndex: 0x6, MethodIndex: 0x0}, Args: types.Args{0xff, 0x8e, 0xaf, 0x4, 0x15, 0x16, 0x87, 0x73, 0x63, 0x26, 0xc9, 0xfe, 0xa1, 0x7e, 0x25, 0xfc, 0x52, 0x87, 0x61, 0x36, 0x93, 0xc9, 0x12, 0x90, 0x9c, 0xb2, 0x26, 0xaa, 0x47, 0x94, 0xf2, 0x6a, 0x48, 0xe5, 0x6c}}}}, res) //nolint:lll,dupl
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1129
} | [
2830,
3393,
7133,
1088,
2459,
840,
376,
67102,
1155,
353,
8840,
836,
8,
341,
10202,
11,
1848,
1669,
3150,
96624,
840,
376,
67102,
741,
6948,
35699,
1155,
11,
1848,
340,
6948,
12808,
1155,
11,
3056,
9242,
5121,
45002,
90,
9242,
5121,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestStructDeclConformsNullable looks up the "ExampleStruct" declaration as
// nullable in the test schema and checks conformance: a record with the
// expected string field conforms, and — because the decl is nullable — a nil
// value conforms as well.
func TestStructDeclConformsNullable(t *testing.T) {
	// Second argument `true` requests the nullable variant of the decl.
	decl, ok := testSchema(t).lookupDeclByName("ExampleStruct", true)
	if !ok {
		t.Fatalf("lookupDeclByName failed")
	}
	structDecl := decl.(*StructDecl)
	checkConforms(t,
		context{},
		structDecl,
		[]conformTest{
			conformOk{gidlir.Record{
				Name: "ExampleStruct",
				Fields: []gidlir.Field{
					{Key: gidlir.FieldKey{Name: "s"}, Value: "foo"},
				},
			}},
			// nil must conform because the decl was looked up as nullable.
			conformOk{nil},
		},
	)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 202
} | [
2830,
3393,
9422,
21629,
1109,
9807,
15703,
1155,
353,
8840,
836,
8,
341,
197,
10005,
11,
5394,
1669,
1273,
8632,
1155,
568,
21020,
21629,
16898,
445,
13314,
9422,
497,
830,
340,
743,
753,
562,
341,
197,
3244,
30762,
445,
21020,
21629,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestCachedChartsRefreshChartRepoNotFound stubs the chart-tarball and
// chart-icon download helpers with no-ops (restoring the originals on exit)
// and verifies that refreshing a chart absent from the "stable" repo returns
// the expected not-found error.
func TestCachedChartsRefreshChartRepoNotFound(t *testing.T) {
	// Stubs Download and processing
	DownloadAndExtractChartTarballOrig := charthelper.DownloadAndExtractChartTarball
	defer func() { charthelper.DownloadAndExtractChartTarball = DownloadAndExtractChartTarballOrig }()
	charthelper.DownloadAndExtractChartTarball = func(chart *swaggermodels.ChartPackage, repoURL string) error { return nil }
	DownloadAndProcessChartIconOrig := charthelper.DownloadAndProcessChartIcon
	defer func() { charthelper.DownloadAndProcessChartIcon = DownloadAndProcessChartIconOrig }()
	charthelper.DownloadAndProcessChartIcon = func(chart *swaggermodels.ChartPackage) error { return nil }
	// The trailing newline in the expected error text is intentional.
	err := chartsImplementation.RefreshChart("stable", "inexistant chart")
	assert.Err(t, err, errors.New("no chart \"inexistant chart\" found for repo stable\n"))
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 237
} | [
2830,
3393,
70293,
64878,
14567,
14488,
25243,
10372,
1155,
353,
8840,
836,
8,
341,
197,
322,
794,
15738,
8577,
323,
8692,
198,
197,
11377,
3036,
28959,
14488,
62733,
3959,
62726,
1669,
1161,
339,
2947,
61204,
3036,
28959,
14488,
62733,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_Shutdown(t *testing.T) {
prwe := &PRWExporter{
wg: new(sync.WaitGroup),
closeChan: make(chan struct{}),
}
wg := new(sync.WaitGroup)
err := prwe.Shutdown(context.Background())
require.NoError(t, err)
errChan := make(chan error, 5)
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
defer wg.Done()
errChan <- prwe.PushMetrics(context.Background(), pdata.NewMetrics())
}()
}
wg.Wait()
close(errChan)
for ok := range errChan {
assert.Error(t, ok)
}
} | explode_data.jsonl/78811 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 223
} | [
2830,
3393,
36578,
18452,
1155,
353,
8840,
836,
8,
341,
25653,
896,
1669,
609,
6480,
54,
88025,
515,
197,
72079,
25,
286,
501,
97233,
28384,
2808,
1326,
197,
27873,
46019,
25,
1281,
35190,
2036,
6257,
1326,
197,
532,
72079,
1669,
501,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestMixedOSBuildKojiJobErrors checks backward compatibility of OSBuildKoji
// jobs and results that were enqueued without PipelineNames: when read back,
// old jobs/results are populated with pipeline names (so they do NOT equal
// the originals), while jobs/results enqueued with explicit PipelineNames
// round-trip unchanged.
func TestMixedOSBuildKojiJobErrors(t *testing.T) {
	require := require.New(t)
	emptyManifestV2 := distro.Manifest(`{"version":"2","pipelines":{}}`)
	server := newTestServer(t, t.TempDir(), time.Duration(0), "/api/worker/v1")
	// enqueueKojiJob enqueues a koji-init dependency and then the osbuild-koji
	// job itself, returning the osbuild job's ID.
	enqueueKojiJob := func(job *worker.OSBuildKojiJob) uuid.UUID {
		initJob := new(worker.KojiInitJob)
		initJobID, err := server.EnqueueKojiInit(initJob, "")
		require.NoError(err)
		jobID, err := server.EnqueueOSBuildKoji("k", job, initJobID, "")
		require.NoError(err)
		return jobID
	}
	// "Old" job: no PipelineNames set, mimicking pre-upgrade payloads.
	oldJob := worker.OSBuildKojiJob{
		Manifest:  emptyManifestV2,
		ImageName: "no-pipeline-names",
	}
	oldJobID := enqueueKojiJob(&oldJob)
	// "New" job: explicit PipelineNames.
	newJob := worker.OSBuildKojiJob{
		Manifest:  emptyManifestV2,
		ImageName: "with-pipeline-names",
		PipelineNames: &worker.PipelineNames{
			Build:   []string{"build"},
			Payload: []string{"other", "pipelines"},
		},
	}
	newJobID := enqueueKojiJob(&newJob)
	oldJobRead := new(worker.OSBuildKojiJob)
	err := server.OSBuildKojiJob(oldJobID, oldJobRead)
	require.NoError(err)
	// Not entirely equal: the read-back old job gains default pipeline names.
	require.NotEqual(oldJob, oldJobRead)
	// NewJob the same when read back
	newJobRead := new(worker.OSBuildKojiJob)
	err = server.OSBuildKojiJob(newJobID, newJobRead)
	require.NoError(err)
	// Dequeue the jobs (via RequestJob) to get their tokens and update them to
	// test the result retrieval
	// Finish init jobs
	for idx := uint(0); idx < 2; idx++ {
		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		defer cancel()
		_, token, _, _, _, err := server.RequestJob(ctx, "k", []string{"koji-init"}, []string{""})
		require.NoError(err)
		require.NoError(server.FinishJob(token, nil))
	}
	getJob := func() (uuid.UUID, uuid.UUID) {
		// don't block forever if the jobs weren't added or can't be retrieved
		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		defer cancel()
		id, token, _, _, _, err := server.RequestJob(ctx, "k", []string{"osbuild-koji"}, []string{""})
		require.NoError(err)
		return id, token
	}
	// getJobTokens dequeues n osbuild-koji jobs and maps job ID -> token.
	getJobTokens := func(n uint) map[uuid.UUID]uuid.UUID {
		tokens := make(map[uuid.UUID]uuid.UUID, n)
		for idx := uint(0); idx < n; idx++ {
			id, token := getJob()
			tokens[id] = token
		}
		return tokens
	}
	jobTokens := getJobTokens(2)
	// make sure we got them both as expected
	require.Contains(jobTokens, oldJobID)
	require.Contains(jobTokens, newJobID)
	// Finish the old job with a result that lacks PipelineNames.
	oldJobResult := &worker.OSBuildKojiJobResult{
		KojiError: "koji build error",
	}
	oldJobResultRaw, err := json.Marshal(oldJobResult)
	require.NoError(err)
	oldJobToken := jobTokens[oldJobID]
	err = server.FinishJob(oldJobToken, oldJobResultRaw)
	require.NoError(err)
	oldJobResultRead := new(worker.OSBuildKojiJobResult)
	_, _, err = server.OSBuildKojiJobStatus(oldJobID, oldJobResultRead)
	require.NoError(err)
	// oldJobResultRead should have PipelineNames now
	require.NotEqual(oldJobResult, oldJobResultRead)
	// Finish the new job with a fully-specified result; it must round-trip.
	newJobResult := &worker.OSBuildKojiJobResult{
		PipelineNames: &worker.PipelineNames{
			Build:   []string{"build-result"},
			Payload: []string{"result-test-payload", "result-test-assembler"},
		},
		JobResult: worker.JobResult{
			JobError: clienterrors.WorkerClientError(clienterrors.ErrorKojiBuild, "Koji build error", nil),
		},
	}
	newJobResultRaw, err := json.Marshal(newJobResult)
	require.NoError(err)
	newJobToken := jobTokens[newJobID]
	err = server.FinishJob(newJobToken, newJobResultRaw)
	require.NoError(err)
	newJobResultRead := new(worker.OSBuildKojiJobResult)
	_, _, err = server.OSBuildKojiJobStatus(newJobID, newJobResultRead)
	require.NoError(err)
	require.Equal(newJobResult, newJobResultRead)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1374
} | [
2830,
3393,
86433,
3126,
11066,
42,
26902,
12245,
13877,
1155,
353,
8840,
836,
8,
341,
17957,
1669,
1373,
7121,
1155,
692,
197,
3194,
38495,
53,
17,
1669,
1582,
299,
72272,
5809,
4913,
4366,
3252,
17,
2198,
51501,
10999,
788,
90,
3417,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGenesisBytes(t *testing.T) {
a := assert.New(t)
g1 := GetDefaultGenesis()
g1.Balance = map[string]string{"abc": "1234"}
g1.BPs = []string{"xxx", "yyy", "zzz"}
b := g1.Bytes()
fmt.Println(spew.Sdump(g1))
g2 := GetGenesisFromBytes(b)
a.Nil(g2.Balance)
} | explode_data.jsonl/47846 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 126
} | [
2830,
3393,
84652,
7078,
1155,
353,
8840,
836,
8,
341,
11323,
1669,
2060,
7121,
1155,
340,
3174,
16,
1669,
2126,
3675,
84652,
741,
3174,
16,
1785,
4978,
284,
2415,
14032,
30953,
4913,
13683,
788,
330,
16,
17,
18,
19,
16707,
3174,
16,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMap(t *testing.T) {
items := []int{23, 24, 2, 5, 10}
interfaceItems := make([]interface{}, len(items))
for i, v := range items {
interfaceItems[i] = v
}
a := New(interfaceItems)
doubleArray := a.Map(double)
if a.Length() != doubleArray.Length() {
t.Log("Mapped array should have same length as original array")
t.Log("Expected", a.Length(), "\n Got", doubleArray.Length())
t.Fail()
}
i := 0
for ; i < a.Length(); i++ {
originalValue, _ := a.Get(i)
mapValue, _ := doubleArray.Get(i)
if (mapValue.(int) % originalValue.(int)) != 0 {
t.Log("New array values should be double original array values")
t.Log("Expected", 0, "\n Got", (mapValue.(int) % originalValue.(int)))
t.Fail()
}
}
} | explode_data.jsonl/47092 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 281
} | [
2830,
3393,
2227,
1155,
353,
8840,
836,
8,
341,
46413,
1669,
3056,
396,
90,
17,
18,
11,
220,
17,
19,
11,
220,
17,
11,
220,
20,
11,
220,
16,
15,
532,
58915,
1564,
4353,
1669,
1281,
10556,
4970,
22655,
2422,
24337,
4390,
2023,
600,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestCreateVolumeDBEntry covers CreateVolumeDBEntry's happy path and two
// validation failures (non-positive size, empty profile id). Each subtest
// installs a fresh mock DB client into the package-global db.C.
// NOTE(review): the subtests share and mutate the single `in` spec, so they
// depend on running in declaration order.
func TestCreateVolumeDBEntry(t *testing.T) {
	var in = &model.VolumeSpec{
		BaseModel:   &model.BaseModel{},
		Name:        "volume sample",
		Description: "This is a sample volume for testing",
		Size:        int64(1),
		ProfileId:   "3769855c-a102-11e7-b772-17b880d2f537",
		Status:      model.VolumeCreating,
	}
	t.Run("Everything should work well", func(t *testing.T) {
		mockClient := new(dbtest.Client)
		mockClient.On("CreateVolume", context.NewAdminContext(), in).Return(&SampleVolumes[0], nil)
		db.C = mockClient
		var expected = &SampleVolumes[0]
		result, err := CreateVolumeDBEntry(context.NewAdminContext(), in)
		if err != nil {
			t.Errorf("failed to create volume asynchronously, err is %v\n", err)
		}
		assertTestResult(t, result, expected)
	})
	t.Run("The size of volume created should be greater than zero", func(t *testing.T) {
		// Negative size must be rejected before the DB is touched.
		in.Size = int64(-2)
		mockClient := new(dbtest.Client)
		mockClient.On("CreateVolume", context.NewAdminContext(), in).Return(&SampleVolumes[0], nil)
		db.C = mockClient
		_, err := CreateVolumeDBEntry(context.NewAdminContext(), in)
		expectedError := fmt.Sprintf("invalid volume size: %d", in.Size)
		assertTestResult(t, err.Error(), expectedError)
	})
	t.Run("The profile id should not be empty", func(t *testing.T) {
		// Restore a valid size, then clear the profile id to trigger the
		// empty-profile validation error.
		in.Size, in.ProfileId = int64(1), ""
		mockClient := new(dbtest.Client)
		mockClient.On("CreateVolume", context.NewAdminContext(), in).Return(&SampleVolumes[0], nil)
		db.C = mockClient
		_, err := CreateVolumeDBEntry(context.NewAdminContext(), in)
		expectedError := "profile id can not be empty when creating volume in db"
		assertTestResult(t, err.Error(), expectedError)
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 604
} | [
2830,
3393,
4021,
18902,
3506,
5874,
1155,
353,
8840,
836,
8,
341,
2405,
304,
284,
609,
2528,
79106,
8327,
515,
197,
66732,
1712,
25,
256,
609,
2528,
13018,
1712,
38837,
197,
21297,
25,
286,
330,
25060,
6077,
756,
197,
47414,
25,
330,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestContextRenderNoContentData(t *testing.T) {
w := httptest.NewRecorder()
c, _ := CreateTestContext(w)
c.Data(http.StatusNoContent, "text/csv", []byte(`foo,bar`))
assert.Equal(t, http.StatusNoContent, w.Code)
assert.Empty(t, w.Body.String())
assert.Equal(t, "text/csv", w.Header().Get("Content-Type"))
} | explode_data.jsonl/26791 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 124
} | [
2830,
3393,
1972,
6750,
2753,
2762,
1043,
1155,
353,
8840,
836,
8,
341,
6692,
1669,
54320,
70334,
7121,
47023,
741,
1444,
11,
716,
1669,
4230,
2271,
1972,
3622,
692,
1444,
3336,
19886,
10538,
2753,
2762,
11,
330,
1318,
92128,
497,
3056,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// Test_jx_boot_in_non_boot_repo_fails runs `jx boot` inside a freshly
// created git repository that is not a boot/requirements repo and expects
// the command to fail with an explanatory error.
func Test_jx_boot_in_non_boot_repo_fails(t *testing.T) {
	// Silence command logging for the duration of the test.
	log.SetOutput(ioutil.Discard)
	bootDir, err := ioutil.TempDir("", "boot-test")
	require.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(bootDir)
	}()
	commonOpts := opts.NewCommonOptionsWithFactory(clients.NewFactory())
	commonOpts.BatchMode = true
	o := BootOptions{
		CommonOptions: &commonOpts,
		Dir:           bootDir,
	}
	// make the tmp directory a git repo
	gitter := gits.NewGitCLI()
	err = gitter.Init(bootDir)
	require.NoError(t, err)
	err = gitter.SetRemoteURL(bootDir, "origin", "https://github.com/johndoe/jx.git")
	require.NoError(t, err)
	// Commit one file so the repo has history but no boot requirements.
	_, err = os.Create(filepath.Join(bootDir, "foo"))
	require.NoError(t, err)
	err = gitter.AddCommit(bootDir, "adding foo")
	require.NoError(t, err)
	err = o.Run()
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "trying to execute 'jx boot' from a non requirements repo")
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 369
} | [
2830,
3393,
5374,
87,
52062,
1243,
21637,
52062,
37784,
761,
6209,
1155,
353,
8840,
836,
8,
341,
6725,
4202,
5097,
1956,
30158,
909,
47560,
340,
197,
4619,
6184,
11,
1848,
1669,
43144,
65009,
6184,
19814,
330,
4619,
16839,
1138,
17957,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCloudTasksListTasksError(t *testing.T) {
errCode := codes.PermissionDenied
mockCloudTasks.err = gstatus.Error(errCode, "test error")
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
var request = &taskspb.ListTasksRequest{
Parent: formattedParent,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ListTasks(context.Background(), request).Next()
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
} | explode_data.jsonl/30862 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 264
} | [
2830,
3393,
16055,
25449,
852,
25449,
1454,
1155,
353,
8840,
836,
8,
341,
9859,
2078,
1669,
13912,
73409,
54481,
198,
77333,
16055,
25449,
18441,
284,
342,
2829,
6141,
3964,
2078,
11,
330,
1944,
1465,
5130,
2405,
23126,
8387,
914,
284,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestDiffBasicNoLabels(t *testing.T) {
s := newScaffold(t)
defer s.reset()
d := &dg{cmValue: "baz", secretValue: "baz"}
s.client.getFunc = d.get
err := s.executeCommand("diff", "dev", "--ignore-all-labels", "-S", "--show-deletes=false")
require.NotNil(t, err)
a := assert.New(t)
secretValue := base64.StdEncoding.EncodeToString([]byte("baz"))
redactedValue := base64.RawStdEncoding.EncodeToString([]byte("redacted."))
a.NotContains(s.stdout(), redactedValue)
a.Contains(s.stdout(), secretValue)
a.NotContains(s.stdout(), "qbec.io/environment")
} | explode_data.jsonl/72088 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 231
} | [
2830,
3393,
21751,
15944,
2753,
23674,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
501,
50,
27864,
1155,
340,
16867,
274,
13857,
741,
2698,
1669,
609,
35138,
90,
6226,
1130,
25,
330,
42573,
497,
6234,
1130,
25,
330,
42573,
16707,
1903,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCacheMountPrivateRefs(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
tmpdir, err := ioutil.TempDir("", "cachemanager")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
co, cleanup, err := newCacheManager(ctx, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
defer cleanup()
g1 := newRefGetter(co.manager, co.md, sharedCacheRefs)
g2 := newRefGetter(co.manager, co.md, sharedCacheRefs)
g3 := newRefGetter(co.manager, co.md, sharedCacheRefs)
g4 := newRefGetter(co.manager, co.md, sharedCacheRefs)
ref, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
require.NoError(t, err)
ref2, err := g1.getRefCacheDir(ctx, nil, "bar", pb.CacheSharingOpt_PRIVATE)
require.NoError(t, err)
// different ID returns different ref
require.NotEqual(t, ref.ID(), ref2.ID())
// same ID on same mount still shares the reference
ref3, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
require.NoError(t, err)
require.Equal(t, ref.ID(), ref3.ID())
// same ID on different mount gets a new ID
ref4, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
require.NoError(t, err)
require.NotEqual(t, ref.ID(), ref4.ID())
// releasing one of two refs still keeps first ID private
ref.Release(context.TODO())
ref5, err := g3.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
require.NoError(t, err)
require.NotEqual(t, ref.ID(), ref5.ID())
require.NotEqual(t, ref4.ID(), ref5.ID())
// releasing all refs releases ID to be reused
ref3.Release(context.TODO())
ref5, err = g4.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
require.NoError(t, err)
require.Equal(t, ref.ID(), ref5.ID())
// other mounts still keep their IDs
ref6, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
require.NoError(t, err)
require.Equal(t, ref4.ID(), ref6.ID())
} | explode_data.jsonl/50739 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 817
} | [
2830,
3393,
8233,
16284,
16787,
82807,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
20985,
1669,
58091,
26124,
22699,
5378,
19047,
1507,
330,
5834,
8226,
16839,
5130,
20082,
3741,
11,
1848,
1669,
43144,
65009,
6184,
19814,
330,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBlockList(t *testing.T) {
r := require.New(t)
now := time.Now()
name := "alfa"
withinBlockTTL := now.Add(blockListTTL / 2)
pastBlockTTL := now.Add(blockListTTL * 2)
blockTests := []struct {
curTime time.Time
blocked bool
}{
{now, false},
{now, false},
{withinBlockTTL, true},
{withinBlockTTL, true},
{pastBlockTTL, false},
{withinBlockTTL, false},
{withinBlockTTL, false},
{pastBlockTTL, false},
}
list := NewBlockList(10)
for _, v := range blockTests {
list.Add(name, now)
r.Equal(v.blocked, list.Blocked(name, v.curTime))
}
} | explode_data.jsonl/7020 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 248
} | [
2830,
3393,
4713,
852,
1155,
353,
8840,
836,
8,
341,
7000,
1669,
1373,
7121,
1155,
692,
80922,
1669,
882,
13244,
741,
11609,
1669,
330,
84426,
698,
46948,
258,
4713,
51,
13470,
1669,
1431,
1904,
18682,
852,
51,
13470,
608,
220,
17,
34... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestResumptionDisabled(t *testing.T) {
sessionFilePath := tempFile("")
defer os.Remove(sessionFilePath)
config := *testConfig
test := &serverTest{
name: "IssueTicketPreDisable",
command: []string{"openssl", "s_client", "-cipher", "RC4-SHA", "-sess_out", sessionFilePath},
config: &config,
}
runServerTestTLS12(t, test)
config.SessionTicketsDisabled = true
test = &serverTest{
name: "ResumeDisabled",
command: []string{"openssl", "s_client", "-cipher", "RC4-SHA", "-sess_in", sessionFilePath},
config: &config,
}
runServerTestTLS12(t, test)
// One needs to manually confirm that the handshake in the golden data
// file for ResumeDisabled does not include a resumption handshake.
} | explode_data.jsonl/80566 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 254
} | [
2830,
3393,
1061,
60574,
25907,
1155,
353,
8840,
836,
8,
341,
25054,
19090,
1669,
2730,
1703,
31764,
16867,
2643,
13270,
16264,
19090,
692,
25873,
1669,
353,
1944,
2648,
271,
18185,
1669,
609,
4030,
2271,
515,
197,
11609,
25,
262,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
restoreEnvFn := sdktesting.StashEnv()
defer restoreEnvFn()
svc := awstesting.NewClient()
svc.Handlers.Clear()
svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
err := req.Build()
if err == nil {
t.Errorf("expect error, got none")
}
if e, a := aws.ErrMissingRegion, err; e != a {
t.Errorf("expect %v to be %v", e, a)
}
} | explode_data.jsonl/44093 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 196
} | [
2830,
3393,
17926,
27380,
3050,
1454,
14091,
1155,
353,
8840,
836,
8,
341,
96027,
14359,
24911,
1669,
45402,
8840,
7758,
988,
14359,
741,
16867,
14952,
14359,
24911,
741,
1903,
7362,
1669,
1360,
267,
59855,
7121,
2959,
741,
1903,
7362,
35... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
ds.Spec.Template.Spec = podSpec
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
node := newNode("not-too-much-mem", nil)
node.Status.Allocatable = allocatableResources("200M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Add(&v1.Pod{
Spec: podSpec,
})
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 1)
}
} | explode_data.jsonl/50314 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 276
} | [
2830,
3393,
50,
26683,
29392,
1955,
89177,
32067,
288,
23527,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
8282,
1669,
2088,
2647,
2580,
69388,
368,
341,
197,
3223,
347,
8327,
1669,
5101,
23527,
8327,
445,
1921,
93579,
1448,
1387,
1448,
33... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestFile_NotExists(t *testing.T) {
fs := testFs()
err := CheckFile(fs, "missing.file", false)
assert.Equal(t, linterrors.NewFileError("missing.file", "does not exist"), err)
} | explode_data.jsonl/71216 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 68
} | [
2830,
3393,
1703,
60816,
15575,
1155,
353,
8840,
836,
8,
341,
53584,
1669,
1273,
48300,
741,
9859,
1669,
4248,
1703,
31856,
11,
330,
30616,
9715,
497,
895,
692,
6948,
12808,
1155,
11,
57920,
7650,
7121,
1703,
1454,
445,
30616,
9715,
497... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestWatchPurposefulShutdown(t *testing.T) {
fakeClient := NewFakeEtcdClient(t)
h := EtcdHelper{fakeClient, codec, versioner}
fakeClient.expectNotFoundGetSet["/some/key"] = struct{}{}
// Test purposeful shutdown
watching := h.Watch("/some/key", 0)
fakeClient.WaitForWatchCompletion()
watching.Stop()
// Did everything shut down?
if _, open := <-fakeClient.WatchResponse; open {
t.Errorf("A stop did not cause a graceful shutdown")
}
if _, open := <-watching.ResultChan(); open {
t.Errorf("An injected error did not cause a graceful shutdown")
}
} | explode_data.jsonl/40984 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 189
} | [
2830,
3393,
14247,
74033,
1262,
62004,
1155,
353,
8840,
836,
8,
341,
1166,
726,
2959,
1669,
1532,
52317,
31860,
4385,
2959,
1155,
340,
9598,
1669,
18888,
4385,
5511,
90,
30570,
2959,
11,
34647,
11,
2319,
261,
532,
1166,
726,
2959,
25952... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestPageHTMLContent(t *testing.T) {
b := newTestSitesBuilder(t)
b.WithSimpleConfigFile()
frontmatter := `---
title: "HTML Content"
---
`
b.WithContent("regular.html", frontmatter+`<h1>Hugo</h1>`)
b.WithContent("noblackfridayforyou.html", frontmatter+`**Hugo!**`)
b.WithContent("manualsummary.html", frontmatter+`
<p>This is summary</p>
<!--more-->
<p>This is the main content.</p>`)
b.Build(BuildCfg{})
b.AssertFileContent(
"public/regular/index.html",
"Single: HTML Content|Hello|en|RelPermalink: /regular/|",
"Summary: Hugo|Truncated: false")
b.AssertFileContent(
"public/noblackfridayforyou/index.html",
"Permalink: http://example.com/noblackfridayforyou/|**Hugo!**|",
)
// https://github.com/gohugoio/hugo/issues/5723
b.AssertFileContent(
"public/manualsummary/index.html",
"Single: HTML Content|Hello|en|RelPermalink: /manualsummary/|",
"Summary: \n<p>This is summary</p>\n|Truncated: true",
"|<p>This is the main content.</p>|",
)
} | explode_data.jsonl/60631 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 403
} | [
2830,
3393,
2665,
5835,
2762,
1155,
353,
8840,
836,
8,
341,
2233,
1669,
501,
2271,
93690,
3297,
1155,
340,
2233,
26124,
16374,
2648,
1703,
2822,
1166,
9411,
58965,
1669,
1565,
10952,
2102,
25,
330,
5835,
8883,
698,
10952,
3989,
2233,
26... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNotIteratorBasics(t *testing.T) {
ctx := context.TODO()
allIt := NewFixed(
Int64Node(1),
Int64Node(2),
Int64Node(3),
Int64Node(4),
)
toComplementIt := NewFixed(
Int64Node(2),
Int64Node(4),
)
not := NewNot(toComplementIt, allIt)
if v, _ := not.Size(); v != 2 {
t.Errorf("Unexpected iterator size: got:%d, expected: %d", v, 2)
}
expect := []int{1, 3}
for i := 0; i < 2; i++ {
if got := iterated(not); !reflect.DeepEqual(got, expect) {
t.Errorf("Failed to iterate Not correctly on repeat %d: got:%v expected:%v", i, got, expect)
}
not.Reset()
}
for _, v := range []int{1, 3} {
if !not.Contains(ctx, Int64Node(v)) {
t.Errorf("Failed to correctly check %d as true", v)
}
}
for _, v := range []int{2, 4} {
if not.Contains(ctx, Int64Node(v)) {
t.Errorf("Failed to correctly check %d as false", v)
}
}
} | explode_data.jsonl/59598 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 389
} | [
2830,
3393,
2623,
11951,
33603,
1211,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
90988,
741,
50960,
2132,
1669,
1532,
13520,
1006,
197,
57152,
21,
19,
1955,
7,
16,
1326,
197,
57152,
21,
19,
1955,
7,
17,
1326,
197,
57152,
21,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestKeeperDB_EligibleUpkeeps_KeepersRotate(t *testing.T) {
t.Parallel()
store, orm, cleanup := setupKeeperDB(t)
defer cleanup()
db := store.DB
ethKeyStore := cltest.NewKeyStore(t, store.DB).Eth()
registry, _ := cltest.MustInsertKeeperRegistry(t, store, ethKeyStore)
registry.NumKeepers = 5
require.NoError(t, store.DB.Save(®istry).Error)
cltest.MustInsertUpkeepForRegistry(t, store, registry)
cltest.AssertCount(t, db, keeper.Registry{}, 1)
cltest.AssertCount(t, db, &keeper.UpkeepRegistration{}, 1)
// out of 5 valid block ranges, with 5 keepers, we are eligible
// to submit on exactly 1 of them
list1, err := orm.EligibleUpkeepsForRegistry(context.Background(), registry.ContractAddress, 20, 0)
require.NoError(t, err)
list2, err := orm.EligibleUpkeepsForRegistry(context.Background(), registry.ContractAddress, 41, 0)
require.NoError(t, err)
list3, err := orm.EligibleUpkeepsForRegistry(context.Background(), registry.ContractAddress, 62, 0)
require.NoError(t, err)
list4, err := orm.EligibleUpkeepsForRegistry(context.Background(), registry.ContractAddress, 83, 0)
require.NoError(t, err)
list5, err := orm.EligibleUpkeepsForRegistry(context.Background(), registry.ContractAddress, 104, 0)
require.NoError(t, err)
totalEligible := len(list1) + len(list2) + len(list3) + len(list4) + len(list5)
require.Equal(t, 1, totalEligible)
} | explode_data.jsonl/27011 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 502
} | [
2830,
3393,
77233,
3506,
2089,
7708,
1238,
2324,
440,
7124,
62,
19434,
388,
34540,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
57279,
11,
67602,
11,
21290,
1669,
6505,
77233,
3506,
1155,
340,
16867,
21290,
741,
20939,
1669,
35... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) {
t.Parallel()
require := require.New(t)
a := NewTestAgent(t.Name(), TestACLConfig()+testAllowProxyConfig())
defer a.Shutdown()
// Register a service with a managed proxy
{
reg := &structs.ServiceDefinition{
ID: "test-id",
Name: "test",
Address: "127.0.0.1",
Port: 8000,
Check: structs.CheckType{
TTL: 15 * time.Second,
},
Connect: &structs.ServiceConnect{
Proxy: &structs.ServiceDefinitionConnectProxy{},
},
}
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg))
resp := httptest.NewRecorder()
_, err := a.srv.AgentRegisterService(resp, req)
require.NoError(err)
require.Equal(200, resp.Code, "body: %s", resp.Body.String())
}
// Create an ACL with service:write for our service
var token string
{
args := map[string]interface{}{
"Name": "User Token",
"Type": "client",
"Rules": `service "test" { policy = "write" }`,
}
req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
resp := httptest.NewRecorder()
obj, err := a.srv.ACLCreate(resp, req)
if err != nil {
t.Fatalf("err: %v", err)
}
aclResp := obj.(aclCreateResponse)
token = aclResp.ID
}
req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil)
resp := httptest.NewRecorder()
obj, err := a.srv.AgentConnectCALeafCert(resp, req)
require.NoError(err)
// Get the issued cert
_, ok := obj.(*structs.IssuedCert)
require.True(ok)
} | explode_data.jsonl/33655 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 633
} | [
2830,
3393,
16810,
14611,
49533,
68,
2577,
36934,
63692,
1860,
7985,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
17957,
1669,
1373,
7121,
1155,
340,
11323,
1669,
1532,
2271,
16810,
1155,
2967,
1507,
3393,
55393,
2648,
17140,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestRestoreSystemTableFromFullClusterBackup(t *testing.T) {
defer leaktest.AfterTest(t)()
const numAccounts = 10
_, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
defer cleanupFn()
sqlDB.Exec(t, `CREATE USER maxroach`)
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDB.Exec(t, `CREATE DATABASE temp_sys`)
sqlDB.Exec(t, `RESTORE system.users FROM $1 WITH into_db='temp_sys'`, localFoo)
sqlDB.CheckQueryResults(t, "SELECT * FROM temp_sys.users", sqlDB.QueryStr(t, "SELECT * FROM system.users"))
} | explode_data.jsonl/48483 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 213
} | [
2830,
3393,
56284,
2320,
2556,
3830,
9432,
28678,
56245,
1155,
353,
8840,
836,
8,
341,
16867,
23352,
1944,
36892,
2271,
1155,
8,
2822,
4777,
1629,
41369,
284,
220,
16,
15,
198,
197,
6878,
8358,
5704,
3506,
11,
8358,
21290,
24911,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestName_ModulesDedup(t *testing.T) {
if usesOldGolist {
t.Skip("pre-modules version of Go")
}
exported := packagestest.Export(t, packagestest.Modules, []packagestest.Module{{
Name: "golang.org/fake",
Files: map[string]interface{}{
"fake.go": `package fake`,
}}})
defer exported.Cleanup()
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
// testdata/TestNamed_ModulesDedup contains:
// - pkg/mod/github.com/heschik/tools-testrepo/v2@v2.0.2/pkg/pkg.go
// - pkg/mod/github.com/heschik/tools-testrepo/v2@v2.0.1/pkg/pkg.go
// - pkg/mod/github.com/heschik/tools-testrepo@v1.0.0/pkg/pkg.go
// but, inexplicably, not v2.0.0. Nobody knows why.
exported.Config.Mode = packages.LoadImports
exported.Config.Env = append(exported.Config.Env, "GOPATH="+wd+"/testdata/TestName_ModulesDedup")
initial, err := packages.Load(exported.Config, "iamashamedtousethedisabledqueryname=pkg")
if err != nil {
t.Fatal(err)
}
for _, pkg := range initial {
if strings.Contains(pkg.PkgPath, "v2") {
if strings.Contains(pkg.GoFiles[0], "v2.0.2") {
return
}
}
}
t.Errorf("didn't find v2.0.2 of pkg in Load results: %v", initial)
} | explode_data.jsonl/45197 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 515
} | [
2830,
3393,
675,
71485,
2425,
35,
291,
454,
1155,
353,
8840,
836,
8,
341,
743,
5711,
18284,
38,
34675,
341,
197,
3244,
57776,
445,
1726,
61079,
2319,
315,
5994,
1138,
197,
630,
59440,
291,
1669,
6328,
267,
477,
81077,
1155,
11,
6328,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestWrappedErrors(t *testing.T) {
t.Parallel()
const err semerr.Error = "error"
testCases := []struct {
Name string
Create func(err error) error
Temporary bool
}{
{
Name: "StatusRequestTimeoutError",
Create: semerr.NewStatusRequestTimeoutError,
Temporary: true,
},
{
Name: "InternalServerError",
Create: semerr.NewInternalServerError,
Temporary: false,
},
{
Name: "BadRequestError",
Create: semerr.NewBadRequestError,
Temporary: false,
},
{
Name: "UnsupportedMediaTypeError",
Create: semerr.NewUnsupportedMediaTypeError,
Temporary: false,
},
{
Name: "StatusGatewayTimeoutError",
Create: semerr.NewStatusGatewayTimeoutError,
Temporary: true,
},
{
Name: "NotFoundError",
Create: semerr.NewNotFoundError,
Temporary: false,
},
{
Name: "ConflictError",
Create: semerr.NewConflictError,
Temporary: false,
},
{
Name: "ForbiddenError",
Create: semerr.NewForbiddenError,
Temporary: false,
},
{
Name: "TooManyRequestsError",
Create: semerr.NewTooManyRequestsError,
Temporary: false,
},
{
Name: "RequestEntityTooLargeError",
Create: semerr.NewRequestEntityTooLargeError,
Temporary: false,
},
{
Name: "UnimplementedError",
Create: semerr.NewUnimplementedError,
Temporary: false,
},
{
Name: "ServiceUnavailableError",
Create: semerr.NewServiceUnavailableError,
Temporary: true,
},
{
Name: "UnauthorizedError",
Create: semerr.NewUnauthorizedError,
Temporary: false,
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
errWrapped := tc.Create(err)
switch {
case !strings.HasSuffix(tc.Name, "Error"):
t.Fatal(tc.Name)
case tc.Create(nil) != nil:
t.Fatal()
case errWrapped.Error() != err.Error():
t.Fatal("exp", err.Error(), "got", errWrapped.Error())
case !errors.Is(errWrapped, err):
t.Fatal()
case semerr.IsTemporaryError(errWrapped) != tc.Temporary:
t.Fatal()
}
})
}
} | explode_data.jsonl/72526 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 995
} | [
2830,
3393,
67795,
13877,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
4777,
1848,
5234,
615,
6141,
284,
330,
841,
1837,
18185,
37302,
1669,
3056,
1235,
341,
197,
21297,
414,
914,
198,
197,
75569,
262,
2915,
3964,
1465,
8,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAddAndWrite(t *testing.T) {
builder, err := NewBuilder(Header{
Author: "devblok",
DateCreated: time.Now().Unix(),
Version: 1,
})
if err != nil {
t.Error(err)
}
builder.Add("test", bytes.NewReader([]byte("idunvovkjnreovmegihjbrqlkmfrjnb")))
builder.Add("test2", bytes.NewReader([]byte("idunvovkjnreovmsdvwrvnervnreegihjbrqlkmfrjnb")))
if len(builder.files) != 2 {
t.Error("incorrect number of files present")
}
var data []byte
buf := bytes.NewBuffer(data)
if written, err := builder.WriteTo(buf); err != nil {
t.Error(err)
} else {
t.Logf("written %d", written)
}
} | explode_data.jsonl/74121 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 261
} | [
2830,
3393,
2212,
3036,
7985,
1155,
353,
8840,
836,
8,
341,
44546,
11,
1848,
1669,
1532,
3297,
7,
4047,
515,
197,
197,
7133,
25,
414,
330,
3583,
38145,
74,
756,
197,
48730,
11694,
25,
882,
13244,
1005,
55832,
3148,
197,
77847,
25,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestServerRequestVoteDeniedForStaleTerm(t *testing.T) {
s := newTestServer("1", &testTransporter{})
s.Start()
if _, err := s.Do(&DefaultJoinCommand{Name: s.Name()}); err != nil {
t.Fatalf("Server %s unable to join: %v", s.Name(), err)
}
s.(*server).currentTerm = 2
defer s.Stop()
resp := s.RequestVote(newRequestVoteRequest(1, "foo", 1, 0))
if resp.Term != 2 || resp.VoteGranted {
t.Fatalf("Invalid request vote response: %v/%v", resp.Term, resp.VoteGranted)
}
if s.Term() != 2 && s.State() != Follower {
t.Fatalf("Server did not update term and demote: %v / %v", s.Term(), s.State())
}
} | explode_data.jsonl/44058 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 237
} | [
2830,
3393,
5475,
1900,
41412,
54481,
2461,
623,
1574,
17249,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
501,
2271,
5475,
445,
16,
497,
609,
1944,
27560,
261,
6257,
692,
1903,
12101,
741,
743,
8358,
1848,
1669,
274,
33596,
2099,
3675,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestPidNamespace(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
testSandboxID := "sandbox-id"
containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
for desc, test := range map[string]struct {
pidNS runtime.NamespaceMode
expected runtimespec.LinuxNamespace
}{
"node namespace mode": {
pidNS: runtime.NamespaceMode_NODE,
expected: runtimespec.LinuxNamespace{
Type: runtimespec.PIDNamespace,
Path: opts.GetPIDNamespace(testPid),
},
},
"container namespace mode": {
pidNS: runtime.NamespaceMode_CONTAINER,
expected: runtimespec.LinuxNamespace{
Type: runtimespec.PIDNamespace,
},
},
"pod namespace mode": {
pidNS: runtime.NamespaceMode_POD,
expected: runtimespec.LinuxNamespace{
Type: runtimespec.PIDNamespace,
Path: opts.GetPIDNamespace(testPid),
},
},
} {
t.Logf("TestCase %q", desc)
containerConfig.Linux.SecurityContext.NamespaceOptions = &runtime.NamespaceOption{Pid: test.pidNS}
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
assert.Contains(t, spec.Linux.Namespaces, test.expected)
}
} | explode_data.jsonl/6413 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 491
} | [
2830,
3393,
32339,
22699,
1155,
353,
8840,
836,
8,
341,
18185,
915,
1669,
330,
1944,
12897,
698,
18185,
32339,
1669,
2622,
18,
17,
7,
16,
17,
18,
19,
340,
18185,
50,
31536,
915,
1669,
330,
76756,
12897,
698,
53290,
2648,
11,
42754,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestDB_BeginRW_Closed(t *testing.T) {
var db bolt.DB
if _, err := db.Begin(true); err != bolt.ErrDatabaseNotOpen {
t.Fatalf("unexpected error: %s", err)
}
} | explode_data.jsonl/27475 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 69
} | [
2830,
3393,
3506,
93447,
56368,
920,
9259,
1155,
353,
8840,
836,
8,
341,
2405,
2927,
31842,
22537,
198,
743,
8358,
1848,
1669,
2927,
28467,
3715,
1215,
1848,
961,
31842,
27862,
5988,
2623,
5002,
341,
197,
3244,
30762,
445,
53859,
1465,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestHandlesExistingCommonSecret(t *testing.T) {
t.Parallel()
ns := &core_v1.Namespace{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.NamespaceKind,
APIVersion: core_v1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: namespaceName,
Labels: map[string]string{
voyager.ServiceNameLabel: serviceName,
},
},
}
existingSecret := &core_v1.Secret{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.SecretKind,
APIVersion: core_v1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: commonSecretName,
Namespace: ns.Name,
},
Type: core_v1.SecretTypeOpaque,
Data: map[string][]byte{
"Some": []byte("Base64thing"),
},
}
tc := testCase{
mainClientObjects: []runtime.Object{ns, existingDefaultDockerSecret(), existingSecret},
ns: ns,
test: func(t *testing.T, cntrlr *Controller, ctx *ctrl.ProcessContext, tc *testCase) {
// mock that the secret already exists
existsErrorFunc := func(action kube_testing.Action) (bool, runtime.Object, error) {
if create, ok := action.(kube_testing.CreateAction); ok {
if secret, ok := create.GetObject().(*core_v1.Secret); ok {
if secret.Name == "common-secrets" {
return true, nil, api_errors.NewAlreadyExists(action.GetResource().GroupResource(), commonSecretName)
}
}
}
return false, nil, nil
}
tc.mainFake.PrependReactor("create", "secrets", existsErrorFunc)
service := &creator_v1.Service{
ObjectMeta: meta_v1.ObjectMeta{
Name: serviceName,
},
Spec: creator_v1.ServiceSpec{
ResourceOwner: "somebody",
BusinessUnit: "the unit",
LoggingID: "some-logging-id",
Metadata: creator_v1.ServiceMetadata{
PagerDuty: &creator_v1.PagerDutyMetadata{},
},
SSAMContainerName: "some-ssam-container",
ResourceTags: map[voyager.Tag]string{
"foo": "bar",
"baz": "blah",
},
},
}
tc.scFake.On("GetService", mock.Anything, auth.NoUser(), serviceNameSc).Return(service, nil)
_, err := cntrlr.Process(ctx)
require.NoError(t, err)
// the fact that there is no error means that it
// handled the already exists error
},
}
tc.run(t)
} | explode_data.jsonl/4284 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 961
} | [
2830,
3393,
65928,
53067,
10839,
19773,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
84041,
1669,
609,
2153,
2273,
16,
46011,
515,
197,
27725,
12175,
25,
8823,
2273,
16,
10184,
12175,
515,
298,
197,
10629,
25,
981,
595,
23,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestStartAndEndOfWeek(t *testing.T) {
t.Log(StartOfWeek(time.Now()))
t.Log(PreviousStartOfWeek(time.Now()))
t.Log(NextStartOfWeek(time.Now()))
t.Log(EndOfWeek(time.Now()))
t.Log(PreviousEndOfWeek(time.Now()))
t.Log(NextEndOfWeek(time.Now()))
t.Log(StartOfWeek(time.Now(), time.Sunday))
t.Log(PreviousStartOfWeek(time.Now(), time.Sunday))
t.Log(NextStartOfWeek(time.Now(), time.Sunday))
t.Log(EndOfWeek(time.Now(), time.Sunday))
t.Log(PreviousEndOfWeek(time.Now(), time.Sunday))
t.Log(NextEndOfWeek(time.Now(), time.Sunday))
} | explode_data.jsonl/80463 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 223
} | [
2830,
3393,
3479,
3036,
3727,
38353,
1155,
353,
8840,
836,
8,
341,
3244,
5247,
7,
3479,
38353,
9730,
13244,
12145,
3244,
5247,
7,
21291,
3479,
38353,
9730,
13244,
12145,
3244,
5247,
7,
5847,
3479,
38353,
9730,
13244,
49962,
3244,
5247,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTouch(t *testing.T) {
fileName := dirRoot + "/touch.txt"
err := Touch(fileName)
if err != nil {
panic(err)
}
t.Cleanup(func() {
_ = RemoveWithRecur(dirRoot)
})
if !Exists(fileName) {
t.Error("Touch test failed!")
}
} | explode_data.jsonl/34170 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 105
} | [
2830,
3393,
11309,
1155,
353,
8840,
836,
8,
341,
17661,
675,
1669,
5419,
8439,
488,
3521,
22020,
3909,
1837,
9859,
1669,
19338,
23014,
340,
743,
1848,
961,
2092,
341,
197,
30764,
3964,
340,
197,
630,
3244,
727,
60639,
18552,
368,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMonthsAbbreviated(t *testing.T) {
trans := New()
months := trans.MonthsAbbreviated()
for i, month := range months {
s := trans.MonthAbbreviated(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "Jan",
// },
// {
// idx: 2,
// expected: "Feb",
// },
// {
// idx: 3,
// expected: "Mar",
// },
// {
// idx: 4,
// expected: "Apr",
// },
// {
// idx: 5,
// expected: "May",
// },
// {
// idx: 6,
// expected: "Jun",
// },
// {
// idx: 7,
// expected: "Jul",
// },
// {
// idx: 8,
// expected: "Aug",
// },
// {
// idx: 9,
// expected: "Sep",
// },
// {
// idx: 10,
// expected: "Oct",
// },
// {
// idx: 11,
// expected: "Nov",
// },
// {
// idx: 12,
// expected: "Dec",
// },
}
for _, tt := range tests {
s := trans.MonthAbbreviated(time.Month(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
} | explode_data.jsonl/1286 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 660
} | [
2830,
3393,
59184,
80219,
7282,
10029,
1155,
353,
8840,
836,
8,
1476,
72453,
1669,
1532,
741,
197,
49714,
1669,
1356,
48383,
82,
80219,
7282,
10029,
2822,
2023,
600,
11,
2254,
1669,
2088,
3951,
341,
197,
1903,
1669,
1356,
48383,
80219,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestMergeSort(t *testing.T) {
t.Run("should sort int slice correctly", func(t *testing.T) {
a := util.GenerateSeries(5)
got := sorting.MergeSort(a)
want := []int{1, 2, 3, 4, 5}
if !reflect.DeepEqual(got, want) {
t.Errorf("got: %v, want: %v", got, want)
}
if !reflect.DeepEqual(a, []int{5, 4, 3, 2, 1}) {
t.Errorf("shoule not mutate input slice, got: %v", a)
}
})
} | explode_data.jsonl/14909 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 182
} | [
2830,
3393,
52096,
10231,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
5445,
3378,
526,
15983,
12440,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
11323,
1669,
4094,
57582,
25544,
7,
20,
340,
197,
3174,
354,
1669,
28273,
93855,
10... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestNewRemoteModuleInstaller(t *testing.T) {
var (
got = NewRemoteModuleInstaller()
wantDownloader = downloader.NewDownloader()
)
if !reflect.DeepEqual(got.downloader, wantDownloader) {
t.Errorf("downloader got: '%v', want: '%v'", got.downloader, wantDownloader)
}
} | explode_data.jsonl/16232 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 115
} | [
2830,
3393,
3564,
24703,
3332,
88096,
1155,
353,
8840,
836,
8,
341,
2405,
2399,
197,
3174,
354,
310,
284,
1532,
24703,
3332,
88096,
741,
197,
50780,
92698,
284,
86774,
7121,
92698,
741,
197,
340,
743,
753,
34913,
94750,
3268,
354,
18148... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestTmplServiceAccount(t *testing.T) {
woc := newWoc()
woc.execWf.Spec.ServiceAccountName = "foo"
woc.execWf.Spec.Templates[0].ServiceAccountName = "tmpl"
tmplCtx, err := woc.createTemplateContext(wfv1.ResourceScopeLocal, "")
assert.NoError(t, err)
ctx := context.Background()
_, err = woc.executeContainer(ctx, woc.execWf.Spec.Entrypoint, tmplCtx.GetTemplateScope(), &woc.execWf.Spec.Templates[0], &wfv1.WorkflowStep{}, &executeTemplateOpts{})
assert.NoError(t, err)
pods, err := listPods(woc)
assert.NoError(t, err)
assert.Len(t, pods.Items, 1)
pod := pods.Items[0]
assert.Equal(t, pod.Spec.ServiceAccountName, "tmpl")
} | explode_data.jsonl/75365 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 264
} | [
2830,
3393,
51,
54010,
1860,
7365,
1155,
353,
8840,
836,
8,
341,
6692,
509,
1669,
501,
54,
509,
741,
6692,
509,
15776,
54,
69,
36473,
13860,
7365,
675,
284,
330,
7975,
698,
6692,
509,
15776,
54,
69,
36473,
836,
76793,
58,
15,
936,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCompactString(t *testing.T) {
assert := require.New(t)
assert.Nil(CompactString(nil))
assert.Equal([]string{
`one`, `three`, `five`,
}, CompactString([]string{
`one`, `three`, `five`,
}))
assert.Equal([]string{
`one`, `three`, ` `, `five`,
}, CompactString([]string{
`one`, ``, `three`, ``, ` `, `five`,
}))
} | explode_data.jsonl/45564 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 142
} | [
2830,
3393,
98335,
703,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
1373,
7121,
1155,
692,
6948,
59678,
7,
98335,
703,
27907,
4390,
6948,
12808,
10556,
917,
515,
197,
197,
63,
603,
7808,
1565,
27856,
7808,
1565,
52670,
12892,
197,
2137,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestContext2Plan_targetedOrphan(t *testing.T) {
m := testModule(t, "plan-targeted-orphan")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[string]providers.Factory{
"aws": testProviderFuncFixed(p),
},
),
State: MustShimLegacyState(&State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.orphan": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "i-789xyz",
},
},
"aws_instance.nottargeted": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "i-abc123",
},
},
},
},
},
}),
Destroy: true,
Targets: []addrs.Targetable{
addrs.RootModuleInstance.Resource(
addrs.ManagedResourceMode, "aws_instance", "orphan",
),
},
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
ty := schema.ImpliedType()
if len(plan.Changes.Resources) != 1 {
t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
}
for _, res := range plan.Changes.Resources {
ric, err := res.Decode(ty)
if err != nil {
t.Fatal(err)
}
switch i := ric.Addr.String(); i {
case "aws_instance.orphan":
if res.Action != plans.Delete {
t.Fatalf("resource %s should be destroyed", ric.Addr)
}
default:
t.Fatal("unknown instance:", i)
}
}
} | explode_data.jsonl/28706 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 713
} | [
2830,
3393,
1972,
17,
20485,
11123,
291,
2195,
9943,
1155,
353,
8840,
836,
8,
341,
2109,
1669,
1273,
3332,
1155,
11,
330,
10393,
18489,
291,
26511,
9943,
1138,
3223,
1669,
1273,
5179,
445,
8635,
1138,
3223,
98063,
24911,
284,
1273,
2175... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestServer_fetchApp(t *testing.T) {
app, trx, down, err := models.NewAppForTest(nil, t)
assert.Nil(t, err)
defer down(t)
_, err = fetchAPP(app.UID, "", trx)
assert.Equal(t, err, ErrAppSecret)
} | explode_data.jsonl/30078 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 91
} | [
2830,
3393,
5475,
11803,
2164,
1155,
353,
8840,
836,
8,
341,
28236,
11,
73021,
11,
1495,
11,
1848,
1669,
4119,
7121,
2164,
2461,
2271,
27907,
11,
259,
340,
6948,
59678,
1155,
11,
1848,
340,
16867,
1495,
1155,
340,
197,
6878,
1848,
284... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestChannelNotFound_InChannel(t *testing.T) {
var expected *discordgo.Channel
cnf := &callbacks.ChannelNotFound{}
actual := cnf.InChannel()
err := deepEqual(actual, expected)
if err != nil {
t.Error(err)
}
} | explode_data.jsonl/56088 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 84
} | [
2830,
3393,
9629,
10372,
25972,
9629,
1155,
353,
8840,
836,
8,
341,
2405,
3601,
353,
42579,
3346,
38716,
271,
1444,
31737,
1669,
609,
68311,
38716,
10372,
16094,
88814,
1669,
13665,
69,
5337,
9629,
2822,
9859,
1669,
5538,
2993,
29721,
11,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestComposeNotImplemented(t *testing.T) {
c := NewParallelE2eCLI(t, binDir)
res := c.RunDockerCmd("context", "show")
res.Assert(t, icmd.Expected{Out: "default"})
res = c.RunDockerOrExitError("compose", "up")
res.Assert(t, icmd.Expected{
ExitCode: 1,
Err: `Command "compose up" not available in current context (default)`,
})
res = c.RunDockerOrExitError("compose", "-f", "titi.yaml", "up")
res.Assert(t, icmd.Expected{
ExitCode: 1,
Err: `Command "compose up" not available in current context (default)`,
})
} | explode_data.jsonl/5424 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 227
} | [
2830,
3393,
70492,
2623,
18300,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
1532,
16547,
36,
17,
68,
63959,
1155,
11,
9544,
6184,
340,
10202,
1669,
272,
16708,
35,
13659,
15613,
445,
2147,
497,
330,
3445,
1138,
10202,
11711,
1155,
11,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInMemorySignVerify(t *testing.T) {
cstore := NewInMemory()
algo := hd.Secp256k1
n1, n2, n3 := "some dude", "a dudette", "dude-ish"
// create two users and get their info
i1, _, err := cstore.NewMnemonic(n1, English, sdk.FullFundraiserPath, algo)
require.Nil(t, err)
i2, _, err := cstore.NewMnemonic(n2, English, sdk.FullFundraiserPath, algo)
require.Nil(t, err)
// let's try to sign some messages
d1 := []byte("my first message")
d2 := []byte("some other important info!")
d3 := []byte("feels like I forgot something...")
// try signing both data with both ..
s11, pub1, err := cstore.Sign(n1, d1)
require.Nil(t, err)
require.Equal(t, i1.GetPubKey(), pub1)
s12, pub1, err := cstore.Sign(n1, d2)
require.Nil(t, err)
require.Equal(t, i1.GetPubKey(), pub1)
s21, pub2, err := cstore.Sign(n2, d1)
require.Nil(t, err)
require.Equal(t, i2.GetPubKey(), pub2)
s22, pub2, err := cstore.Sign(n2, d2)
require.Nil(t, err)
require.Equal(t, i2.GetPubKey(), pub2)
// let's try to validate and make sure it only works when everything is proper
cases := []struct {
key types.PubKey
data []byte
sig []byte
valid bool
}{
// proper matches
{i1.GetPubKey(), d1, s11, true},
// change data, pubkey, or signature leads to fail
{i1.GetPubKey(), d2, s11, false},
{i2.GetPubKey(), d1, s11, false},
{i1.GetPubKey(), d1, s21, false},
// make sure other successes
{i1.GetPubKey(), d2, s12, true},
{i2.GetPubKey(), d1, s21, true},
{i2.GetPubKey(), d2, s22, true},
}
for i, tc := range cases {
valid := tc.key.VerifySignature(tc.data, tc.sig)
require.Equal(t, tc.valid, valid, "%d", i)
}
// Import a public key
armor, err := cstore.ExportPubKeyArmor(n2)
require.Nil(t, err)
err = cstore.Delete(n2)
require.NoError(t, err)
err = cstore.ImportPubKey(n3, armor)
require.NoError(t, err)
i3, err := cstore.Key(n3)
require.NoError(t, err)
require.Equal(t, i3.GetName(), n3)
// Now try to sign data with a secret-less key
_, _, err = cstore.Sign(n3, d3)
require.Error(t, err)
require.Equal(t, "cannot sign with offline keys", err.Error())
} | explode_data.jsonl/73445 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 906
} | [
2830,
3393,
641,
10642,
7264,
32627,
1155,
353,
8840,
836,
8,
341,
1444,
4314,
1669,
1532,
641,
10642,
741,
69571,
3346,
1669,
17907,
808,
757,
79,
17,
20,
21,
74,
16,
271,
9038,
16,
11,
308,
17,
11,
308,
18,
1669,
330,
14689,
350... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestNfq(t *testing.T) {
gotpacket := make(chan struct{}, 16)
cb := func(date []byte) Verdict {
gotpacket <- struct{}{}
return NF_ACCEPT
}
nfq, err := NewDefaultQueue(0, cb)
if err != nil {
t.Fatal(err)
}
l, err := net.ListenPacket("udp", "127.0.0.1:9999")
if err != nil {
t.Fatal(err)
}
defer l.Close()
addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:9999")
if err != nil {
t.Fatalf("ResolveUDPAddr failed: %v", err)
}
if _, err := l.WriteTo([]byte{1, 2, 3}, addr); err != nil {
t.Fatal(err)
}
<-gotpacket
select {
case <-gotpacket:
t.Fatal("didn't expect another packet")
default:
}
nfq.Close()
} | explode_data.jsonl/1599 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 303
} | [
2830,
3393,
45,
63919,
1155,
353,
8840,
836,
8,
1476,
3174,
354,
24829,
1669,
1281,
35190,
2036,
22655,
220,
16,
21,
692,
63810,
1669,
2915,
11970,
3056,
3782,
8,
6250,
8477,
341,
197,
3174,
354,
24829,
9119,
2036,
6257,
16094,
197,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDivideSeriesMultiReturn(t *testing.T) {
now32 := int32(time.Now().Unix())
tests := []th.MultiReturnEvalTestItem{
{
parser.NewExpr("divideSeries",
"metric[12]",
"metric2",
),
map[parser.MetricRequest][]*types.MetricData{
{"metric[12]", 0, 1}: {
types.MakeMetricData("metric1", []float64{1, 2, 3, 4, 5}, 1, now32),
types.MakeMetricData("metric2", []float64{2, 4, 6, 8, 10}, 1, now32),
},
{"metric1", 0, 1}: {
types.MakeMetricData("metric1", []float64{1, 2, 3, 4, 5}, 1, now32),
},
{"metric2", 0, 1}: {
types.MakeMetricData("metric2", []float64{2, 4, 6, 8, 10}, 1, now32),
},
},
"divideSeries",
map[string][]*types.MetricData{
"divideSeries(metric1,metric2)": {types.MakeMetricData("divideSeries(metric1,metric2)", []float64{0.5, 0.5, 0.5, 0.5, 0.5}, 1, now32)},
"divideSeries(metric2,metric2)": {types.MakeMetricData("divideSeries(metric2,metric2)", []float64{1, 1, 1, 1, 1}, 1, now32)},
},
},
}
for _, tt := range tests {
testName := tt.E.Target() + "(" + tt.E.RawArgs() + ")"
t.Run(testName, func(t *testing.T) {
th.TestMultiReturnEvalExpr(t, &tt)
})
}
} | explode_data.jsonl/47840 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 574
} | [
2830,
3393,
12509,
577,
25544,
20358,
5598,
1155,
353,
8840,
836,
8,
341,
80922,
18,
17,
1669,
526,
18,
17,
9730,
13244,
1005,
55832,
12367,
78216,
1669,
3056,
339,
57706,
5598,
54469,
2271,
1234,
515,
197,
197,
515,
298,
55804,
7121,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSetNullRange(t *testing.T) {
for _, start := range pos {
for _, end := range pos {
n := NewNulls(BatchSize)
n.SetNullRange(start, end)
for i := uint64(0); i < BatchSize; i++ {
expected := i >= start && i < end
require.Equal(t, expected, n.NullAt64(i),
"NullAt(%d) should be %t after SetNullRange(%d, %d)", i, expected, start, end)
}
}
}
} | explode_data.jsonl/37155 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 169
} | [
2830,
3393,
1649,
3280,
6046,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
1191,
1669,
2088,
1133,
341,
197,
2023,
8358,
835,
1669,
2088,
1133,
341,
298,
9038,
1669,
1532,
3280,
82,
5349,
754,
1695,
340,
298,
9038,
4202,
3280,
6046,
1063... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestNamespacedServiceBrokerClient(t *testing.T) {
const name = "test-broker"
const namespace = "test-namespace"
resetFeaturesFunc, err := enableNamespacedResources()
if err != nil {
t.Fatal(err)
}
defer resetFeaturesFunc()
client, _, shutdownServer := getFreshApiserverAndClient(t, func() runtime.Object {
return &servicecatalog.ClusterServiceBroker{}
})
defer shutdownServer()
if err := testNamespacedServiceBrokerClient(client, namespace, name); err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/51880 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 170
} | [
2830,
3393,
7980,
68552,
1860,
65545,
2959,
1155,
353,
8840,
836,
8,
341,
4777,
829,
284,
330,
1944,
1455,
45985,
698,
4777,
4473,
284,
330,
1944,
12,
2231,
698,
70343,
21336,
9626,
11,
1848,
1669,
7283,
7980,
68552,
11277,
741,
743,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAssociateUserToken(t *testing.T) {
ts := httptest.NewTLSServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, associateUserTokenResponse)
}),
)
defer ts.Close()
duo := buildAdminClient(ts.URL, nil)
result, err := duo.AssociateUserToken("DU3RP9I2WOC59VZX672N", "DHEKH0JJIYC1LX3AZWO4")
if err != nil {
t.Errorf("Unexpected error from AssociateUserToken call %v", err.Error())
}
if result.Stat != "OK" {
t.Errorf("Expected OK, but got %s", result.Stat)
}
if len(result.Response) != 0 {
t.Errorf("Expected empty response, but got %s", result.Response)
}
} | explode_data.jsonl/61416 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 252
} | [
2830,
3393,
95540,
1474,
3323,
1155,
353,
8840,
836,
8,
341,
57441,
1669,
54320,
70334,
7121,
13470,
1220,
2836,
1006,
197,
28080,
89164,
18552,
3622,
1758,
37508,
11,
435,
353,
1254,
9659,
8,
341,
298,
11009,
991,
33655,
3622,
11,
2194... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBuildStatSummaryRequest(t *testing.T) {
t.Run("Maps Kubernetes friendly names to canonical names", func(t *testing.T) {
expectations := map[string]string{
"deployments": k8s.Deployment,
"deployment": k8s.Deployment,
"deploy": k8s.Deployment,
"pods": k8s.Pod,
"pod": k8s.Pod,
"po": k8s.Pod,
}
for friendly, canonical := range expectations {
statSummaryRequest, err := BuildStatSummaryRequest(
StatsSummaryRequestParams{
StatsBaseRequestParams: StatsBaseRequestParams{
ResourceType: friendly,
},
},
)
if err != nil {
t.Fatalf("Unexpected error from BuildStatSummaryRequest [%s => %s]: %s", friendly, canonical, err)
}
if statSummaryRequest.Selector.Resource.Type != canonical {
t.Fatalf("Unexpected resource type from BuildStatSummaryRequest [%s => %s]: %s", friendly, canonical, statSummaryRequest.Selector.Resource.Type)
}
}
})
t.Run("Parses valid time windows", func(t *testing.T) {
expectations := []string{
"1m",
"60s",
"1m",
}
for _, timeWindow := range expectations {
statSummaryRequest, err := BuildStatSummaryRequest(
StatsSummaryRequestParams{
StatsBaseRequestParams: StatsBaseRequestParams{
TimeWindow: timeWindow,
ResourceType: k8s.Deployment,
},
},
)
if err != nil {
t.Fatalf("Unexpected error from BuildStatSummaryRequest [%s => %s]", timeWindow, err)
}
if statSummaryRequest.TimeWindow != timeWindow {
t.Fatalf("Unexpected TimeWindow from BuildStatSummaryRequest [%s => %s]", timeWindow, statSummaryRequest.TimeWindow)
}
}
})
t.Run("Rejects invalid time windows", func(t *testing.T) {
expectations := map[string]string{
"1": "time: missing unit in duration \"1\"",
"s": "time: invalid duration \"s\"",
}
for timeWindow, msg := range expectations {
_, err := BuildStatSummaryRequest(
StatsSummaryRequestParams{
StatsBaseRequestParams: StatsBaseRequestParams{
TimeWindow: timeWindow,
},
},
)
if err == nil {
t.Fatalf("BuildStatSummaryRequest(%s) unexpectedly succeeded, should have returned %s", timeWindow, msg)
}
if err.Error() != msg {
t.Fatalf("BuildStatSummaryRequest(%s) should have returned: %s but got unexpected message: %s", timeWindow, msg, err)
}
}
})
t.Run("Rejects invalid Kubernetes resource types", func(t *testing.T) {
expectations := map[string]string{
"foo": "cannot find Kubernetes canonical name from friendly name [foo]",
"": "cannot find Kubernetes canonical name from friendly name []",
}
for input, msg := range expectations {
_, err := BuildStatSummaryRequest(
StatsSummaryRequestParams{
StatsBaseRequestParams: StatsBaseRequestParams{
ResourceType: input,
},
},
)
if err == nil {
t.Fatalf("BuildStatSummaryRequest(%s) unexpectedly succeeded, should have returned %s", input, msg)
}
if err.Error() != msg {
t.Fatalf("BuildStatSummaryRequest(%s) should have returned: %s but got unexpected message: %s", input, msg, err)
}
}
})
} | explode_data.jsonl/53451 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1206
} | [
2830,
3393,
11066,
15878,
19237,
1900,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
36562,
66374,
11657,
5036,
311,
42453,
5036,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
24952,
804,
1669,
2415,
14032,
30953,
515,
298,
197,
1,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestHead(t *testing.T) {
// TODO: test result
srv := httptest.NewServer(http.HandlerFunc(HandleHead))
defer srv.Close()
url := "http://" + srv.Listener.Addr().String()
resp, err := Head(url, nil, nil)
if err != nil {
t.Error(err)
}
assert.Equal(t, 200, resp.Status())
} | explode_data.jsonl/14810 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 118
} | [
2830,
3393,
12346,
1155,
353,
8840,
836,
8,
341,
197,
322,
5343,
25,
1273,
1102,
198,
1903,
10553,
1669,
54320,
70334,
7121,
5475,
19886,
89164,
90832,
12346,
1171,
16867,
43578,
10421,
741,
19320,
1669,
330,
1254,
52136,
488,
43578,
6409... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFlushIntervalHeaders(t *testing.T) {
const expected = "hi"
stopCh := make(chan struct{})
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("MyHeader", expected)
w.WriteHeader(200)
w.(http.Flusher).Flush()
<-stopCh
}))
defer backend.Close()
defer close(stopCh)
backendURL, err := url.Parse(backend.URL)
if err != nil {
t.Fatal(err)
}
responder := &fakeResponder{t: t}
proxyHandler := NewUpgradeAwareHandler(backendURL, nil, false, false, responder)
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
req, _ := http.NewRequest("GET", frontend.URL, nil)
req.Close = true
ctx, cancel := context.WithTimeout(req.Context(), 10*time.Second)
defer cancel()
req = req.WithContext(ctx)
res, err := frontend.Client().Do(req)
if err != nil {
t.Fatalf("Get: %v", err)
}
defer res.Body.Close()
if res.Header.Get("MyHeader") != expected {
t.Errorf("got header %q; expected %q", res.Header.Get("MyHeader"), expected)
}
} | explode_data.jsonl/35096 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 396
} | [
2830,
3393,
46874,
10256,
10574,
1155,
353,
8840,
836,
8,
341,
4777,
3601,
284,
330,
6023,
698,
62644,
1143,
1669,
1281,
35190,
2036,
37790,
197,
20942,
1669,
54320,
70334,
7121,
5475,
19886,
89164,
18552,
3622,
1758,
37508,
11,
435,
353,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestVisitorDisableChildren(t *testing.T) {
for _, tt := range nodesToTest {
v := &visitorMock{false, []string{}}
tt.node.Walk(v)
expected := []string{}
actual := v.visitedKeys
assert.DeepEqual(t, expected, actual)
}
} | explode_data.jsonl/50809 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 94
} | [
2830,
3393,
16796,
25479,
11539,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
17853,
1669,
2088,
7798,
1249,
2271,
341,
197,
5195,
1669,
609,
39985,
11571,
90,
3849,
11,
3056,
917,
6257,
532,
197,
3244,
83,
12097,
1175,
1692,
3747,
692,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestImageCopy(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Errorf("copying image and using it should panic")
}
}()
img0, _ := NewImage(256, 256, FilterDefault)
img1 := *img0
img1.Fill(color.Transparent)
} | explode_data.jsonl/10906 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 96
} | [
2830,
3393,
1906,
12106,
1155,
353,
8840,
836,
8,
341,
16867,
2915,
368,
341,
197,
743,
435,
1669,
11731,
2129,
435,
621,
2092,
341,
298,
3244,
13080,
445,
8560,
287,
2168,
323,
1667,
432,
1265,
21975,
1138,
197,
197,
532,
197,
66816,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestTraceConfigValidation(t *testing.T) {
factory := NewFactory()
config := factory.CreateDefaultConfig().(*Config)
assert.Error(t, config.validate())
config.Topic = "projects/000project/topics/my-topic"
assert.Error(t, config.validate())
config.Topic = "projects/my-project/subscriptions/my-subscription"
assert.Error(t, config.validate())
config.Topic = "projects/my-project/topics/my-topic"
assert.NoError(t, config.validate())
} | explode_data.jsonl/9313 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 152
} | [
2830,
3393,
6550,
2648,
13799,
1155,
353,
8840,
836,
8,
341,
1166,
2919,
1669,
1532,
4153,
741,
25873,
1669,
8633,
7251,
3675,
2648,
1005,
4071,
2648,
340,
6948,
6141,
1155,
11,
2193,
19520,
2398,
25873,
98354,
284,
330,
17161,
14,
15,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBuilder(t *testing.T) {
result := aTask("ID1",
withIP("10.10.10.10"),
withLabel("foo", "bar"),
withLabel("fii", "bar"),
withLabel("fuu", "bar"),
withInfo("name1",
withPorts(withPort("TCP", 80, "p"),
withPortTCP(81, "n"))),
withStatus(withHealthy(true), withState("a")))
expected := state.Task{
FrameworkID: "",
ID: "ID1",
SlaveIP: "10.10.10.10",
Name: "",
SlaveID: "",
State: "",
Statuses: []state.Status{{
State: "a",
Healthy: Bool(true),
ContainerStatus: state.ContainerStatus{},
}},
DiscoveryInfo: state.DiscoveryInfo{
Name: "name1",
Labels: struct {
Labels []state.Label "json:\"labels\""
}{},
Ports: state.Ports{DiscoveryPorts: []state.DiscoveryPort{
{Protocol: "TCP", Number: 80, Name: "p"},
{Protocol: "TCP", Number: 81, Name: "n"}}}},
Labels: []state.Label{
{Key: "foo", Value: "bar"},
{Key: "fii", Value: "bar"},
{Key: "fuu", Value: "bar"}}}
assert.Equal(t, expected, result)
} | explode_data.jsonl/24531 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 496
} | [
2830,
3393,
3297,
1155,
353,
8840,
836,
8,
341,
9559,
1669,
264,
6262,
445,
915,
16,
756,
197,
46948,
3298,
445,
16,
15,
13,
16,
15,
13,
16,
15,
13,
16,
15,
4461,
197,
46948,
2476,
445,
7975,
497,
330,
2257,
4461,
197,
46948,
24... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestParseSignedData(t *testing.T) {
mockSigner := NewMockSigner()
t.Run("success", func(t *testing.T) {
jwsSignature, err := internal.NewJWS(nil, nil, []byte("payload"), mockSigner)
require.NoError(t, err)
compactJWS, err := jwsSignature.SerializeCompact(false)
require.NoError(t, err)
jws, err := parseSignedData(compactJWS)
require.NoError(t, err)
require.NotNil(t, jws)
})
t.Run("missing signed data", func(t *testing.T) {
jws, err := parseSignedData("")
require.Error(t, err)
require.Nil(t, jws)
require.Contains(t, err.Error(), "invalid JWS compact format")
})
t.Run("missing protected headers", func(t *testing.T) {
jws, err := parseSignedData(".cGF5bG9hZA.c2lnbmF0dXJl")
require.Error(t, err)
require.Nil(t, jws)
require.Contains(t, err.Error(), "unmarshal JSON headers: unexpected end of JSON input")
})
t.Run("missing payload", func(t *testing.T) {
jwsSignature, err := internal.NewJWS(nil, nil, nil, mockSigner)
require.NoError(t, err)
compactJWS, err := jwsSignature.SerializeCompact(false)
require.NoError(t, err)
jws, err := parseSignedData(compactJWS)
require.Error(t, err)
require.Nil(t, jws)
require.Contains(t, err.Error(), "compact jws payload is empty")
})
t.Run("missing signature", func(t *testing.T) {
jws, err := parseSignedData("eyJhbGciOiJhbGciLCJraWQiOiJraWQifQ.cGF5bG9hZA.")
require.Error(t, err)
require.Nil(t, jws)
require.Contains(t, err.Error(), "compact jws signature is empty")
})
} | explode_data.jsonl/78227 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 636
} | [
2830,
3393,
14463,
49312,
1043,
1155,
353,
8840,
836,
8,
341,
77333,
7264,
261,
1669,
1532,
11571,
7264,
261,
2822,
3244,
16708,
445,
5630,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
12428,
8915,
25088,
11,
1848,
1669,
5306,
7121,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestApp_OrderOfOperations records the firing order of the App lifecycle
// hooks via a shared opCounts counter: every hook increments counts.Total
// and stores its own position, and the assertions below pin the exact
// sequence (and short-circuiting) for usage errors, shell completion,
// failing Before/After hooks, sub-command dispatch, and the default Action.
func TestApp_OrderOfOperations(t *testing.T) {
	counts := &opCounts{}
	resetCounts := func() { counts = &opCounts{} }
	app := &App{
		EnableShellCompletion: true,
		ShellComplete: func(c *Context) {
			fmt.Fprintf(os.Stderr, "---> ShellComplete(%#v)\n", c)
			counts.Total++
			counts.ShellComplete = counts.Total
		},
		OnUsageError: func(c *Context, err error, isSubcommand bool) error {
			counts.Total++
			counts.OnUsageError = counts.Total
			return errors.New("hay OnUsageError")
		},
	}
	// Succeeding and failing variants of the Before hook, swapped in below.
	beforeNoError := func(c *Context) error {
		counts.Total++
		counts.Before = counts.Total
		return nil
	}
	beforeError := func(c *Context) error {
		counts.Total++
		counts.Before = counts.Total
		return errors.New("hay Before")
	}
	app.Before = beforeNoError
	app.CommandNotFound = func(c *Context, command string) {
		counts.Total++
		counts.CommandNotFound = counts.Total
	}
	// Succeeding and failing variants of the After hook, swapped in below.
	afterNoError := func(c *Context) error {
		counts.Total++
		counts.After = counts.Total
		return nil
	}
	afterError := func(c *Context) error {
		counts.Total++
		counts.After = counts.Total
		return errors.New("hay After")
	}
	app.After = afterNoError
	app.Commands = []*Command{
		{
			Name: "bar",
			Action: func(c *Context) error {
				counts.Total++
				counts.SubCommand = counts.Total
				return nil
			},
		},
	}
	app.Action = func(c *Context) error {
		counts.Total++
		counts.Action = counts.Total
		return nil
	}
	// An unknown flag triggers OnUsageError and nothing else.
	_ = app.Run([]string{"command", "--nope"})
	expect(t, counts.OnUsageError, 1)
	expect(t, counts.Total, 1)
	resetCounts()
	// The generated completion flag routes straight to ShellComplete.
	_ = app.Run([]string{"command", fmt.Sprintf("--%s", genCompName())})
	expect(t, counts.ShellComplete, 1)
	expect(t, counts.Total, 1)
	resetCounts()
	// With no OnUsageError handler installed, no hook runs at all.
	oldOnUsageError := app.OnUsageError
	app.OnUsageError = nil
	_ = app.Run([]string{"command", "--nope"})
	expect(t, counts.Total, 0)
	app.OnUsageError = oldOnUsageError
	resetCounts()
	// Unknown argument "foo": Before, then the default Action, then After;
	// CommandNotFound stays untouched (asserted 0 below).
	_ = app.Run([]string{"command", "foo"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.CommandNotFound, 0)
	expect(t, counts.Action, 2)
	expect(t, counts.After, 3)
	expect(t, counts.Total, 3)
	resetCounts()
	// A failing Before skips the sub-command action but After still runs.
	app.Before = beforeError
	_ = app.Run([]string{"command", "bar"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.After, 2)
	expect(t, counts.Total, 2)
	app.Before = beforeNoError
	resetCounts()
	// With After removed, the run ends at the sub-command action.
	app.After = nil
	_ = app.Run([]string{"command", "bar"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.SubCommand, 2)
	expect(t, counts.Total, 2)
	app.After = afterNoError
	resetCounts()
	// A failing After surfaces its error from Run, but the full
	// Before -> SubCommand -> After chain has executed.
	app.After = afterError
	err := app.Run([]string{"command", "bar"})
	if err == nil {
		t.Fatalf("expected a non-nil error")
	}
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.SubCommand, 2)
	expect(t, counts.After, 3)
	expect(t, counts.Total, 3)
	app.After = afterNoError
	resetCounts()
	// With no sub-commands registered, "command" falls through to Action.
	oldCommands := app.Commands
	app.Commands = nil
	_ = app.Run([]string{"command"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.Action, 2)
	expect(t, counts.After, 3)
	expect(t, counts.Total, 3)
	app.Commands = oldCommands
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1278
} | [
2830,
3393,
2164,
53267,
2124,
35120,
1155,
353,
8840,
836,
8,
341,
18032,
82,
1669,
609,
453,
63731,
31483,
70343,
63731,
1669,
2915,
368,
314,
14579,
284,
609,
453,
63731,
6257,
555,
28236,
1669,
609,
2164,
515,
197,
197,
11084,
25287... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestQueueScaling_AcceptableBacklogPerTask covers AcceptableBacklogPerTask:
// an empty QueueScaling and an invalid latency/processing-time pair must
// error (only the presence of an error is asserted, not its text — the
// wantedErr values are placeholders), and a fractional backlog
// (10s / 300ms = 33.3…) must be rounded up to 34.
func TestQueueScaling_AcceptableBacklogPerTask(t *testing.T) {
	testCases := map[string]struct {
		in            QueueScaling
		wantedBacklog int
		wantedErr     error
	}{
		"should return an error if queue scaling is empty": {
			in:        QueueScaling{},
			wantedErr: errors.New(`"queue_delay" must be specified in order to calculate the acceptable backlog`),
		},
		"should return an error if queue scaling is invalid": {
			// Zero average processing time cannot yield a backlog.
			in: QueueScaling{
				AcceptableLatency: durationp(1 * time.Second),
				AvgProcessingTime: durationp(0 * time.Second),
			},
			wantedErr: errors.New("some error"),
		},
		"should round up to an integer if backlog number has a decimal": {
			in: QueueScaling{
				AcceptableLatency: durationp(10 * time.Second),
				AvgProcessingTime: durationp(300 * time.Millisecond),
			},
			wantedBacklog: 34,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			actual, err := tc.in.AcceptableBacklogPerTask()
			if tc.wantedErr != nil {
				require.NotNil(t, err)
			} else {
				require.Equal(t, tc.wantedBacklog, actual)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 444
} | [
2830,
3393,
7554,
59684,
1566,
66,
1484,
480,
3707,
839,
3889,
6262,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
2415,
14032,
60,
1235,
341,
197,
17430,
310,
18745,
59684,
198,
197,
6692,
7566,
3707,
839,
526,
198,
197,
6692,
75... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestValidate covers the request/reply header consistency checks: a reply
// whose Sequence or PID differs from the request's is rejected — including
// when only a later message in the batch mismatches — while multicast
// replies (empty request) and kernel-assigned PIDs are accepted.
func TestValidate(t *testing.T) {
	tests := []struct {
		name string
		req  Message
		rep  []Message
		err  error
	}{
		{
			name: "mismatched sequence",
			req: Message{
				Header: Header{
					Sequence: 1,
				},
			},
			rep: []Message{{
				Header: Header{
					Sequence: 2,
				},
			}},
			err: errMismatchedSequence,
		},
		{
			// First reply matches; the second must still be validated.
			name: "mismatched sequence second message",
			req: Message{
				Header: Header{
					Sequence: 1,
				},
			},
			rep: []Message{
				{
					Header: Header{
						Sequence: 1,
					},
				},
				{
					Header: Header{
						Sequence: 2,
					},
				},
			},
			err: errMismatchedSequence,
		},
		{
			name: "mismatched PID",
			req: Message{
				Header: Header{
					PID: 1,
				},
			},
			rep: []Message{{
				Header: Header{
					PID: 2,
				},
			}},
			err: errMismatchedPID,
		},
		{
			// First reply matches; the second must still be validated.
			name: "mismatched PID second message",
			req: Message{
				Header: Header{
					PID: 1,
				},
			},
			rep: []Message{
				{
					Header: Header{
						PID: 1,
					},
				},
				{
					Header: Header{
						PID: 2,
					},
				},
			},
			err: errMismatchedPID,
		},
		{
			name: "OK matching sequence and PID",
			req: Message{
				Header: Header{
					Sequence: 1,
					PID:      1,
				},
			},
			rep: []Message{{
				Header: Header{
					Sequence: 1,
					PID:      1,
				},
			}},
		},
		{
			name: "OK multicast messages",
			// No request
			req: Message{},
			rep: []Message{{
				Header: Header{
					Sequence: 1,
					PID:      0,
				},
			}},
		},
		{
			name: "OK no PID assigned yet",
			// No request
			req: Message{
				Header: Header{
					Sequence: 1,
					PID:      0,
				},
			},
			rep: []Message{{
				Header: Header{
					Sequence: 1,
					PID:      9999,
				},
			}},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := Validate(tt.req, tt.rep)
			// Sentinel errors are compared by identity here, not by message.
			if want, got := tt.err, err; want != got {
				t.Fatalf("unexpected error:\n- want: %v\n- got: %v",
					want, got)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1104
} | [
2830,
3393,
17926,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
24395,
220,
4856,
198,
197,
73731,
220,
3056,
2052,
198,
197,
9859,
220,
1465,
198,
197,
59403,
197,
197,
515,
298,
11609,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestLintImages(t *testing.T) {
ctx := context.TODO()
for _, f := range []string{"edcon_tool.png", "edcon_tool.pdf", "hello.txt.gz"} {
for _, err := range eclint.Lint(ctx, fmt.Sprintf("./testdata/images/%s", f)) {
if err != nil {
t.Fatalf("no errors where expected, got %s", err)
}
}
}
} | explode_data.jsonl/82393 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 137
} | [
2830,
3393,
47556,
14228,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
90988,
2822,
2023,
8358,
282,
1669,
2088,
3056,
917,
4913,
291,
443,
22785,
3508,
497,
330,
291,
443,
22785,
15995,
497,
330,
14990,
3909,
20963,
9207,
341,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestEvalPointerLimitNumberOfDereferences(t *testing.T) {
var data = make(VarMap)
var i *int
data.Set("intPointer", &i)
RunJetTest(t, data, nil, "IntPointer_i", `{{ intPointer }}`, "")
j := &i
data.Set("intPointer", &j)
RunJetTest(t, data, nil, "IntPointer_j", `{{ intPointer }}`, "")
k := &j
data.Set("intPointer", &k)
RunJetTest(t, data, nil, "IntPointer_1", `{{ intPointer }}`, "<nil>")
} | explode_data.jsonl/22902 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 176
} | [
2830,
3393,
54469,
9084,
16527,
40619,
35,
485,
4901,
1155,
353,
8840,
836,
8,
341,
2405,
821,
284,
1281,
7,
3962,
2227,
692,
2405,
600,
353,
396,
198,
8924,
4202,
445,
396,
9084,
497,
609,
72,
340,
85952,
35641,
2271,
1155,
11,
821... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_PJWHash(t *testing.T) {
var x uint32 = 7244206
gtest.C(t, func(t *gtest.T) {
j := ghash.PJWHash(strBasic)
t.Assert(j, x)
})
} | explode_data.jsonl/60234 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 77
} | [
2830,
3393,
1088,
41,
54,
6370,
1155,
353,
8840,
836,
8,
341,
2405,
856,
2622,
18,
17,
284,
220,
22,
17,
19,
19,
17,
15,
21,
198,
3174,
1944,
727,
1155,
11,
2915,
1155,
353,
82038,
836,
8,
341,
197,
12428,
1669,
342,
8296,
1069,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestPreAllocatedBuffer_allocateNotExpiredInUse(t *testing.T) {
allocateSz := 16
now := time.Now()
expiration := now
data := make([]byte, 129)
preAllocated := newPreAllocatedBuffer(data)
bufCap := len(data) / allocateSz
for i := 0; i < bufCap-1; i++ {
buf, ok := preAllocated.allocate(allocateSz, expiration)
require.True(t, ok)
assert.Len(t, buf.Bytes(), allocateSz)
}
buf, ok := preAllocated.allocate(allocateSz, expiration)
require.True(t, ok)
buf.inUse()
assert.False(t, preAllocated.isExpired(now))
buf.Free()
assert.True(t, preAllocated.isExpired(now))
} | explode_data.jsonl/8640 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 232
} | [
2830,
3393,
4703,
25154,
657,
4095,
77078,
2623,
54349,
641,
10253,
1155,
353,
8840,
836,
8,
341,
197,
31191,
89837,
1669,
220,
16,
21,
198,
80922,
1669,
882,
13244,
741,
48558,
19629,
1669,
1431,
271,
8924,
1669,
1281,
10556,
3782,
11,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestLaunchWithHashCollision forces a ControllerRevision hash collision
// for a DaemonSet by pre-creating a revision under the exact name the next
// update will hash to, then verifies the controller recovers (a pod with
// the latest spec appears) and increments Status.CollisionCount by one.
func TestLaunchWithHashCollision(t *testing.T) {
	server, closeFn, dc, informers, clientset := setup(t)
	defer closeFn()
	ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
	defer framework.DeleteTestingNamespace(ns, server, t)
	dsClient := clientset.AppsV1().DaemonSets(ns.Name)
	podInformer := informers.Core().V1().Pods().Informer()
	nodeClient := clientset.CoreV1().Nodes()
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go dc.Run(1, stopCh)
	setupScheduler(t, clientset, informers, stopCh)
	// Create single node
	_, err := nodeClient.Create(newNode("single-node", nil))
	if err != nil {
		t.Fatalf("Failed to create node: %v", err)
	}
	// Create new DaemonSet with RollingUpdate strategy
	orgDs := newDaemonSet("foo", ns.Name)
	oneIntString := intstr.FromInt(1)
	orgDs.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{
		Type: apps.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &apps.RollingUpdateDaemonSet{
			MaxUnavailable: &oneIntString,
		},
	}
	ds, err := dsClient.Create(orgDs)
	if err != nil {
		t.Fatalf("Failed to create DaemonSet: %v", err)
	}
	// Wait for the DaemonSet to be created before proceeding
	err = waitForDaemonSetAndControllerRevisionCreated(clientset, ds.Name, ds.Namespace)
	if err != nil {
		t.Fatalf("Failed to create DaemonSet: %v", err)
	}
	ds, err = dsClient.Get(ds.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get DaemonSet: %v", err)
	}
	// Remember the starting collision count so the final assertion can
	// check for exactly one new collision.
	var orgCollisionCount int32
	if ds.Status.CollisionCount != nil {
		orgCollisionCount = *ds.Status.CollisionCount
	}
	// Look up the ControllerRevision for the DaemonSet
	_, name := hashAndNameForDaemonSet(ds)
	revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
	if err != nil || revision == nil {
		t.Fatalf("Failed to look up ControllerRevision: %v", err)
	}
	// Create a "fake" ControllerRevision that we know will create a hash collision when we make
	// the next update
	one := int64(1)
	ds.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
	newHash, newName := hashAndNameForDaemonSet(ds)
	newRevision := &apps.ControllerRevision{
		ObjectMeta: metav1.ObjectMeta{
			Name:            newName,
			Namespace:       ds.Namespace,
			Labels:          labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, newHash),
			Annotations:     ds.Annotations,
			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, apps.SchemeGroupVersion.WithKind("DaemonSet"))},
		},
		Data:     revision.Data,
		Revision: revision.Revision + 1,
	}
	_, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(newRevision)
	if err != nil {
		t.Fatalf("Failed to create ControllerRevision: %v", err)
	}
	// Make an update of the DaemonSet which we know will create a hash collision when
	// the next ControllerRevision is created.
	ds = updateDS(t, dsClient, ds.Name, func(updateDS *apps.DaemonSet) {
		updateDS.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
	})
	// Wait for any pod with the latest Spec to exist
	err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		for _, object := range objects {
			pod := object.(*v1.Pod)
			if *pod.Spec.TerminationGracePeriodSeconds == *ds.Spec.Template.Spec.TerminationGracePeriodSeconds {
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		t.Fatalf("Failed to wait for Pods with the latest Spec to be created: %v", err)
	}
	validateDaemonSetCollisionCount(dsClient, ds.Name, orgCollisionCount+1, t)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1336
} | [
2830,
3393,
32067,
2354,
6370,
32280,
1155,
353,
8840,
836,
8,
341,
41057,
11,
3265,
24911,
11,
19402,
11,
6051,
388,
11,
2943,
746,
1669,
6505,
1155,
340,
16867,
3265,
24911,
741,
84041,
1669,
12626,
7251,
16451,
22699,
445,
603,
39054... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestDistributionToPoints checks that distributionToPoints converts raw
// per-bucket counts, the total count, the sum, and the bucket bounds into
// a single wire.Point carrying a DistributionValue with matching bucket
// counts, explicit bounds, and the converted end timestamp.
func TestDistributionToPoints(t *testing.T) {
	end := time.Unix(30, 0)
	endTimestamp := convertTimestamp(end)
	tests := []struct {
		name    string
		counts  []int64
		count   int64
		sum     float64
		buckets []float64
		end     time.Time
		want    []*wire.Point
	}{
		{
			name: "3 buckets",
			counts: []int64{
				1,
				2,
				3,
			},
			count: 6,
			sum:   40,
			buckets: []float64{
				0, 5, 10,
			},
			end: end,
			want: []*wire.Point{
				{
					Value: wire.PointDistributionValue{
						DistributionValue: &wire.DistributionValue{
							Count: 6,
							Sum:   40,
							// TODO: SumOfSquaredDeviation?
							Buckets: []*wire.Bucket{
								&wire.Bucket{
									Count: 1,
								},
								&wire.Bucket{
									Count: 2,
								},
								&wire.Bucket{
									Count: 3,
								},
							},
							BucketOptions: wire.BucketOptionsExplicit{
								Bounds: []float64{
									0, 5, 10,
								},
							},
						},
					},
					Timestamp: &endTimestamp,
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := distributionToPoints(tt.counts, tt.count, tt.sum, tt.buckets, tt.end)
			// Deep comparison of the whole wire structure; marshaled is used
			// only to render a readable failure message.
			if !reflect.DeepEqual(got, tt.want) {
				t.Fatalf("Got:\n%s\nWant:\n%s", marshaled(got), marshaled(tt.want))
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 714
} | [
2830,
3393,
62377,
1249,
11411,
1155,
353,
8840,
836,
8,
341,
6246,
1669,
882,
10616,
941,
7,
18,
15,
11,
220,
15,
340,
6246,
20812,
1669,
5508,
20812,
15076,
692,
78216,
1669,
3056,
1235,
341,
197,
11609,
262,
914,
198,
197,
18032,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestResultField(t *testing.T) {
store, clean := realtikvtest.CreateMockStoreAndSetup(t)
defer clean()
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table t (id int);")
tk.MustExec(`INSERT INTO t VALUES (1);`)
tk.MustExec(`INSERT INTO t VALUES (2);`)
r, err := tk.Exec(`SELECT count(*) from t;`)
require.NoError(t, err)
defer r.Close()
fields := r.Fields()
require.NoError(t, err)
require.Len(t, fields, 1)
field := fields[0].Column
require.Equal(t, mysql.TypeLonglong, field.GetType())
require.Equal(t, 21, field.GetFlen())
} | explode_data.jsonl/5776 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 237
} | [
2830,
3393,
2077,
1877,
1155,
353,
8840,
836,
8,
341,
57279,
11,
4240,
1669,
1931,
83,
1579,
85,
1944,
7251,
11571,
6093,
3036,
21821,
1155,
340,
16867,
4240,
2822,
3244,
74,
1669,
1273,
8226,
7121,
2271,
7695,
1155,
11,
3553,
340,
32... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestEventProcessCallback drives the service callbacks with a message
// that has no persisted state: handleWithoutAction must fail with an
// invalid-state-name error, and abandon must fail because the state
// cannot be transitioned to abandoned.
func TestEventProcessCallback(t *testing.T) {
	svc, err := New(&protocol.MockProvider{
		ServiceMap: map[string]interface{}{
			mediator.Coordination: &mockroute.MockMediatorSvc{},
		},
	})
	require.NoError(t, err)
	// An Ack message carrying a thread ID for which no state was stored.
	msg := &message{
		ThreadID: threadIDValue,
		Msg:      service.NewDIDCommMsgMap(model.Ack{Type: AckMsgType}),
	}
	err = svc.handleWithoutAction(msg)
	require.Error(t, err)
	require.Contains(t, err.Error(), "invalid state name: invalid state name ")
	err = svc.abandon(msg.ThreadID, msg.Msg, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "unable to update the state to abandoned")
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 238
} | [
2830,
3393,
1556,
7423,
7494,
1155,
353,
8840,
836,
8,
341,
1903,
7362,
11,
1848,
1669,
1532,
2099,
17014,
24664,
5179,
515,
197,
91619,
2227,
25,
2415,
14032,
31344,
67066,
298,
197,
4404,
850,
52114,
98244,
25,
609,
16712,
8966,
24664... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRootParseExecutableNoHang(t *testing.T) {
root := ggql.NewRoot(nil)
src := "{artist{name}}}"
_, err := root.ParseExecutableString(src)
checkNotNil(t, err, "ParseExecutableString(%s) should fail", src)
} | explode_data.jsonl/48192 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 85
} | [
2830,
3393,
8439,
14463,
94772,
2753,
57038,
1155,
353,
8840,
836,
8,
341,
33698,
1669,
52034,
1470,
7121,
8439,
27907,
340,
41144,
1669,
13868,
18622,
47006,
3417,
11195,
197,
6878,
1848,
1669,
3704,
8937,
94772,
703,
14705,
340,
25157,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestPrometheusGetCondition exercises GetCondition on a
// PrometheusSourceStatus: a stored condition is returned as-is, an unknown
// condition type yields nil, and statuses built via the lifecycle helpers
// (InitializeConditions / MarkSink / PropagateDeploymentAvailability)
// report the expected overall readiness.
func TestPrometheusGetCondition(t *testing.T) {
	tests := []struct {
		name      string
		cs        *PrometheusSourceStatus
		condQuery apis.ConditionType
		want      *apis.Condition
	}{{
		name: "single condition",
		cs: &PrometheusSourceStatus{
			SourceStatus: duckv1.SourceStatus{
				Status: duckv1.Status{
					Conditions: []apis.Condition{
						condReady,
					},
				},
			},
		},
		condQuery: apis.ConditionReady,
		want:      &condReady,
	}, {
		name: "unknown condition",
		cs: &PrometheusSourceStatus{
			SourceStatus: duckv1.SourceStatus{
				Status: duckv1.Status{
					Conditions: []apis.Condition{
						condReady,
					},
				},
			},
		},
		condQuery: apis.ConditionType("foo"),
		want:      nil,
	}, {
		// Deployment available but no sink yet: overall readiness unknown.
		name: "mark deployed",
		cs: func() *PrometheusSourceStatus {
			s := &PrometheusSourceStatus{}
			s.InitializeConditions()
			s.PropagateDeploymentAvailability(availableDeployment)
			return s
		}(),
		condQuery: PrometheusConditionReady,
		want: &apis.Condition{
			Type:   PrometheusConditionReady,
			Status: corev1.ConditionUnknown,
		},
	}, {
		// Sink set and deployment available: overall readiness true.
		name: "mark sink and deployed",
		cs: func() *PrometheusSourceStatus {
			s := &PrometheusSourceStatus{}
			s.InitializeConditions()
			s.MarkSink(apis.HTTP("example"))
			s.PropagateDeploymentAvailability(availableDeployment)
			return s
		}(),
		condQuery: PrometheusConditionReady,
		want: &apis.Condition{
			Type:   PrometheusConditionReady,
			Status: corev1.ConditionTrue,
		},
	}}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got := test.cs.GetCondition(test.condQuery)
			// LastTransitionTime and Severity vary per run, so they are
			// excluded from the comparison.
			ignoreTime := cmpopts.IgnoreFields(apis.Condition{},
				"LastTransitionTime", "Severity")
			if diff := cmp.Diff(test.want, got, ignoreTime); diff != "" {
				t.Errorf("unexpected condition (-want, +got) = %v", diff)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 746
} | [
2830,
3393,
35186,
39705,
1949,
10547,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
414,
914,
198,
197,
71899,
286,
353,
35186,
39705,
3608,
2522,
198,
197,
197,
1297,
2859,
97723,
75134,
929,
198,
197,
50780,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestContextHelpACI verifies the CLI surface of "docker context create aci":
// the help text advertises the expected usage line and flags, and running
// the command with a flag but no CONTEXT argument fails with an arity error
// (not an unknown-flag error).
func TestContextHelpACI(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)
	t.Run("help", func(t *testing.T) {
		res := c.RunDockerCmd("context", "create", "aci", "--help")
		// Can't use golden here as the help prints the config directory which changes
		res.Assert(t, icmd.Expected{Out: "docker context create aci CONTEXT [flags]"})
		res.Assert(t, icmd.Expected{Out: "--location"})
		res.Assert(t, icmd.Expected{Out: "--subscription-id"})
		res.Assert(t, icmd.Expected{Out: "--resource-group"})
	})
	t.Run("check exec", func(t *testing.T) {
		// Missing positional CONTEXT: must exit 1 with an arg-count error,
		// and the flag itself must be recognized (no "unknown flag").
		res := c.RunDockerOrExitError("context", "create", "aci", "--subscription-id", "invalid-id")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "accepts 1 arg(s), received 0",
		})
		assert.Assert(t, !strings.Contains(res.Combined(), "unknown flag"))
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 334
} | [
2830,
3393,
1972,
12689,
1706,
40,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
1532,
16547,
36,
17,
68,
63959,
1155,
11,
9544,
6184,
692,
3244,
16708,
445,
8653,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
10202,
1669,
272,
16708,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.