text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestWriteSimulationResponse(t *testing.T) {
t.Parallel()
w := httptest.NewRecorder()
rest.WriteSimulationResponse(w, codec.New(), 10)
res := w.Result() //nolint:bodyclose
t.Cleanup(func() { res.Body.Close() })
require.Equal(t, http.StatusOK, res.StatusCode)
bs, err := ioutil.ReadAll(res.Body)
require.NoError(t, err)
t.Cleanup(func() { res.Body.Close() })
require.Equal(t, `{"gas_estimate":"10"}`, string(bs))
} | explode_data.jsonl/55931 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 171
} | [
2830,
3393,
7985,
64554,
2582,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
6692,
1669,
54320,
70334,
7121,
47023,
741,
197,
3927,
4073,
64554,
2582,
3622,
11,
34647,
7121,
1507,
220,
16,
15,
340,
10202,
1669,
289,
18456,
368,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDMLVisitorCover(t *testing.T) {
ce := &checkExpr{}
tableRefsClause := &TableRefsClause{TableRefs: &Join{Left: &TableSource{Source: &TableName{}}, On: &OnCondition{Expr: ce}}}
stmts := []struct {
node Node
expectedEnterCnt int
expectedLeaveCnt int
}{
{&DeleteStmt{TableRefs: tableRefsClause, Tables: &DeleteTableList{}, Where: ce,
Order: &OrderByClause{}, Limit: &Limit{Count: ce, Offset: ce}}, 4, 4},
{&ShowStmt{Table: &TableName{}, Column: &ColumnName{}, Pattern: &PatternLikeExpr{Expr: ce, Pattern: ce}, Where: ce}, 3, 3},
{&LoadDataStmt{Table: &TableName{}, Columns: []*ColumnName{{}}, FieldsInfo: &FieldsClause{}, LinesInfo: &LinesClause{}}, 0, 0},
{&Assignment{Column: &ColumnName{}, Expr: ce}, 1, 1},
{&ByItem{Expr: ce}, 1, 1},
{&GroupByClause{Items: []*ByItem{{Expr: ce}, {Expr: ce}}}, 2, 2},
{&HavingClause{Expr: ce}, 1, 1},
{&Join{Left: &TableSource{Source: &TableName{}}}, 0, 0},
{&Limit{Count: ce, Offset: ce}, 2, 2},
{&OnCondition{Expr: ce}, 1, 1},
{&OrderByClause{Items: []*ByItem{{Expr: ce}, {Expr: ce}}}, 2, 2},
{&SelectField{Expr: ce, WildCard: &WildCardField{}}, 1, 1},
{&TableName{}, 0, 0},
{tableRefsClause, 1, 1},
{&TableSource{Source: &TableName{}}, 0, 0},
{&WildCardField{}, 0, 0},
// TODO: cover childrens
{&InsertStmt{Table: tableRefsClause}, 1, 1},
{&SetOprStmt{}, 0, 0},
{&UpdateStmt{TableRefs: tableRefsClause}, 1, 1},
{&SelectStmt{}, 0, 0},
{&FieldList{}, 0, 0},
{&SetOprSelectList{}, 0, 0},
{&WindowSpec{}, 0, 0},
{&PartitionByClause{}, 0, 0},
{&FrameClause{}, 0, 0},
{&FrameBound{}, 0, 0},
}
for _, v := range stmts {
ce.reset()
v.node.Accept(checkVisitor{})
require.Equal(t, v.expectedEnterCnt, ce.enterCnt)
require.Equal(t, v.expectedLeaveCnt, ce.leaveCnt)
v.node.Accept(visitor1{})
}
} | explode_data.jsonl/27565 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 825
} | [
2830,
3393,
35,
2668,
16796,
30896,
1155,
353,
8840,
836,
8,
341,
197,
346,
1669,
609,
2028,
16041,
31483,
26481,
82807,
28482,
1669,
609,
2556,
82807,
28482,
90,
2556,
82807,
25,
609,
12292,
90,
5415,
25,
609,
2556,
3608,
90,
3608,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestClientRequestDisconnected(t *testing.T) {
// Initialize webwire server given only the request
server := setupServer(
t,
&serverImpl{
onRequest: func(
_ context.Context,
_ wwr.Connection,
_ wwr.Message,
) (wwr.Payload, error) {
return nil, nil
},
},
wwr.ServerOptions{},
)
// Initialize client and skip manual connection establishment
client := newCallbackPoweredClient(
server.Addr().String(),
wwrclt.Options{
DefaultRequestTimeout: 2 * time.Second,
Autoconnect: wwrclt.Disabled,
},
callbackPoweredClientHooks{},
)
// Send request and await reply
_, err := client.connection.Request(
context.Background(),
"",
wwr.NewPayload(wwr.EncodingBinary, []byte("testdata")),
)
require.NoError(t, err)
} | explode_data.jsonl/50659 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 304
} | [
2830,
3393,
2959,
1900,
77021,
1155,
353,
8840,
836,
8,
341,
197,
322,
9008,
3482,
35531,
3538,
2661,
1172,
279,
1681,
198,
41057,
1669,
6505,
5475,
1006,
197,
3244,
345,
197,
197,
5,
4030,
9673,
515,
298,
24630,
1900,
25,
2915,
1006,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIPPoolHasIP(t *testing.T) {
tests := []struct {
name string
ipPools []*antreacrds.ExternalIPPool
ipPoolToCheck string
ipToCheck net.IP
expectedExists bool
}{
{
name: "check for existing IP in IPPool",
ipPools: []*antreacrds.ExternalIPPool{
newExternalIPPool("eip1", "", "10.10.10.2", "10.10.10.3"),
},
ipPoolToCheck: "eip1",
ipToCheck: net.ParseIP("10.10.10.2"),
expectedExists: true,
},
{
name: "check for non-existing IP in IPPool",
ipPools: []*antreacrds.ExternalIPPool{
newExternalIPPool("eip1", "", "10.10.10.2", "10.10.10.3"),
},
ipPoolToCheck: "eip1",
ipToCheck: net.ParseIP("10.10.10.1"),
expectedExists: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
var fakeCRDObjects []runtime.Object
for _, p := range tt.ipPools {
fakeCRDObjects = append(fakeCRDObjects, p)
}
controller := newController(fakeCRDObjects)
controller.crdInformerFactory.Start(stopCh)
controller.crdInformerFactory.WaitForCacheSync(stopCh)
go controller.Run(stopCh)
require.True(t, cache.WaitForCacheSync(stopCh, controller.HasSynced))
exists := controller.IPPoolHasIP(tt.ipPoolToCheck, tt.ipToCheck)
assert.Equal(t, tt.expectedExists, exists)
})
}
} | explode_data.jsonl/10264 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 625
} | [
2830,
3393,
3298,
10551,
10281,
3298,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
1843,
914,
198,
197,
46531,
47,
6178,
286,
29838,
517,
265,
64748,
5356,
5121,
15342,
3298,
10551,
198,
197,
46531,
10551,
124... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestName(t *testing.T) {
l := NewLogger()
if l.String() != "logrus" {
t.Errorf("error: name expected 'logrus' actual: %s", l.String())
}
t.Logf("testing logger name: %s", l.String())
} | explode_data.jsonl/6550 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 80
} | [
2830,
3393,
675,
1155,
353,
8840,
836,
8,
341,
8810,
1669,
1532,
7395,
2822,
743,
326,
6431,
368,
961,
330,
839,
20341,
1,
341,
197,
3244,
13080,
445,
841,
25,
829,
3601,
364,
839,
20341,
6,
5042,
25,
1018,
82,
497,
326,
6431,
239... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestSlowTransfers(t *testing.T) {
channel := make(chan bool)
svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Don't send any response
<-channel
}))
defer svr.CloseClientConnections()
defer svr.Close()
transfers := NewTransferDetails(svr.URL, false)
assert.Equal(t, 2, len(transfers))
assert.Equal(t, svr.URL, transfers[0].Url.String())
finishedChannel := make(chan bool)
var err error
// Do a quick timeout
go func() {
_, err = DownloadHTTP(transfers[0], filepath.Join(t.TempDir(), "test.txt"), "")
finishedChannel <- true
}()
select {
case <-finishedChannel:
if err == nil {
t.Fatal("Download should have failed")
}
case <-time.After(time.Second * 12):
t.Fatal("Download should have failed")
}
// Close the channel to allow the download to complete
channel <- true
// Make sure the errors are correct
assert.NotNil(t, err)
assert.IsType(t, &ConnectionSetupError{}, err, err.Error())
} | explode_data.jsonl/22512 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 353
} | [
2830,
3393,
58289,
3167,
49793,
1155,
353,
8840,
836,
8,
1476,
71550,
1669,
1281,
35190,
1807,
340,
1903,
18920,
1669,
54320,
70334,
7121,
5475,
19886,
89164,
18552,
3622,
1758,
37508,
11,
435,
353,
1254,
9659,
8,
341,
197,
197,
322,
43... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGroupCleanupUserNamespace(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("we need root for credential")
}
checkUserNS(t)
cmd := exec.Command("id")
uid, gid := os.Getuid(), os.Getgid()
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWUSER,
Credential: &syscall.Credential{
Uid: uint32(uid),
Gid: uint32(gid),
},
UidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: uid, Size: 1},
},
GidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: gid, Size: 1},
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
strOut := strings.TrimSpace(string(out))
// Strings we've seen in the wild.
expected := []string{
"uid=0(root) gid=0(root) groups=0(root)",
"uid=0(root) gid=0(root) groups=0(root),65534(nobody)",
"uid=0(root) gid=0(root) groups=0(root),65534(nogroup)",
"uid=0(root) gid=0(root) groups=0(root),65534",
"uid=0(root) gid=0(root) groups=0(root),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody)", // Alpine; see https://golang.org/issue/19938
}
for _, e := range expected {
if strOut == e {
return
}
}
t.Errorf("id command output: %q, expected one of %q", strOut, expected)
} | explode_data.jsonl/36120 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 595
} | [
2830,
3393,
2808,
67335,
1474,
22699,
1155,
353,
8840,
836,
8,
341,
743,
2643,
2234,
2423,
368,
961,
220,
15,
341,
197,
3244,
57776,
445,
896,
1184,
3704,
369,
40207,
1138,
197,
532,
25157,
1474,
2448,
1155,
340,
25920,
1669,
3883,
12... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestFullCyclePorcelainImportBranchExists(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
rangeManager := mock.NewMockentryCataloger(ctrl)
mri := metaRangeID
prevCommitID := graveler.CommitID("somePrevCommitID")
rangeManager.EXPECT().
GetBranch(gomock.Any(), gomock.Eq(repoID), gomock.Eq(graveler.BranchID(onboard.DefaultImportBranchName))).
Times(1).
Return(&graveler.Branch{
CommitID: prevCommitID,
}, nil)
rangeManager.EXPECT().
ListEntries(gomock.Any(), gomock.Eq(repoID), gomock.Eq(graveler.Ref(onboard.DefaultImportBranchName)), gomock.Any(), gomock.Any()).
Times(1).
Return(catalog.NewEntryListingIterator(testutils.NewFakeEntryIterator([]*catalog.EntryRecord{
{
Path: "some/path",
Entry: &catalog.Entry{},
},
}), "", ""), nil)
rangeManager.EXPECT().WriteMetaRange(gomock.Any(), gomock.Eq(repoID), gomock.Any()).Times(1).Return(&mri, nil)
rangeManager.EXPECT().AddCommitToBranchHead(gomock.Any(), gomock.Eq(repoID), gomock.Eq(graveler.BranchID(onboard.DefaultImportBranchName)), gomock.Any()).
Times(1).Return(commitID, nil)
rocks := onboard.NewCatalogRepoActions(&onboard.Config{
CommitUsername: committer,
RepositoryID: repoID,
DefaultBranchID: "master",
EntryCatalog: rangeManager,
}, logging.Default())
require.NoError(t, rocks.Init(context.Background(), ""))
validIt := getValidIt()
stats, err := rocks.ApplyImport(context.Background(), validIt, false)
require.NoError(t, err)
require.NotNil(t, stats)
retCommitID, err := rocks.Commit(context.Background(), msg, nil)
require.NoError(t, err)
require.Equal(t, string(commitID), retCommitID)
} | explode_data.jsonl/31613 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 655
} | [
2830,
3393,
9432,
44820,
28097,
59142,
11511,
18197,
15575,
1155,
353,
8840,
836,
8,
341,
84381,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
16867,
23743,
991,
18176,
741,
75087,
2043,
1669,
7860,
7121,
11571,
4085,
41606,
261,
62100,
69... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFlipVerySmall(t *testing.T) {
rb := NewBitmap()
rb.Flip(0, 10) // got [0,9], card is 10
rb.Flip(0, 1) // give back the number 0, card goes to 9
rbcard := rb.GetCardinality()
assert.EqualValues(t, 9, rbcard)
} | explode_data.jsonl/20348 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 97
} | [
2830,
3393,
46808,
25756,
25307,
1155,
353,
8840,
836,
8,
341,
85589,
1669,
1532,
16773,
741,
85589,
991,
33115,
7,
15,
11,
220,
16,
15,
8,
442,
2684,
508,
15,
11,
24,
1125,
3701,
374,
220,
16,
15,
198,
85589,
991,
33115,
7,
15,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFirstLast(t *testing.T) {
tests := []struct {
desc string
it *Iter
run func(*Iter) (int, interface{}, bool)
wantIdx int
wantValue interface{}
wantMore bool
}{
{
"First-normal",
New(FromStrings([]string{"a", "1", "b", "2"})),
func(it *Iter) (int, interface{}, bool) {
return it.First(func(v interface{}) bool {
_, err := strconv.Atoi(v.(string))
return err == nil
})
},
1,
"1",
true,
},
{
"First-empty",
New(FromStrings([]string{})),
func(it *Iter) (int, interface{}, bool) {
return it.First(func(v interface{}) bool {
_, err := strconv.Atoi(v.(string))
return err == nil
})
},
-1,
nil,
false,
},
{
"First-nomatch",
New(FromStrings([]string{"a", "b"})),
func(it *Iter) (int, interface{}, bool) {
return it.First(func(v interface{}) bool {
_, err := strconv.Atoi(v.(string))
return err == nil
})
},
-1,
nil,
false,
},
{
"Last-normal",
New(FromStrings([]string{"a", "1", "b", "2"})),
func(it *Iter) (int, interface{}, bool) {
return it.Last(func(v interface{}) bool {
_, err := strconv.Atoi(v.(string))
return err == nil
})
},
3,
"2",
true,
},
{
"Last-empty",
New(FromStrings([]string{})),
func(it *Iter) (int, interface{}, bool) {
return it.Last(func(v interface{}) bool {
_, err := strconv.Atoi(v.(string))
return err == nil
})
},
-1,
nil,
false,
},
{
"Last-nomatch",
New(FromStrings([]string{"a", "b"})),
func(it *Iter) (int, interface{}, bool) {
return it.Last(func(v interface{}) bool {
_, err := strconv.Atoi(v.(string))
return err == nil
})
},
-1,
nil,
false,
},
}
for _, tc := range tests {
t.Run(tc.desc, func(t *testing.T) {
idx, v, more := tc.run(tc.it)
if idx != tc.wantIdx {
t.Errorf("%s got item index of %d but want: %d", tc.desc, idx, tc.wantIdx)
}
if (v == nil) && tc.wantValue != nil {
t.Errorf("%s got item nil value: but want: %v", tc.desc, tc.wantValue)
}
if (v != nil) && tc.wantValue == nil {
t.Errorf("%s got item value: %v but want nil value.", tc.desc, v)
}
if v != nil && v.(string) != tc.wantValue.(string) {
t.Errorf("%s got item value: %v but want: %v", tc.desc, v, tc.wantValue)
}
if more != tc.wantMore {
t.Errorf("%s got more:%t but want: %t", tc.desc, more, tc.wantMore)
}
})
}
} | explode_data.jsonl/56149 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1238
} | [
2830,
3393,
5338,
5842,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
41653,
414,
914,
198,
197,
23374,
286,
353,
8537,
198,
197,
56742,
981,
2915,
4071,
8537,
8,
320,
396,
11,
3749,
22655,
1807,
340,
197,
50780,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLastOperation_LastOperation(t *testing.T) {
t.Run("Should return last operation when operation ID provided", func(t *testing.T) {
// given
memoryStorage := storage.NewMemoryStorage()
err := memoryStorage.Operations().InsertProvisioningOperation(fixOperation())
assert.NoError(t, err)
lastOperationEndpoint := broker.NewLastOperation(memoryStorage.Operations(), memoryStorage.Instances(), logrus.StandardLogger())
// when
response, err := lastOperationEndpoint.LastOperation(context.TODO(), instID, domain.PollDetails{OperationData: operationID})
assert.NoError(t, err)
// then
assert.Equal(t, domain.LastOperation{
State: domain.Succeeded,
Description: operationDescription,
}, response)
})
t.Run("Should return last operation when operation ID not provided", func(t *testing.T) {
// given
memoryStorage := storage.NewMemoryStorage()
err := memoryStorage.Operations().InsertProvisioningOperation(fixOperation())
assert.NoError(t, err)
err = memoryStorage.Instances().Insert(internal.Instance{
InstanceID: instID,
})
assert.NoError(t, err)
lastOperationEndpoint := broker.NewLastOperation(memoryStorage.Operations(), memoryStorage.Instances(), logrus.StandardLogger())
// when
response, err := lastOperationEndpoint.LastOperation(context.TODO(), instID, domain.PollDetails{OperationData: ""})
assert.NoError(t, err)
// then
assert.Equal(t, domain.LastOperation{
State: domain.Succeeded,
Description: operationDescription,
}, response)
})
} | explode_data.jsonl/24099 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 490
} | [
2830,
3393,
5842,
8432,
84672,
8432,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
14996,
470,
1537,
5666,
979,
5666,
3034,
3897,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
197,
322,
2661,
198,
197,
2109,
4731,
5793,
1669,
5819,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReconcile_CustomTask(t *testing.T) {
names.TestingSeed()
const pipelineRunName = "test-pipelinerun"
const pipelineTaskName = "custom-task"
const namespace = "namespace"
simpleCustomTaskPRYAML := `metadata:
name: test-pipelinerun
namespace: namespace
spec:
pipelineSpec:
tasks:
- name: custom-task
params:
- name: param1
value: value1
retries: 3
taskRef:
apiVersion: example.dev/v0
kind: Example
`
simpleCustomTaskWantRunYAML := `metadata:
annotations: {}
labels:
tekton.dev/memberOf: tasks
tekton.dev/pipeline: test-pipelinerun
tekton.dev/pipelineRun: test-pipelinerun
tekton.dev/pipelineTask: custom-task
name: test-pipelinerun-custom-task
namespace: namespace
ownerReferences:
- apiVersion: tekton.dev/v1beta1
blockOwnerDeletion: true
controller: true
kind: PipelineRun
name: test-pipelinerun
spec:
params:
- name: param1
value: value1
ref:
apiVersion: example.dev/v0
kind: Example
retries: 3
serviceAccountName: default
timeout: 1h0m0s
`
tcs := []struct {
name string
pr *v1beta1.PipelineRun
wantRun *v1alpha1.Run
embeddedStatus string
}{{
name: "simple custom task with taskRef",
pr: parse.MustParsePipelineRun(t, simpleCustomTaskPRYAML),
wantRun: parse.MustParseRun(t, simpleCustomTaskWantRunYAML),
}, {
name: "simple custom task with full embedded status",
pr: parse.MustParsePipelineRun(t, simpleCustomTaskPRYAML),
wantRun: parse.MustParseRun(t, simpleCustomTaskWantRunYAML),
embeddedStatus: config.FullEmbeddedStatus,
}, {
name: "simple custom task with minimal embedded status",
pr: parse.MustParsePipelineRun(t, simpleCustomTaskPRYAML),
wantRun: parse.MustParseRun(t, simpleCustomTaskWantRunYAML),
embeddedStatus: config.MinimalEmbeddedStatus,
}, {
name: "simple custom task with taskSpec",
pr: parse.MustParsePipelineRun(t, `
metadata:
name: test-pipelinerun
namespace: namespace
spec:
pipelineSpec:
tasks:
- name: custom-task
params:
- name: param1
value: value1
taskSpec:
apiVersion: example.dev/v0
kind: Example
metadata:
labels:
test-label: test
spec:
field1: 123
field2: value
`),
wantRun: mustParseRunWithObjectMeta(t,
taskRunObjectMeta("test-pipelinerun-custom-task", "namespace", "test-pipelinerun", "test-pipelinerun", "custom-task", false),
`
spec:
params:
- name: param1
value: value1
serviceAccountName: default
spec:
apiVersion: example.dev/v0
kind: Example
metadata:
labels:
test-label: test
spec:
field1: 123
field2: value
timeout: 1h0m0s
`),
}, {
name: "custom task with workspace",
pr: parse.MustParsePipelineRun(t, `
metadata:
name: test-pipelinerun
namespace: namespace
spec:
pipelineSpec:
tasks:
- name: custom-task
taskRef:
apiVersion: example.dev/v0
kind: Example
workspaces:
- name: taskws
subPath: bar
workspace: pipelinews
workspaces:
- name: pipelinews
workspaces:
- name: pipelinews
persistentVolumeClaim:
claimName: myclaim
subPath: foo
`),
wantRun: mustParseRunWithObjectMeta(t,
taskRunObjectMetaWithAnnotations("test-pipelinerun-custom-task", "namespace", "test-pipelinerun",
"test-pipelinerun", "custom-task", false, map[string]string{
"pipeline.tekton.dev/affinity-assistant": getAffinityAssistantName("pipelinews", pipelineRunName),
}),
`
spec:
ref:
apiVersion: example.dev/v0
kind: Example
serviceAccountName: default
timeout: 1h0m0s
workspaces:
- name: taskws
persistentVolumeClaim:
claimName: myclaim
subPath: foo/bar
`),
}}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
embeddedStatus := tc.embeddedStatus
if embeddedStatus == "" {
embeddedStatus = config.DefaultEmbeddedStatus
}
cms := []*corev1.ConfigMap{withCustomTasks(withEmbeddedStatus(newFeatureFlagsConfigMap(), embeddedStatus))}
d := test.Data{
PipelineRuns: []*v1beta1.PipelineRun{tc.pr},
ConfigMaps: cms,
}
prt := newPipelineRunTest(d, t)
defer prt.Cancel()
wantEvents := []string{
"Normal Started",
"Normal Running Tasks Completed: 0",
}
reconciledRun, clients := prt.reconcileRun(namespace, pipelineRunName, wantEvents, false)
actions := clients.Pipeline.Actions()
if len(actions) < 2 {
t.Fatalf("Expected client to have at least two action implementation but it has %d", len(actions))
}
// Check that the expected Run was created.
actual := actions[0].(ktesting.CreateAction).GetObject()
// Ignore the TypeMeta field, because parse.MustParseRun automatically populates it but the "actual" Run won't have it.
if d := cmp.Diff(tc.wantRun, actual, cmpopts.IgnoreFields(v1alpha1.Run{}, "TypeMeta")); d != "" {
t.Errorf("expected to see Run created: %s", diff.PrintWantGot(d))
}
// This PipelineRun is in progress now and the status should reflect that
condition := reconciledRun.Status.GetCondition(apis.ConditionSucceeded)
if condition == nil || condition.Status != corev1.ConditionUnknown {
t.Errorf("Expected PipelineRun status to be in progress, but was %v", condition)
}
if condition != nil && condition.Reason != v1beta1.PipelineRunReasonRunning.String() {
t.Errorf("Expected reason %q but was %s", v1beta1.PipelineRunReasonRunning.String(), condition.Reason)
}
verifyRunStatusesCount(t, embeddedStatus, reconciledRun.Status, 1)
verifyRunStatusesNames(t, embeddedStatus, reconciledRun.Status, tc.wantRun.Name)
})
}
} | explode_data.jsonl/27276 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2352
} | [
2830,
3393,
693,
40446,
457,
57402,
6262,
1155,
353,
8840,
836,
8,
341,
93940,
8787,
287,
41471,
741,
4777,
15301,
6727,
675,
284,
330,
1944,
2268,
81079,
10453,
359,
698,
4777,
15301,
6262,
675,
284,
330,
9163,
52579,
698,
4777,
4473,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestRunLatestDigest_RunningNotControlled(t *testing.T) {
expectedContainers := []string{"a", "c"}
listContainers = func(client *dockerClient.Client) []types.Container {
return []types.Container{
{
Image: "TestRepo@TestLatestDigest",
},
}
}
getRunningContainerIds = func(_ *dockerClient.Client, _ string, _ string) []string {
return expectedContainers
}
cake := Cake{
LatestDigest: "TestLatestDigest",
Repo: "TestRepo",
ContainersRunning: map[string]int{},
}
cake.RunLatestDigest()
for _, id := range expectedContainers {
if _, ok := cake.ContainersRunning[id]; !(ok) {
t.Logf("Expected containers running %v, but Cake had %v", expectedContainers, cake.ContainersRunning)
t.Fail()
}
}
} | explode_data.jsonl/49467 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 287
} | [
2830,
3393,
6727,
31992,
45217,
2568,
11216,
2623,
3273,
832,
1155,
353,
8840,
836,
8,
341,
42400,
74632,
1669,
3056,
917,
4913,
64,
497,
330,
66,
63159,
14440,
74632,
284,
2915,
12805,
353,
28648,
2959,
11716,
8,
3056,
9242,
33672,
341... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetVecfAt(t *testing.T) {
var cases = []struct {
m Mat
expectedSize int
}{
{NewMatWithSize(1, 1, MatTypeCV8UC1), 1},
{NewMatWithSize(1, 1, MatTypeCV8UC2), 2},
{NewMatWithSize(1, 1, MatTypeCV8UC3), 3},
{NewMatWithSize(1, 1, MatTypeCV8UC4), 4},
}
for _, c := range cases {
vec := c.m.GetVecfAt(0, 0)
if len := len(vec); len != c.expectedSize {
t.Errorf("TestGetVecfAt: expected %d, got: %d.", c.expectedSize, len)
}
}
} | explode_data.jsonl/81760 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 226
} | [
2830,
3393,
1949,
10050,
69,
1655,
1155,
353,
8840,
836,
8,
341,
2405,
5048,
284,
3056,
1235,
341,
197,
2109,
310,
6867,
198,
197,
42400,
1695,
526,
198,
197,
59403,
197,
197,
90,
3564,
11575,
2354,
1695,
7,
16,
11,
220,
16,
11,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestGetSetOverwrite(t *testing.T) {
var r Record
ip := IP4{192, 168, 0, 3}
r.Set(ip)
ip2 := IP4{192, 168, 0, 4}
r.Set(ip2)
var ip3 IP4
require.NoError(t, r.Load(&ip3))
assert.Equal(t, ip2, ip3)
} | explode_data.jsonl/39492 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 107
} | [
2830,
3393,
1949,
1649,
1918,
4934,
1155,
353,
8840,
836,
8,
341,
2405,
435,
13583,
271,
46531,
1669,
6790,
19,
90,
16,
24,
17,
11,
220,
16,
21,
23,
11,
220,
15,
11,
220,
18,
532,
7000,
4202,
23443,
692,
46531,
17,
1669,
6790,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSecureNaming(t *testing.T) {
framework.NewTest(t).
Features("security.peer.secure-naming").
Run(func(ctx framework.TestContext) {
istioCfg := istio.DefaultConfigOrFail(t, ctx)
testNamespace := namespace.NewOrFail(t, ctx, namespace.Config{
Prefix: "secure-naming",
Inject: true,
})
namespace.ClaimOrFail(t, ctx, istioCfg.SystemNamespace)
// Check that the CA certificate in the configmap of each namespace is as expected, which
// is used for data plane to control plane TLS authentication.
retry.UntilSuccessOrFail(t, func() error {
return checkCACert(ctx, t, testNamespace)
}, retry.Delay(time.Second), retry.Timeout(10*time.Second))
var a, b echo.Instance
echoboot.NewBuilder(ctx).
With(&a, util.EchoConfig("a", testNamespace, false, nil, nil)).
With(&b, util.EchoConfig("b", testNamespace, false, nil, nil)).
BuildOrFail(t)
ctx.NewSubTest("mTLS cert validation with plugin CA").
Run(func(ctx framework.TestContext) {
// Verify that the certificate issued to the sidecar is as expected.
connectTarget := fmt.Sprintf("b.%s:80", testNamespace.Name())
out, err := cert.DumpCertFromSidecar(testNamespace, "app=a", "istio-proxy",
connectTarget)
if err != nil {
t.Fatalf("failed to dump certificate: %v", err)
return
}
verifyCertificatesWithPluginCA(t, out)
// Verify mTLS works between a and b
callOptions := echo.CallOptions{
Target: b,
PortName: "http",
Scheme: scheme.HTTP,
}
checker := connection.Checker{
From: a,
Options: callOptions,
ExpectSuccess: true,
}
checker.CheckOrFail(ctx)
})
secureNamingTestCases := []struct {
name string
destinationRule string
expectSuccess bool
}{
{
name: "connection fails when DR doesn't match SA",
destinationRule: defaultIdentityDR,
expectSuccess: false,
},
{
name: "connection succeeds when DR matches SA",
destinationRule: correctIdentityDR,
expectSuccess: true,
},
{
name: "connection fails when DR contains non-matching, non-existing SA",
destinationRule: nonExistIdentityDR,
expectSuccess: false,
},
{
name: "connection succeeds when SA is in the list of SANs",
destinationRule: identityListDR,
expectSuccess: true,
},
}
for _, tc := range secureNamingTestCases {
ctx.NewSubTest(tc.name).
Run(func(ctx framework.TestContext) {
dr := strings.ReplaceAll(tc.destinationRule, "NS", testNamespace.Name())
ctx.Config().ApplyYAMLOrFail(t, testNamespace.Name(), dr)
// Verify mTLS works between a and b
callOptions := echo.CallOptions{
Target: b,
PortName: "http",
Scheme: scheme.HTTP,
}
checker := connection.Checker{
From: a,
Options: callOptions,
ExpectSuccess: tc.expectSuccess,
}
if err := retry.UntilSuccess(
checker.Check, retry.Delay(time.Second), retry.Timeout(15*time.Second), retry.Converge(5)); err != nil {
ctx.Fatal(err)
}
})
}
})
} | explode_data.jsonl/81636 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1433
} | [
2830,
3393,
49813,
85410,
1155,
353,
8840,
836,
8,
341,
1166,
5794,
7121,
2271,
1155,
4292,
197,
197,
21336,
445,
17039,
72864,
54694,
552,
5279,
6469,
38609,
197,
85952,
18552,
7502,
12626,
8787,
1972,
8,
341,
298,
197,
380,
815,
42467... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMoveFileBackupDir(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
if !operations.CanServerSideMove(r.Fremote) {
t.Skip("Skipping test as remote does not support server-side move or copy")
}
oldBackupDir := ci.BackupDir
ci.BackupDir = r.FremoteName + "/backup"
defer func() {
ci.BackupDir = oldBackupDir
}()
file1 := r.WriteFile("dst/file1", "file1 contents", t1)
fstest.CheckItems(t, r.Flocal, file1)
file1old := r.WriteObject(ctx, "dst/file1", "file1 contents old", t1)
fstest.CheckItems(t, r.Fremote, file1old)
err := operations.MoveFile(ctx, r.Fremote, r.Flocal, file1.Path, file1.Path)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal)
file1old.Path = "backup/dst/file1"
fstest.CheckItems(t, r.Fremote, file1old, file1)
} | explode_data.jsonl/51941 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 348
} | [
2830,
3393,
9860,
1703,
56245,
6184,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
19047,
741,
1444,
72,
1669,
8619,
2234,
2648,
7502,
340,
7000,
1669,
48434,
477,
7121,
6727,
1155,
340,
16867,
435,
991,
977,
1064,
741,
743,
753,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestKeepalive(t *testing.T) {
c := context.New(t, defaultMTU)
defer c.Cleanup()
c.CreateConnected(789, 30000, nil)
c.EP.SetSockOpt(tcpip.KeepaliveIdleOption(10 * time.Millisecond))
c.EP.SetSockOpt(tcpip.KeepaliveIntervalOption(10 * time.Millisecond))
c.EP.SetSockOpt(tcpip.KeepaliveCountOption(5))
c.EP.SetSockOpt(tcpip.KeepaliveEnabledOption(1))
// 5 unacked keepalives are sent. ACK each one, and check that the
// connection stays alive after 5.
for i := 0; i < 10; i++ {
b := c.GetPacket()
checker.IPv4(t, b,
checker.TCP(
checker.DstPort(context.TestPort),
checker.SeqNum(uint32(c.IRS)),
checker.AckNum(uint32(790)),
checker.TCPFlags(header.TCPFlagAck),
),
)
// Acknowledge the keepalive.
c.SendPacket(nil, &context.Headers{
SrcPort: context.TestPort,
DstPort: c.Port,
Flags: header.TCPFlagAck,
SeqNum: 790,
AckNum: c.IRS,
RcvWnd: 30000,
})
}
// Check that the connection is still alive.
if _, _, err := c.EP.Read(nil); err != tcpip.ErrWouldBlock {
t.Fatalf("got c.EP.Read(nil) = %v, want = %v", err, tcpip.ErrWouldBlock)
}
// Send some data and wait before ACKing it. Keepalives should be disabled
// during this period.
view := buffer.NewView(3)
if _, _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil {
t.Fatalf("Write failed: %v", err)
}
next := uint32(c.IRS) + 1
checker.IPv4(t, c.GetPacket(),
checker.PayloadLen(len(view)+header.TCPMinimumSize),
checker.TCP(
checker.DstPort(context.TestPort),
checker.SeqNum(next),
checker.AckNum(790),
checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
),
)
// Wait for the packet to be retransmitted. Verify that no keepalives
// were sent.
checker.IPv4(t, c.GetPacket(),
checker.PayloadLen(len(view)+header.TCPMinimumSize),
checker.TCP(
checker.DstPort(context.TestPort),
checker.SeqNum(next),
checker.AckNum(790),
checker.TCPFlags(header.TCPFlagAck|header.TCPFlagPsh),
),
)
c.CheckNoPacket("Keepalive packet received while unACKed data is pending")
next += uint32(len(view))
// Send ACK. Keepalives should start sending again.
c.SendPacket(nil, &context.Headers{
SrcPort: context.TestPort,
DstPort: c.Port,
Flags: header.TCPFlagAck,
SeqNum: 790,
AckNum: seqnum.Value(next),
RcvWnd: 30000,
})
// Now receive 5 keepalives, but don't ACK them. The connection
// should be reset after 5.
for i := 0; i < 5; i++ {
b := c.GetPacket()
checker.IPv4(t, b,
checker.TCP(
checker.DstPort(context.TestPort),
checker.SeqNum(uint32(next-1)),
checker.AckNum(uint32(790)),
checker.TCPFlags(header.TCPFlagAck),
),
)
}
// The connection should be terminated after 5 unacked keepalives.
checker.IPv4(t, c.GetPacket(),
checker.TCP(
checker.DstPort(context.TestPort),
checker.SeqNum(uint32(next)),
checker.AckNum(uint32(790)),
checker.TCPFlags(header.TCPFlagAck|header.TCPFlagRst),
),
)
if _, _, err := c.EP.Read(nil); err != tcpip.ErrConnectionReset {
t.Fatalf("got c.EP.Read(nil) = %v, want = %v", err, tcpip.ErrConnectionReset)
}
} | explode_data.jsonl/22333 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1396
} | [
2830,
3393,
19434,
50961,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
2266,
7121,
1155,
11,
1638,
8505,
52,
340,
16867,
272,
727,
60639,
2822,
1444,
7251,
21146,
7,
22,
23,
24,
11,
220,
18,
15,
15,
15,
15,
11,
2092,
692,
1444,
5142,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestOnbuild(t *testing.T) {
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
err := onbuild(b, []string{"ADD", ".", "/app/src"}, nil, "ONBUILD ADD . /app/src")
if err != nil {
t.Fatalf("Error should be empty, got: %s", err.Error())
}
expectedOnbuild := "ADD . /app/src"
if b.runConfig.OnBuild[0] != expectedOnbuild {
t.Fatalf("Wrong ONBUILD command. Expected: %s, got: %s", expectedOnbuild, b.runConfig.OnBuild[0])
}
} | explode_data.jsonl/28278 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 184
} | [
2830,
3393,
1925,
5834,
1155,
353,
8840,
836,
8,
341,
2233,
1669,
609,
3297,
90,
11161,
25,
609,
33,
9195,
22655,
1598,
2648,
25,
609,
3586,
10753,
22655,
11156,
33441,
25,
830,
630,
9859,
1669,
389,
5834,
1883,
11,
3056,
917,
4913,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestOldPass(t *testing.T) {
scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2}
vectors := []struct {
pass string
out string
}{
{" pass", "47575c5a435b4251"},
{"pass ", "47575c5a435b4251"},
{"123\t456", "575c47505b5b5559"},
{"C0mpl!ca ted#PASS123", "5d5d554849584a45"},
}
for _, tuple := range vectors {
ours := scrambleOldPassword(scramble, []byte(tuple.pass))
if tuple.out != fmt.Sprintf("%x", ours) {
t.Errorf("Failed old password %q", tuple.pass)
}
}
} | explode_data.jsonl/71463 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 222
} | [
2830,
3393,
18284,
12187,
1155,
353,
8840,
836,
8,
341,
29928,
2396,
891,
1669,
3056,
3782,
90,
24,
11,
220,
23,
11,
220,
22,
11,
220,
21,
11,
220,
20,
11,
220,
19,
11,
220,
18,
11,
220,
17,
532,
5195,
10605,
1669,
3056,
1235,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestConvertVLabsContainerService(t *testing.T) {
vlabsCS := &vlabs.ContainerService{
Location: "westus2",
Plan: &vlabs.ResourcePurchasePlan{
Name: "fooPlan",
PromotionCode: "fooPromoCode",
Product: "fooProduct",
Publisher: "fooPublisher",
},
Tags: map[string]string{
"foo": "bar",
},
Properties: &vlabs.Properties{
ProvisioningState: vlabs.Succeeded,
OrchestratorProfile: &vlabs.OrchestratorProfile{
OrchestratorType: DCOS,
DcosConfig: &vlabs.DcosConfig{
DcosBootstrapURL: "SampleDcosBootstrapURL",
DcosWindowsBootstrapURL: "SampleWindowsDcosBootstrapURL",
Registry: "SampleRegistry",
RegistryPass: "SampleRegistryPass",
RegistryUser: "SampleRegistryUser",
DcosClusterPackageListID: "SampleDcosClusterPackageListID",
DcosProviderPackageID: "SampleDcosProviderPackageID",
BootstrapProfile: &vlabs.BootstrapProfile{
VMSize: "Standard_Ds1_v1",
OSDiskSizeGB: 256,
OAuthEnabled: true,
StaticIP: "172.0.0.1",
Subnet: "255.255.255.0",
},
},
},
WindowsProfile: &vlabs.WindowsProfile{
AdminUsername: "sampleAdminUsername",
AdminPassword: "sampleAdminPassword",
},
AgentPoolProfiles: []*vlabs.AgentPoolProfile{
{
Name: "sampleagent",
Count: 2,
VMSize: "Standard_DS1_v1",
DNSPrefix: "blueorange",
FQDN: "blueorange.westus2.azureapp.com",
OSType: "Linux",
},
{
Name: "sampleAgent-public",
Count: 2,
VMSize: "sampleVM",
DNSPrefix: "blueorange",
FQDN: "blueorange.westus2.com",
OSType: "Linux",
ImageRef: &vlabs.ImageReference{
Name: "testImage",
ResourceGroup: "testRg",
SubscriptionID: "testSub",
Gallery: "testGallery",
Version: "0.0.1",
},
},
},
MasterProfile: &vlabs.MasterProfile{
Count: 1,
PreProvisionExtension: &vlabs.Extension{
Name: "fooExtension",
SingleOrAll: "All",
Template: "{{foobar}}",
},
ImageRef: &vlabs.ImageReference{
Name: "FooImageRef",
ResourceGroup: "FooImageRefResourceGroup",
},
Extensions: []vlabs.Extension{
{
Name: "sampleExtension",
SingleOrAll: "single",
Template: "{{foobar}}",
},
},
},
CertificateProfile: &vlabs.CertificateProfile{
CaCertificate: "SampleCACert",
CaPrivateKey: "SampleCAPrivateKey",
APIServerCertificate: "SampleAPIServerCert",
APIServerPrivateKey: "SampleAPIServerPrivateKey",
ClientCertificate: "SampleClientCert",
ClientPrivateKey: "SampleClientPrivateKey",
KubeConfigCertificate: "SampleKubeConfigCert",
KubeConfigPrivateKey: "SampleKubeConfigPrivateKey",
EtcdClientCertificate: "SampleEtcdClientCert",
EtcdClientPrivateKey: "SampleEtcdClientPrivateKey",
EtcdServerCertificate: "SampleEtcdServerCert",
EtcdServerPrivateKey: "SampleEtcdServerPrivateKey",
},
FeatureFlags: &vlabs.FeatureFlags{
EnableCSERunInBackground: true,
BlockOutboundInternet: false,
EnableTelemetry: false,
},
AADProfile: &vlabs.AADProfile{
ClientAppID: "SampleClientAppID",
ServerAppID: "ServerAppID",
TenantID: "SampleTenantID",
AdminGroupID: "SampleAdminGroupID",
},
ExtensionProfiles: []*vlabs.ExtensionProfile{
{
Name: "fooExtension",
Version: "fooVersion",
ExtensionParameters: "fooExtensionParameters",
ExtensionParametersKeyVaultRef: &vlabs.KeyvaultSecretRef{
VaultID: "fooVaultID",
SecretName: "fooSecretName",
SecretVersion: "fooSecretVersion",
},
RootURL: "fooRootURL",
Script: "fooSsript",
URLQuery: "fooURL",
},
},
LinuxProfile: &vlabs.LinuxProfile{
AdminUsername: "azureuser",
Secrets: []vlabs.KeyVaultSecrets{
{
SourceVault: &vlabs.KeyVaultID{
ID: "sampleKeyVaultID",
},
VaultCertificates: []vlabs.KeyVaultCertificate{
{
CertificateURL: "FooCertURL",
CertificateStore: "BarCertStore",
},
},
},
},
CustomNodesDNS: &vlabs.CustomNodesDNS{
DNSServer: "SampleDNSServer",
},
CustomSearchDomain: &vlabs.CustomSearchDomain{
Name: "FooCustomSearchDomain",
RealmUser: "sampleRealmUser",
RealmPassword: "sampleRealmPassword",
},
},
},
}
apiCs, err := ConvertVLabsContainerService(vlabsCS, false)
if apiCs == nil {
t.Error("unexpected nil output while executing ConvertVLabsContainerService")
}
if err != nil {
t.Errorf("unexpected error while executing ConvertVLabsContainerService: %s", err.Error())
}
//Test Vlabs with Kubernetes Orchestrator
vlabsCS.Properties.OrchestratorProfile.OrchestratorType = Kubernetes
vlabsCS.Properties.OrchestratorProfile.DcosConfig = nil
vlabsCS.Properties.OrchestratorProfile.KubernetesConfig = &vlabs.KubernetesConfig{
Addons: []vlabs.KubernetesAddon{
{
Name: "sampleAddon",
Enabled: to.BoolPtr(true),
Containers: []vlabs.KubernetesContainerSpec{
{
Name: "sampleK8sContainer",
Image: "sampleK8sImage",
MemoryRequests: "20Mi",
CPURequests: "10m",
},
},
Config: map[string]string{
"sampleKey": "sampleVal",
},
},
},
APIServerConfig: map[string]string{
"sampleAPIServerKey": "sampleAPIServerVal",
},
ControllerManagerConfig: map[string]string{
"sampleCMKey": "sampleCMVal",
},
CloudControllerManagerConfig: map[string]string{
"sampleCCMKey": "sampleCCMVal",
},
SchedulerConfig: map[string]string{
"sampleSchedulerKey": "sampleSchedulerVal",
},
PrivateCluster: &vlabs.PrivateCluster{
Enabled: to.BoolPtr(true),
JumpboxProfile: &vlabs.PrivateJumpboxProfile{
Name: "sampleJumpboxProfile",
VMSize: "Standard_DS1_v2",
OSDiskSizeGB: 512,
Username: "userName",
PublicKey: ValidSSHPublicKey,
StorageProfile: StorageAccount,
},
},
PodSecurityPolicyConfig: map[string]string{
"samplePSPConfigKey": "samplePSPConfigVal",
},
}
apiCs, err = ConvertVLabsContainerService(vlabsCS, false)
if apiCs == nil {
t.Error("unexpected nil output while executing ConvertVLabsContainerService")
}
if err != nil {
t.Errorf("unexpected error while executing ConvertVLabsContainerService: %s", err.Error())
}
} | explode_data.jsonl/34635 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 3046
} | [
2830,
3393,
12012,
30698,
3435,
4502,
1860,
1155,
353,
8840,
836,
8,
341,
5195,
70271,
6412,
1669,
609,
14536,
3435,
33672,
1860,
515,
197,
197,
4707,
25,
330,
11039,
355,
17,
756,
197,
197,
20485,
25,
609,
14536,
3435,
20766,
42841,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestClient_Create_Item(t *testing.T) {
dat, err := getTestStructure()
require.NoError(t, err)
cfg := config.Spec{
Name: "azure-storage-files",
Kind: "azure.storage.files",
Properties: map[string]string{
"storage_access_key": dat.storageAccessKey,
"storage_account": dat.storageAccount,
},
}
tests := []struct {
name string
request *types.Request
wantErr bool
}{
{
name: "valid create item",
request: types.NewRequest().
SetMetadataKeyValue("method", "create").
SetMetadataKeyValue("service_url", dat.serviceURL).
SetData(dat.file),
wantErr: false,
}, {
name: "valid create item with metadata",
request: types.NewRequest().
SetMetadataKeyValue("method", "create").
SetMetadataKeyValue("file_metadata", `{"tag":"test","name":"myname"}`).
SetMetadataKeyValue("service_url", dat.serviceURLWithMeta).
SetData(dat.file),
wantErr: false,
}, {
name: "invalid create item - missing service url",
request: types.NewRequest().
SetMetadataKeyValue("method", "create").
SetData(dat.file),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
c := New()
err = c.Init(ctx, cfg, nil)
require.NoError(t, err)
got, err := c.Do(ctx, tt.request)
if tt.wantErr {
require.Error(t, err)
t.Logf("init() error = %v, wantSetErr %v", err, tt.wantErr)
return
}
require.NoError(t, err)
require.NotNil(t, got)
})
}
} | explode_data.jsonl/25066 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 659
} | [
2830,
3393,
2959,
34325,
27518,
1155,
353,
8840,
836,
8,
341,
2698,
266,
11,
1848,
1669,
633,
2271,
22952,
741,
17957,
35699,
1155,
11,
1848,
340,
50286,
1669,
2193,
36473,
515,
197,
21297,
25,
330,
39495,
62795,
46048,
756,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_query_Query(t *testing.T) {
type fields struct {
session session.ServiceFormatter
}
type args struct {
querier Querier
}
tests := []struct {
name string
fields fields
args args
want *sfdc.Record
wantErr bool
}{
{
name: "Request Error",
fields: fields{
session: &mockSessionFormatter{
url: "123://wrong",
},
},
args: args{
querier: &mockQuery{
sobject: "Account",
fields: []string{
"Name",
"Email",
},
id: "SomeID",
},
},
want: nil,
wantErr: true,
},
{
name: "Response HTTP Error No JSON",
fields: fields{
session: &mockSessionFormatter{
url: "https://test.salesforce.com",
client: mockHTTPClient(func(req *http.Request) *http.Response {
return &http.Response{
StatusCode: 500,
Status: "Some Status",
Body: ioutil.NopCloser(strings.NewReader("resp")),
Header: make(http.Header),
}
}),
},
},
args: args{
querier: &mockQuery{
sobject: "Account",
fields: []string{
"Name",
"Email",
},
id: "SomeID",
},
},
want: nil,
wantErr: true,
},
{
name: "Response HTTP Error JSON",
fields: fields{
session: &mockSessionFormatter{
url: "https://test.salesforce.com",
client: mockHTTPClient(func(req *http.Request) *http.Response {
resp := `
[
{
"message" : "Email: invalid email address: Not a real email address",
"errorCode" : "INVALID_EMAIL_ADDRESS"
}
]`
return &http.Response{
StatusCode: 500,
Status: "Some Status",
Body: ioutil.NopCloser(strings.NewReader(resp)),
Header: make(http.Header),
}
}),
},
},
args: args{
querier: &mockQuery{
sobject: "Account",
fields: []string{
"Name",
"Email",
},
id: "SomeID",
},
},
want: nil,
wantErr: true,
},
{
name: "Response JSON Error",
fields: fields{
session: &mockSessionFormatter{
url: "https://test.salesforce.com",
client: mockHTTPClient(func(req *http.Request) *http.Response {
resp := `
{`
return &http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(strings.NewReader(resp)),
Header: make(http.Header),
}
}),
},
},
args: args{
querier: &mockQuery{
sobject: "Account",
fields: []string{
"Name",
"Email",
},
id: "SomeID",
},
},
want: nil,
wantErr: true,
},
{
name: "Response Passing",
fields: fields{
session: &mockSessionFormatter{
url: "https://test.salesforce.com",
client: mockHTTPClient(func(req *http.Request) *http.Response {
resp := `
{
"AccountNumber" : "CD656092",
"BillingPostalCode" : "27215"
}`
return &http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(strings.NewReader(resp)),
Header: make(http.Header),
}
}),
},
},
args: args{
querier: &mockQuery{
sobject: "Account",
fields: []string{
"AccountNumber",
"BillingPostalCode",
},
id: "SomeID",
},
},
want: testNewRecord([]byte(`
{
"AccountNumber" : "CD656092",
"BillingPostalCode" : "27215"
}`)),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
q := &query{
session: tt.fields.session,
}
got, err := q.callout(tt.args.querier)
if (err != nil) != tt.wantErr {
t.Errorf("query.Query() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("query.Query() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/52250 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2003
} | [
2830,
3393,
5738,
48042,
1155,
353,
8840,
836,
8,
341,
13158,
5043,
2036,
341,
197,
25054,
3797,
13860,
14183,
198,
197,
532,
13158,
2827,
2036,
341,
197,
197,
15959,
1268,
3406,
261,
1268,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWalRepair(t *testing.T) {
var enc RecordEncoder
for name, test := range map[string]struct {
corrFunc func(rec []byte) []byte // Func that applies the corruption to a record.
rec []byte
totalRecs int
expRecs int
}{
"invalid_record": {
func(rec []byte) []byte {
rec[0] = byte(RecordInvalid)
return rec
},
enc.Series([]RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, []byte{}),
9,
5,
},
"decode_series": {
func(rec []byte) []byte {
return rec[:3]
},
enc.Series([]RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, []byte{}),
9,
5,
},
"decode_samples": {
func(rec []byte) []byte {
return rec[:3]
},
enc.Samples([]RefSample{{Ref: 0, T: 99, V: 1}}, []byte{}),
9,
5,
},
"decode_tombstone": {
func(rec []byte) []byte {
return rec[:3]
},
enc.Tombstones([]Stone{{ref: 1, intervals: Intervals{}}}, []byte{}),
9,
5,
},
} {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir("", "wal_head_repair")
testutil.Ok(t, err)
defer func() {
testutil.Ok(t, os.RemoveAll(dir))
}()
w, err := wal.New(nil, nil, dir)
testutil.Ok(t, err)
defer w.Close()
for i := 1; i <= test.totalRecs; i++ {
// At this point insert a corrupted record.
if i-1 == test.expRecs {
testutil.Ok(t, w.Log(test.corrFunc(test.rec)))
continue
}
testutil.Ok(t, w.Log(test.rec))
}
h, err := NewHead(nil, nil, w, 1)
testutil.Ok(t, err)
testutil.Equals(t, 0.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal))
testutil.Ok(t, h.Init(math.MinInt64))
testutil.Equals(t, 1.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal))
sr, err := wal.NewSegmentsReader(dir)
testutil.Ok(t, err)
defer sr.Close()
r := wal.NewReader(sr)
var actRec int
for r.Next() {
actRec++
}
testutil.Ok(t, r.Err())
testutil.Equals(t, test.expRecs, actRec, "Wrong number of intact records")
})
}
} | explode_data.jsonl/38173 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 973
} | [
2830,
3393,
88298,
98386,
1155,
353,
8840,
836,
8,
341,
2405,
3209,
13583,
19921,
198,
2023,
829,
11,
1273,
1669,
2088,
2415,
14032,
60,
1235,
341,
197,
1444,
17391,
9626,
220,
2915,
20635,
3056,
3782,
8,
3056,
3782,
442,
18016,
429,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDashboardsAsConfig(t *testing.T) {
Convey("Dashboards as configuration", t, func() {
logger := log.New("test-logger")
Convey("Can read config file version 1 format", func() {
cfgProvider := configReader{path: simpleDashboardConfig, log: logger}
cfg, err := cfgProvider.readConfig()
So(err, ShouldBeNil)
validateDashboardAsConfig(t, cfg)
})
Convey("Can read config file in version 0 format", func() {
cfgProvider := configReader{path: oldVersion, log: logger}
cfg, err := cfgProvider.readConfig()
So(err, ShouldBeNil)
validateDashboardAsConfig(t, cfg)
})
Convey("Should skip invalid path", func() {
cfgProvider := configReader{path: "/invalid-directory", log: logger}
cfg, err := cfgProvider.readConfig()
if err != nil {
t.Fatalf("readConfig return an error %v", err)
}
So(len(cfg), ShouldEqual, 0)
})
Convey("Should skip broken config files", func() {
cfgProvider := configReader{path: brokenConfigs, log: logger}
cfg, err := cfgProvider.readConfig()
if err != nil {
t.Fatalf("readConfig return an error %v", err)
}
So(len(cfg), ShouldEqual, 0)
})
})
} | explode_data.jsonl/63950 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 429
} | [
2830,
3393,
42263,
19270,
2121,
2648,
1155,
353,
8840,
836,
8,
341,
93070,
5617,
445,
42263,
19270,
438,
6546,
497,
259,
11,
2915,
368,
341,
197,
17060,
1669,
1487,
7121,
445,
1944,
12,
9786,
5130,
197,
93070,
5617,
445,
6713,
1349,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReplaceDomainsConfig(t *testing.T) {
assert := assertlib.New(t)
globalConfig := viper.New()
globalConfig.Set("domains", []map[string]interface{}{{"domains": []string{"localhost", "other"}}})
application, _ := New()
application.ReplaceDomainsConfig(globalConfig)
expected := []domain.ConfigItem{{
Domains: []string{"localhost", "other"},
AllUsersGroup: 0,
TempUsersGroup: 0,
}}
config, _ := DomainsConfig(application.Config)
assert.Equal(expected, config)
// not tested: that it is been pushed to the API
} | explode_data.jsonl/61949 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 183
} | [
2830,
3393,
23107,
74713,
2648,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
2740,
7121,
1155,
340,
18842,
2648,
1669,
95132,
7121,
741,
18842,
2648,
4202,
445,
59621,
497,
3056,
2186,
14032,
31344,
6257,
2979,
1,
59621,
788,
3056,
9... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLink(t *testing.T) {
t.Run("default", func(t *testing.T) {
c := New("foo-bar-baz")
l := c.Link("http://www.example.net/article", "fantasy").SetVersion("1.2.3")
expected := "https://chatbase.com/r?api_key=foo-bar-baz&platform=fantasy&url=http%3A%2F%2Fwww.example.net%2Farticle&version=1.2.3"
href, err := l.Encode()
if err != nil {
t.Errorf("Unexpected error %v", err)
}
if expected != href {
t.Errorf("Expected %v, got %v", expected, href)
}
})
} | explode_data.jsonl/41282 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 215
} | [
2830,
3393,
3939,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
2258,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
1444,
1669,
1532,
445,
7975,
15773,
1455,
1370,
1138,
197,
8810,
1669,
272,
22534,
445,
1254,
1110,
2136,
7724,
5071... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestInvalidFields(t *testing.T) {
_, err := Load("testdata/invalid_config.yml")
assert.EqualError(t, err, "yaml: unmarshal errors:\n line 2: field invalid_yaml not found in type config.Build")
} | explode_data.jsonl/38052 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 73
} | [
2830,
3393,
7928,
8941,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1848,
1669,
8893,
445,
92425,
14,
11808,
5332,
33936,
1138,
6948,
12808,
1454,
1155,
11,
1848,
11,
330,
41466,
25,
650,
27121,
5975,
7190,
77,
220,
1555,
220,
17,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestToken_lexKeyword(t *testing.T) {
tests := []struct {
keyword bool
value string
}{
{
keyword: true,
value: "select ",
},
{
keyword: true,
value: "from",
},
{
keyword: true,
value: "as",
},
{
keyword: true,
value: "SELECT",
},
{
keyword: true,
value: "into",
},
// false tests
{
keyword: false,
value: " into",
},
{
keyword: false,
value: "flubbrety",
},
}
for _, test := range tests {
tok, _, ok := lexKeyword(test.value, cursor{})
assert.Equal(t, test.keyword, ok, test.value)
if ok {
test.value = strings.TrimSpace(test.value)
assert.Equal(t, strings.ToLower(test.value), tok.Value, test.value)
}
}
} | explode_data.jsonl/60003 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 360
} | [
2830,
3393,
3323,
74547,
34481,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
197,
19863,
1807,
198,
197,
16309,
256,
914,
198,
197,
59403,
197,
197,
515,
298,
197,
19863,
25,
830,
345,
298,
16309,
25,
256,
330,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestScanExpiration(t *testing.T) {
var at []int64
var mkeys [][]byte
// setUp fill the expiration list
now := Now()
setUp := func() {
txn, err := mockDB.Begin()
assert.NoError(t, err)
// cleanup the keys left by other tests(TODO these dirty data should be deleted where it is generated)
iter, err := txn.t.Iter(expireKeyPrefix, nil)
assert.NoError(t, err)
defer iter.Close()
for iter.Valid() && iter.Key().HasPrefix(expireKeyPrefix) {
txn.t.Delete(iter.Key())
iter.Next()
}
for i := 0; i < 10; i++ {
ts := now - 10 + int64(i)*int64(time.Second)
mkey := MetaKey(txn.db, []byte(fmt.Sprintf("expire_key_%d", i)))
err := expireAt(txn.t, mkey, mkey, ObjectString, 0, ts)
assert.NoError(t, err)
at = append(at, ts)
mkeys = append(mkeys, mkey)
}
assert.NoError(t, txn.Commit(context.Background()))
}
tearDown := func() {
txn, err := mockDB.Begin()
assert.NoError(t, err)
for i := range at {
assert.NoError(t, unExpireAt(txn.t, mkeys[i], at[i]))
}
assert.NoError(t, txn.Commit(context.Background()))
}
setUp()
type args struct {
from int64
to int64
count int64
}
type want struct {
s int // start index of the result
e int // end index of the result(not included)
}
tests := []struct {
name string
args args
want want
}{
{"escan 0 max 10", args{0, math.MaxInt64, 10}, want{0, 10}},
{"escan 0 max 1", args{0, math.MaxInt64, 1}, want{0, 1}},
{"escan 0 0 1", args{0, 0, 1}, want{0, 0}},
{"escan max max 1", args{math.MaxInt64, math.MaxInt64, 1}, want{0, 0}},
{"escan 0 max 20", args{0, math.MaxInt64, 10}, want{0, 10}},
{"escan at[2] at[8] 10", args{at[2], at[8], 10}, want{2, 8}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Log(tt.name)
txn, err := mockDB.Begin()
assert.NoError(t, err)
tses, keys, err := ScanExpiration(txn, tt.args.from, tt.args.to, tt.args.count)
assert.NoError(t, err)
assert.NoError(t, txn.Commit(context.Background()))
assert.Equal(t, tt.want.e-tt.want.s, len(tses))
assert.Equal(t, tt.want.e-tt.want.s, len(keys))
for i := range tses {
assert.Equal(t, at[tt.want.s+i], tses[i])
assert.Equal(t, mkeys[tt.want.s+i], keys[i])
}
})
}
tearDown()
} | explode_data.jsonl/52398 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1030
} | [
2830,
3393,
26570,
66301,
1155,
353,
8840,
836,
8,
341,
2405,
518,
3056,
396,
21,
19,
198,
2405,
296,
10563,
52931,
3782,
271,
197,
322,
18620,
5155,
279,
31692,
1140,
198,
80922,
1669,
4695,
741,
8196,
2324,
1669,
2915,
368,
341,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestReceivedSegmentQueuing(t *testing.T) {
// This test sends 200 segments containing a few bytes each to an
// endpoint and checks that they're all received and acknowledged by
// the endpoint, that is, that none of the segments are dropped by
// internal queues.
c := context.New(t, defaultMTU)
defer c.Cleanup()
c.CreateConnected(789, 30000, nil)
// Send 200 segments.
data := []byte{1, 2, 3}
for i := 0; i < 200; i++ {
c.SendPacket(data, &context.Headers{
SrcPort: context.TestPort,
DstPort: c.Port,
Flags: header.TCPFlagAck,
SeqNum: seqnum.Value(790 + i*len(data)),
AckNum: c.IRS.Add(1),
RcvWnd: 30000,
})
}
// Receive ACKs for all segments.
last := seqnum.Value(790 + 200*len(data))
for {
b := c.GetPacket()
checker.IPv4(t, b,
checker.TCP(
checker.DstPort(context.TestPort),
checker.SeqNum(uint32(c.IRS)+1),
checker.TCPFlags(header.TCPFlagAck),
),
)
tcpHdr := header.TCP(header.IPv4(b).Payload())
ack := seqnum.Value(tcpHdr.AckNumber())
if ack == last {
break
}
if last.LessThan(ack) {
t.Fatalf("Acknowledge (%v) beyond the expected (%v)", ack, last)
}
}
} | explode_data.jsonl/22320 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 503
} | [
2830,
3393,
23260,
21086,
25776,
7471,
1155,
353,
8840,
836,
8,
341,
197,
322,
1096,
1273,
21308,
220,
17,
15,
15,
20632,
8482,
264,
2421,
5820,
1817,
311,
458,
198,
197,
322,
14887,
323,
12341,
429,
807,
2299,
678,
3949,
323,
25498,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestParser_ParseGeneralApiInfoFailed(t *testing.T) {
gopath := os.Getenv("GOPATH")
assert.NotNil(t, gopath)
p := New()
assert.Error(t, p.ParseGeneralAPIInfo("testdata/noexist.go"))
} | explode_data.jsonl/63549 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 78
} | [
2830,
3393,
6570,
77337,
15415,
6563,
1731,
9408,
1155,
353,
8840,
836,
8,
341,
3174,
35111,
1669,
2643,
64883,
445,
98733,
4827,
1138,
6948,
93882,
1155,
11,
342,
35111,
340,
3223,
1669,
1532,
741,
6948,
6141,
1155,
11,
281,
8937,
1541... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestAutocompleteEmoji(t *testing.T) {
th := Setup(t).InitBasic()
defer th.TearDown()
Client := th.Client
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.EnableCustomEmoji = true })
searchTerm1 := model.NewId()
emojis := []*model.Emoji{
{
CreatorId: th.BasicUser.Id,
Name: searchTerm1,
},
{
CreatorId: th.BasicUser.Id,
Name: "blargh_" + searchTerm1,
},
}
for idx, emoji := range emojis {
newEmoji, resp := Client.CreateEmoji(emoji, utils.CreateTestGif(t, 10, 10), "image.gif")
CheckNoError(t, resp)
emojis[idx] = newEmoji
}
remojis, resp := Client.AutocompleteEmoji(searchTerm1, "")
CheckNoError(t, resp)
CheckOKStatus(t, resp)
found1 := false
found2 := false
for _, e := range remojis {
if e.Name == emojis[0].Name {
found1 = true
}
if e.Name == emojis[1].Name {
found2 = true
}
}
assert.True(t, found1)
assert.False(t, found2)
_, resp = Client.AutocompleteEmoji("", "")
CheckBadRequestStatus(t, resp)
Client.Logout()
_, resp = Client.AutocompleteEmoji(searchTerm1, "")
CheckUnauthorizedStatus(t, resp)
} | explode_data.jsonl/26616 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 488
} | [
2830,
3393,
19602,
20104,
92731,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1155,
568,
3803,
15944,
741,
16867,
270,
836,
682,
4454,
741,
71724,
1669,
270,
11716,
271,
70479,
5105,
16689,
2648,
18552,
28272,
353,
2528,
10753,
8,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestListener(t *testing.T) {
dataDir := "etcd_data"
defer func() {
err := os.RemoveAll(dataDir)
if err != nil {
log.Println("remove failed")
}
}()
metadata := mock.NewEtcdMetadata()
metadata.DataDir = dataDir
mockEtcd := &mock.MockEtcd{}
mockEtcd.StartMockEtcd(metadata)
defer mockEtcd.StopMockEtcd()
loader := NewRemoteConfigurationLoader(props, getEtcdConfiguration())
loader.Init()
defer func() {
err := loader.Close()
log.Println(err)
}()
loader.AddRouterListener(&mockListener{})
err := modifyRouterConfig()
if err != nil {
log.Println(err)
}
assert.Nil(t, err)
active, err := loader.etcdClient.Get(loader.activeKey)
assert.Nil(t, err)
assert.NotNil(t, active)
} | explode_data.jsonl/31935 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 296
} | [
2830,
3393,
2743,
1155,
353,
8840,
836,
8,
341,
8924,
6184,
1669,
330,
295,
4385,
1769,
698,
16867,
2915,
368,
341,
197,
9859,
1669,
2643,
84427,
2592,
6184,
340,
197,
743,
1848,
961,
2092,
341,
298,
6725,
12419,
445,
5399,
4641,
1138... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGetTokenWithoutAuth(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 10})
defer clus.Terminate(t)
authapi := clus.RandClient()
var err error
var client *clientv3.Client
// make sure "auth" was disabled
if _, err = authapi.AuthDisable(context.TODO()); err != nil {
t.Fatal(err)
}
// "Username" and "Password" must be used
cfg := clientv3.Config{
Endpoints: authapi.Endpoints(),
DialTimeout: 1 * time.Second, // make sure all connection time of connect all endpoint must be more DialTimeout
Username: "root",
Password: "123",
}
client, err = clientv3.New(cfg)
if err == nil {
defer client.Close()
}
switch err {
case nil:
t.Log("passes as expected, but may be connection time less than DialTimeout")
case context.DeadlineExceeded:
t.Errorf("not expected result:%v with endpoint:%s", err, authapi.Endpoints())
case rpctypes.ErrAuthNotEnabled:
t.Logf("passes as expected:%v", err)
default:
t.Errorf("other errors:%v", err)
}
} | explode_data.jsonl/69918 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 389
} | [
2830,
3393,
1949,
3323,
26040,
5087,
1155,
353,
8840,
836,
8,
341,
16867,
1273,
1314,
36892,
2271,
1155,
692,
197,
4163,
1669,
17590,
7121,
28678,
53,
18,
1155,
11,
609,
60168,
72883,
2648,
90,
1695,
25,
220,
16,
15,
3518,
16867,
1185... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestWaitReadsPipeSourceToCompletion(t *testing.T) {
t.Parallel()
source := bytes.NewBufferString("hello")
script.NewPipe().WithReader(source).FilterLine(strings.ToUpper).Wait()
if source.Len() > 0 {
t.Errorf("incomplete read: %d bytes of input remaining: %q", source.Len(), source.String())
}
} | explode_data.jsonl/51526 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 107
} | [
2830,
3393,
14190,
4418,
82,
34077,
3608,
1249,
33190,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
47418,
1669,
5820,
7121,
4095,
703,
445,
14990,
1138,
86956,
7121,
34077,
1005,
2354,
5062,
12437,
568,
5632,
2460,
51442,
49396,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestStatefulSetControllerUpdatePodRelease(t *testing.T) {
ssc, spc, _ := newFakeStatefulSetController()
set := newStatefulSet(3)
set2 := newStatefulSet(3)
set2.Name = "foo2"
pod := newStatefulSetPod(set, 0)
spc.setsIndexer.Add(set)
spc.setsIndexer.Add(set2)
clone := *pod
clone.OwnerReferences = nil
fakeResourceVersion(&clone)
ssc.updatePod(pod, &clone)
if got, want := ssc.queue.Len(), 2; got != want {
t.Errorf("queue.Len() = %v, want %v", got, want)
}
} | explode_data.jsonl/18619 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 193
} | [
2830,
3393,
1397,
1262,
1649,
2051,
4289,
23527,
16077,
1155,
353,
8840,
836,
8,
341,
34472,
66,
11,
978,
66,
11,
716,
1669,
501,
52317,
1397,
1262,
1649,
2051,
741,
8196,
1669,
36848,
1262,
1649,
7,
18,
340,
8196,
17,
1669,
36848,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestReceiveFromFile(t *testing.T) {
const node1_HWID = "node1"
const inputType = types.InputTypeImage
const instance = types.DefaultInputInstance
const instance2 = "secondinstance"
const instance3 = "notafile"
const testFile = "../test/testImage.jpg"
const test2File = "../test/testImage.jpg"
const test3File = "~/test/doesntexist.jpg"
var fileTouched = ""
handler := func(input *types.InputDiscoveryMessage, sender string, file string) {
fileTouched = file
}
regInputs := inputs.NewRegisteredInputs(domain, publisher1ID)
iff := inputs.NewReceiveFromFiles(regInputs)
iff.Start()
addr1 := iff.CreateInput(node1_HWID, inputType, instance, testFile, handler)
assert.NotEmpty(t, addr1, "No input address")
// adding twice should return the same address
addr1b := iff.CreateInput(node1_HWID, inputType, instance, testFile, handler)
assert.Equal(t, addr1, addr1b, "Different address second add")
// second input on the same file
addr2 := iff.CreateInput(node1_HWID, inputType, instance2, test2File, handler)
assert.NotEmpty(t, addr2, "Failed with two inputs on same file")
// invalid file
addr3 := iff.CreateInput(node1_HWID, inputType, instance3, test3File, handler)
assert.Empty(t, addr3, "No error when watching non existing file")
// trigger handler on change
err := ioutil.WriteFile(testFile, []byte("Hello World"), 0644)
time.Sleep(time.Second)
assert.NoError(t, err, "Unexpected problem touching test file")
assert.NotEmpty(t, fileTouched, "Handler not called when touching file")
// no more trigger after deleting input
iff.DeleteInput(node1_HWID, inputType, instance)
fileTouched = ""
ioutil.WriteFile(testFile, []byte("Hello World again"), 0644)
time.Sleep(time.Second)
assert.Empty(t, fileTouched, "Handler not called when touching file")
input := regInputs.GetInputByNodeHWID(node1_HWID, inputType, instance)
assert.Nil(t, input, "Deleted input is still there")
// error cases
// - delete input with empty source
input = regInputs.GetInputByNodeHWID(node1_HWID, inputType, instance2)
input.Source = ""
iff.DeleteInput(node1_HWID, inputType, instance2)
input = regInputs.GetInputByNodeHWID(node1_HWID, inputType, instance2)
assert.Nil(t, input, "Deleted input2 is still there")
// - delete non existing input (it was just deleted)
iff.DeleteInput(node1_HWID, inputType, instance2)
// delete last input
iff.DeleteInput(node1_HWID, inputType, instance3)
iff.Stop()
} | explode_data.jsonl/82690 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 825
} | [
2830,
3393,
14742,
43633,
1155,
353,
8840,
836,
8,
341,
4777,
2436,
16,
44013,
915,
284,
330,
3509,
16,
698,
4777,
1946,
929,
284,
4494,
16130,
929,
1906,
198,
4777,
2867,
284,
4494,
13275,
2505,
2523,
198,
4777,
2867,
17,
284,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestHmhdBoxFactory_New(t *testing.T) {
data := []byte{
0x11,
}
bmfcommon.PushBytes(&data, uint16(0x22))
bmfcommon.PushBytes(&data, uint16(0x33))
bmfcommon.PushBytes(&data, uint32(0x44))
bmfcommon.PushBytes(&data, uint32(0x55))
b := []byte{}
bmfcommon.PushBox(&b, "hmnd", data)
// Parse.
sb := rifs.NewSeekableBufferWithBytes(b)
file, err := bmfcommon.NewResource(sb, int64(len(b)))
log.PanicIf(err)
box, err := file.ReadBaseBox(0)
log.PanicIf(err)
cb, _, err := hmhdBoxFactory{}.New(box)
log.PanicIf(err)
hb := cb.(*HmhdBox)
if hb.Version() != 0x11 {
t.Fatalf("Version() not correct.")
} else if hb.MaxPDUSize() != 0x22 {
t.Fatalf("MaxPDUSize() not correct: (0x%04x)", hb.MaxPDUSize())
} else if hb.AvgPDUSize() != 0x33 {
t.Fatalf("AvgPDUSize() not correct: (0x%04x)", hb.AvgPDUSize())
} else if hb.MaxBitrate() != 0x44 {
t.Fatalf("MaxBitrate() not correct: (0x%08x)", hb.MaxBitrate())
} else if hb.AvgBitrate() != 0x55 {
t.Fatalf("AvgBitrate() not correct: (0x%08x)", hb.AvgBitrate())
}
} | explode_data.jsonl/11014 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 476
} | [
2830,
3393,
39,
76,
15990,
1611,
4153,
39582,
1155,
353,
8840,
836,
8,
341,
8924,
1669,
3056,
3782,
515,
197,
197,
15,
87,
16,
16,
345,
197,
630,
2233,
28124,
5464,
34981,
7078,
2099,
691,
11,
2622,
16,
21,
7,
15,
87,
17,
17,
11... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestClientNoUserAgent(t *testing.T) {
ln := fasthttputil.NewInmemoryListener()
userAgentSeen := ""
s := &Server{
Handler: func(ctx *RequestCtx) {
userAgentSeen = string(ctx.UserAgent())
},
}
go s.Serve(ln) //nolint:errcheck
c := &Client{
NoDefaultUserAgentHeader: true,
Dial: func(addr string) (net.Conn, error) {
return ln.Dial()
},
}
req := AcquireRequest()
res := AcquireResponse()
req.SetRequestURI("http://example.com")
err := c.Do(req, res)
if err != nil {
t.Fatal(err)
}
if userAgentSeen != "" {
t.Fatalf("User-Agent wrong %q != %q", userAgentSeen, "")
}
} | explode_data.jsonl/79346 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 254
} | [
2830,
3393,
2959,
2753,
1474,
16810,
1155,
353,
8840,
836,
8,
341,
197,
2261,
1669,
4937,
96336,
628,
321,
7121,
641,
17269,
2743,
2822,
19060,
16810,
85675,
1669,
8389,
1903,
1669,
609,
5475,
515,
197,
197,
3050,
25,
2915,
7502,
353,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWithRetry(t *testing.T) {
type test struct {
name string
rt retry.Retry
checkFunc func(Option) error
}
tests := []test{
func() test {
rt := new(mock.Retry)
return test{
name: "set success when rt is not nil",
rt: rt,
checkFunc: func(opt Option) error {
got := new(logger)
opt(got)
if !reflect.DeepEqual(got.retry, rt) {
return errors.New("invalid params was set")
}
return nil
},
}
}(),
func() test {
rt := new(mock.Retry)
return test{
name: "returns nothing when rt is not nil",
rt: nil,
checkFunc: func(opt Option) error {
got := &logger{
retry: rt,
}
opt(got)
if !reflect.DeepEqual(got.retry, rt) {
return errors.New("invalid params was set")
}
return nil
},
}
}(),
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
opt := WithRetry(tt.rt)
if err := tt.checkFunc(opt); err != nil {
t.Error(err)
}
})
}
} | explode_data.jsonl/2667 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 514
} | [
2830,
3393,
2354,
51560,
1155,
353,
8840,
836,
8,
341,
13158,
1273,
2036,
341,
197,
11609,
414,
914,
198,
197,
55060,
286,
22683,
2013,
15149,
198,
197,
25157,
9626,
2915,
7,
5341,
8,
1465,
198,
197,
630,
78216,
1669,
3056,
1944,
515,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGenerator(t *testing.T) {
manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{})
require.NoError(t, err)
require.True(t, len(manifests) >= 24, "insufficient combinations %d", len(manifests))
// this just means that the numbers reported by the test
// failures map to the test cases that you'd see locally.
e2e.SortManifests(manifests, false /* ascending */)
for idx, m := range manifests {
t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) {
numStateSyncs := 0
for name, node := range m.Nodes {
if node.StateSync != e2e.StateSyncDisabled {
numStateSyncs++
}
t.Run(name, func(t *testing.T) {
if node.StartAt > m.InitialHeight+5 && !node.Stateless() {
require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled)
}
if node.StateSync != e2e.StateSyncDisabled {
require.Zero(t, node.Seeds, node.StateSync)
require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0,
"peers: %v", node.PersistentPeers)
}
})
}
require.True(t, numStateSyncs <= 2)
})
}
} | explode_data.jsonl/59802 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 460
} | [
2830,
3393,
12561,
1155,
353,
8840,
836,
8,
341,
197,
42315,
82,
11,
1848,
1669,
19813,
37595,
7121,
37595,
7121,
3608,
25110,
41471,
5731,
14566,
37790,
17957,
35699,
1155,
11,
1848,
340,
17957,
32443,
1155,
11,
2422,
60671,
6962,
82,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestReExportDefaultExternalES6(t *testing.T) {
default_suite.expectBundled(t, bundled{
files: map[string]string{
"/entry.js": `
export {default as foo} from 'foo'
export {bar} from './bar'
`,
"/bar.js": `
export {default as bar} from 'bar'
`,
},
entryPaths: []string{"/entry.js"},
options: config.Options{
Mode: config.ModeBundle,
AbsOutputFile: "/out.js",
OutputFormat: config.FormatESModule,
ExternalModules: config.ExternalModules{
NodeModules: map[string]bool{
"foo": true,
"bar": true,
},
},
},
})
} | explode_data.jsonl/38550 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 267
} | [
2830,
3393,
693,
16894,
3675,
25913,
1570,
21,
1155,
353,
8840,
836,
8,
341,
11940,
57239,
25952,
33,
1241,
832,
1155,
11,
51450,
515,
197,
74075,
25,
2415,
14032,
30953,
515,
298,
197,
3115,
4085,
2857,
788,
22074,
571,
59440,
314,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_MapCopy(t *testing.T) {
dtest.C(t, func(t *dtest.T) {
m1 := d.Map{
"k1": "v1",
}
m2 := dutil.MapCopy(m1)
m2["k2"] = "v2"
t.Assert(m1["k1"], "v1")
t.Assert(m1["k2"], nil)
t.Assert(m2["k1"], "v1")
t.Assert(m2["k2"], "v2")
})
} | explode_data.jsonl/16852 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 162
} | [
2830,
3393,
56992,
12106,
1155,
353,
8840,
836,
8,
341,
2698,
1944,
727,
1155,
11,
2915,
1155,
353,
67,
1944,
836,
8,
341,
197,
2109,
16,
1669,
294,
10104,
515,
298,
197,
62911,
16,
788,
330,
85,
16,
756,
197,
197,
532,
197,
2109,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestClusterTopologyValidationWithClient(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
g := NewWithT(t)
tests := []struct {
name string
cluster *clusterv1.Cluster
class *clusterv1.ClusterClass
objects []client.Object
wantErr bool
}{
{
name: "Accept a cluster with an existing clusterclass named in cluster.spec.topology.class",
cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
WithTopology(
builder.ClusterTopology().
WithClass("clusterclass").
WithVersion("v1.22.2").
WithControlPlaneReplicas(3).
Build()).
Build(),
class: builder.ClusterClass(metav1.NamespaceDefault, "clusterclass").
Build(),
wantErr: false,
},
{
name: "Reject a cluster which has a non-existent clusterclass named in cluster.spec.topology.class",
cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
WithTopology(
builder.ClusterTopology().
WithClass("wrongName").
WithVersion("v1.22.2").
WithControlPlaneReplicas(3).
Build()).
Build(),
class: builder.ClusterClass(metav1.NamespaceDefault, "clusterclass").
Build(),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Sets up the fakeClient for the test case.
fakeClient := fake.NewClientBuilder().
WithObjects(tt.class).
WithScheme(fakeScheme).
Build()
// Create the webhook and add the fakeClient as its client. This is required because the test uses a Managed Topology.
c := &Cluster{Client: fakeClient}
// Checks the return error.
if tt.wantErr {
g.Expect(c.ValidateCreate(ctx, tt.cluster)).NotTo(Succeed())
} else {
g.Expect(c.ValidateCreate(ctx, tt.cluster)).To(Succeed())
}
})
}
} | explode_data.jsonl/82293 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 741
} | [
2830,
3393,
28678,
60954,
13799,
2354,
2959,
1155,
353,
8840,
836,
8,
341,
16867,
4094,
12753,
4202,
13859,
42318,
16014,
2271,
1155,
11,
4565,
1224,
973,
11,
4565,
72883,
60954,
11,
830,
8,
741,
3174,
1669,
1532,
2354,
51,
1155,
692,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_docs_update_by_query_d8b115341da772a628a024e7d1644e73(t *testing.T) {
es, _ := elasticsearch.NewDefaultClient()
// tag:d8b115341da772a628a024e7d1644e73[]
res, err := es.UpdateByQuery(
[]string{"twitter"},
es.UpdateByQuery.WithRouting("1"),
)
fmt.Println(res, err)
if err != nil { // SKIP
t.Fatalf("Error getting the response: %s", err) // SKIP
} // SKIP
defer res.Body.Close() // SKIP
// end:d8b115341da772a628a024e7d1644e73[]
} | explode_data.jsonl/78123 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 200
} | [
2830,
3393,
49692,
8882,
3710,
5738,
814,
23,
65,
16,
16,
20,
18,
19,
16,
3235,
22,
22,
17,
64,
21,
17,
23,
64,
15,
17,
19,
68,
22,
67,
16,
21,
19,
19,
68,
22,
18,
1155,
353,
8840,
836,
8,
341,
78966,
11,
716,
1669,
655,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestBasicFunctionality(t *testing.T) {
lines := []string{
"# comment",
"weather,location=us-midwest temperature=82 1465839830100400200", // basic line
"weather,location=us-midwest temperature=82u", // no timestamp
"# comment",
"weather2,location=us-midwest,source=test-source temperature=82,foo=12.3,bar=-1202.23 1465839830100400201"}
currentTime := time.Now()
actual, _ := Parse(strings.Join(lines, "\n"), currentTime)
expected := []general.Point{
{Measurement: "weather", Fields: map[string]interface{}{"temperature": 82.0}, Tags: map[string]string{"location": "us-midwest"}, Timestamp: time.Unix(0, int64(1465839830100400200))},
{Measurement: "weather", Fields: map[string]interface{}{"temperature": int64(82)}, Tags: map[string]string{"location": "us-midwest"}, Timestamp: currentTime}, // make this nil
{Measurement: "weather2", Fields: map[string]interface{}{"temperature": 82.0, "foo": 12.3, "bar": -1202.23}, Tags: map[string]string{"location": "us-midwest", "source": "test-source"}, Timestamp: time.Unix(0, int64(1465839830100400201))},
}
assert.Equal(t, expected, actual, "The two objects should be the same.")
} | explode_data.jsonl/15580 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 416
} | [
2830,
3393,
15944,
5152,
2719,
1155,
353,
8840,
836,
8,
341,
78390,
1669,
3056,
917,
515,
197,
197,
57676,
3980,
756,
197,
197,
1,
15206,
11,
2527,
28,
355,
1448,
307,
11039,
9315,
28,
23,
17,
220,
16,
19,
21,
20,
23,
18,
24,
23... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUpdateUserAuth(t *testing.T) {
th := Setup(t)
defer th.TearDown()
team := th.CreateTeamWithClient(th.SystemAdminClient)
user := th.CreateUser()
th.LinkUserToTeam(user, team)
_, err := th.App.Srv().Store.User().VerifyEmail(user.Id, user.Email)
require.NoError(t, err)
userAuth := &model.UserAuth{}
userAuth.AuthData = user.AuthData
userAuth.AuthService = user.AuthService
userAuth.Password = user.Password
// Regular user can not use endpoint
_, respErr, _ := th.SystemAdminClient.UpdateUserAuth(user.Id, userAuth)
require.NotNil(t, respErr, "Shouldn't have permissions. Only Admins")
userAuth.AuthData = model.NewString("test@test.com")
userAuth.AuthService = model.UserAuthServiceSaml
userAuth.Password = "newpassword"
ruser, _, err := th.SystemAdminClient.UpdateUserAuth(user.Id, userAuth)
require.NoError(t, err)
// AuthData and AuthService are set, password is set to empty
require.Equal(t, *userAuth.AuthData, *ruser.AuthData)
require.Equal(t, model.UserAuthServiceSaml, ruser.AuthService)
require.Empty(t, ruser.Password)
// When AuthData or AuthService are empty, password must be valid
userAuth.AuthData = user.AuthData
userAuth.AuthService = ""
userAuth.Password = "1"
_, respErr, _ = th.SystemAdminClient.UpdateUserAuth(user.Id, userAuth)
require.NotNil(t, respErr)
// Regular user can not use endpoint
user2 := th.CreateUser()
th.LinkUserToTeam(user2, team)
_, err = th.App.Srv().Store.User().VerifyEmail(user2.Id, user2.Email)
require.NoError(t, err)
th.SystemAdminClient.Login(user2.Email, "passwd1")
userAuth.AuthData = user.AuthData
userAuth.AuthService = user.AuthService
userAuth.Password = user.Password
_, respErr, _ = th.SystemAdminClient.UpdateUserAuth(user.Id, userAuth)
require.NotNil(t, respErr, "Should have errored")
} | explode_data.jsonl/47508 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 642
} | [
2830,
3393,
4289,
1474,
5087,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1155,
340,
16867,
270,
836,
682,
4454,
2822,
197,
9196,
1669,
270,
7251,
14597,
2354,
2959,
24365,
16620,
7210,
2959,
692,
19060,
1669,
270,
7251,
1474,
282... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestConvertMetaV2ContainerStats(t *testing.T) {
stats := &v2.ContainerStats{
CPU: v2.CPUStats{
System: 3951680000000,
Usage: v2.CPUUsage{
Kernelmode: 2260000000,
Total: 9743590394,
Usermode: 7450000000,
},
},
Memory: v2.MemStats{
Details: v2.DetailedMem{
RSS: 1564672,
Cache: 65499136,
PgFault: 430478,
},
Limit: 268435456,
MaxUsage: 139751424,
Usage: 77254656,
},
IO: v2.IOStats{
BytesPerDeviceAndKind: []v2.OPStat{
{
Kind: "Read",
Major: 259,
Minor: 0,
Value: 12288,
},
{
Kind: "Write",
Major: 259,
Minor: 0,
Value: 144908288,
},
{
Kind: "Sync",
Major: 259,
Minor: 0,
Value: 8122368,
},
{
Kind: "Async",
Major: 259,
Minor: 0,
Value: 136798208,
},
{
Kind: "Total",
Major: 259,
Minor: 0,
Value: 144920576,
},
},
OPPerDeviceAndKind: []v2.OPStat{
{
Kind: "Read",
Major: 259,
Minor: 0,
Value: 3,
},
{
Kind: "Write",
Major: 259,
Minor: 0,
Value: 1618,
},
{
Kind: "Sync",
Major: 259,
Minor: 0,
Value: 514,
},
{
Kind: "Async",
Major: 259,
Minor: 0,
Value: 1107,
},
{
Kind: "Total",
Major: 259,
Minor: 0,
Value: 1621,
},
},
ReadBytes: 1024,
WriteBytes: 256,
},
Network: v2.NetStats{},
}
expectedCPU := &metrics.ContainerCPUStats{
User: 7450000000,
System: 2260000000,
SystemUsage: 3951680000000,
}
expectedMem := &metrics.ContainerMemStats{
Cache: 65499136,
MemUsageInBytes: 77254656,
Pgfault: 430478,
RSS: 1564672,
}
expectedIO := &metrics.ContainerIOStats{
ReadBytes: 1024,
WriteBytes: 256,
}
containerMetrics, memLimit := convertMetaV2ContainerStats(stats)
assert.Equal(t, expectedCPU, containerMetrics.CPU)
assert.Equal(t, expectedMem, containerMetrics.Memory)
assert.Equal(t, expectedIO, containerMetrics.IO)
assert.Equal(t, uint64(268435456), memLimit)
} | explode_data.jsonl/51432 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1147
} | [
2830,
3393,
12012,
12175,
53,
17,
4502,
16635,
1155,
353,
8840,
836,
8,
341,
79659,
1669,
609,
85,
17,
33672,
16635,
515,
197,
6258,
6325,
25,
348,
17,
727,
6325,
16635,
515,
298,
5816,
25,
220,
18,
24,
20,
16,
21,
23,
15,
15,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestKraken_CancelOrder(t *testing.T) {
r, err := k.CancelOrder("O6EAJC-YAC3C-XDEEXQ", goex.NewCurrencyPair(goex.XBT, goex.USD))
assert.Nil(t, err)
t.Log(r)
} | explode_data.jsonl/44495 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 85
} | [
2830,
3393,
81165,
3366,
97485,
4431,
1155,
353,
8840,
836,
8,
341,
7000,
11,
1848,
1669,
595,
36491,
4431,
445,
46,
21,
19112,
58367,
29137,
1706,
18,
34,
30550,
1150,
3257,
48,
497,
728,
327,
7121,
26321,
12443,
47415,
327,
4338,
17... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestDownsamplerAggregationWithOverrideRules(t *testing.T) {
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
sampleAppenderOpts: &SampleAppenderOptions{
Override: true,
OverrideRules: SamplesAppenderOverrideRules{
MappingRules: []MappingRule{
{
Aggregations: []aggregation.Type{aggregation.Mean},
Policies: []policy.StoragePolicy{
policy.MustParseStoragePolicy("4s:1d"),
},
},
},
},
},
expectedAdjusted: map[string]float64{
"gauge0": 5.0,
"counter0": 2.0,
},
autoMappingRules: []MappingRule{
{
Aggregations: []aggregation.Type{testAggregationType},
Policies: testAggregationStoragePolicies,
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
} | explode_data.jsonl/9267 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 330
} | [
2830,
3393,
4454,
82,
34501,
9042,
34442,
2354,
2177,
26008,
1155,
353,
8840,
836,
8,
341,
18185,
4454,
82,
34501,
1669,
501,
2271,
4454,
82,
34501,
1155,
11,
1273,
4454,
82,
34501,
3798,
515,
197,
1903,
1516,
2164,
1659,
43451,
25,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEncodeDecode(t *testing.T) {
p1 := testutils.GetPollWithVotes()
p2 := poll.DecodePollFromByte(p1.EncodeToByte())
assert.Equal(t, p1, p2)
} | explode_data.jsonl/30285 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 66
} | [
2830,
3393,
32535,
32564,
1155,
353,
8840,
836,
8,
341,
3223,
16,
1669,
1273,
6031,
2234,
49207,
2354,
75535,
741,
3223,
17,
1669,
7085,
56372,
49207,
3830,
7153,
1295,
16,
50217,
1249,
7153,
2398,
6948,
12808,
1155,
11,
281,
16,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestPrepareCacheDeferredFunction(t *testing.T) {
store, clean := testkit.CreateMockStore(t)
defer clean()
orgEnable := core.PreparedPlanCacheEnabled()
defer core.SetPreparedPlanCache(orgEnable)
core.SetPreparedPlanCache(true)
se, err := session.CreateSession4TestWithOpt(store, &session.Opt{
PreparedPlanCache: kvcache.NewSimpleLRUCache(100, 0.1, math.MaxUint64),
})
require.NoError(t, err)
tk := testkit.NewTestKitWithSession(t, store, se)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (id int PRIMARY KEY, c1 TIMESTAMP(3) NOT NULL DEFAULT '2019-01-14 10:43:20', KEY idx1 (c1))")
tk.MustExec("prepare sel1 from 'select id, c1 from t1 where c1 < now(3)'")
sql1 := "execute sel1"
expectedPattern := `IndexReader\(Index\(t1.idx1\)\[\[-inf,[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1]) (2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9].[0-9][0-9][0-9]\)\]\)->Sel\(\[lt\(test.t1.c1, now\(3\)\)\]\)`
var cnt [2]float64
var planStr [2]string
metrics.ResettablePlanCacheCounterFortTest = true
metrics.PlanCacheCounter.Reset()
counter := metrics.PlanCacheCounter.WithLabelValues("prepare")
ctx := context.TODO()
p := parser.New()
p.SetParserConfig(parser.ParserConfig{EnableWindowFunction: true, EnableStrictDoubleTypeCheck: true})
for i := 0; i < 2; i++ {
stmt, err := p.ParseOneStmt(sql1, "", "")
require.NoError(t, err)
is := tk.Session().GetInfoSchema().(infoschema.InfoSchema)
builder, _ := core.NewPlanBuilder().Init(tk.Session(), is, &hint.BlockHintProcessor{})
p, err := builder.Build(ctx, stmt)
require.NoError(t, err)
execPlan, ok := p.(*core.Execute)
require.True(t, ok)
err = executor.ResetContextOfStmt(tk.Session(), stmt)
require.NoError(t, err)
err = execPlan.OptimizePreparedPlan(ctx, tk.Session(), is)
require.NoError(t, err)
planStr[i] = core.ToString(execPlan.Plan)
require.Regexpf(t, expectedPattern, planStr[i], "for %dth %s", i, sql1)
pb := &dto.Metric{}
err = counter.Write(pb)
require.NoError(t, err)
cnt[i] = pb.GetCounter().GetValue()
require.Equal(t, float64(i), cnt[i])
time.Sleep(time.Millisecond * 10)
}
require.Lessf(t, planStr[0], planStr[1], "plan 1: %v, plan 2: %v", planStr[0], planStr[1])
} | explode_data.jsonl/5499 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 958
} | [
2830,
3393,
50590,
8233,
88417,
5152,
1155,
353,
8840,
836,
8,
341,
57279,
11,
4240,
1669,
1273,
8226,
7251,
11571,
6093,
1155,
340,
16867,
4240,
741,
87625,
11084,
1669,
6200,
28770,
7212,
20485,
8233,
5462,
741,
16867,
6200,
4202,
4703,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestCombine(t *testing.T) {
newPkgFromYmlStr := func(t *testing.T, pkgStr string) *Pkg {
t.Helper()
return newParsedPkg(t, FromString(pkgStr), EncodingYAML, ValidSkipParseError())
}
associationsEqual := func(t *testing.T, summaryLabels []SummaryLabel, names ...string) {
t.Helper()
require.Len(t, summaryLabels, len(names))
m := make(map[string]bool)
for _, n := range names {
m[n] = true
}
for _, l := range summaryLabels {
if !m[l.Name] {
assert.Fail(t, "did not find label: "+l.Name)
}
delete(m, l.Name)
}
if len(m) > 0 {
var unexpectedLabels []string
for name := range m {
unexpectedLabels = append(unexpectedLabels, name)
}
assert.Failf(t, "additional labels found", "got: %v", unexpectedLabels)
}
}
t.Run("multiple pkgs with associations across files", func(t *testing.T) {
var pkgs []*Pkg
numLabels := 5
for i := 0; i < numLabels; i++ {
pkg := newPkgFromYmlStr(t, fmt.Sprintf(`
apiVersion: %[1]s
kind: Label
metadata:
name: label-%d
`, APIVersion, i))
pkgs = append(pkgs, pkg)
}
pkgs = append(pkgs, newPkgFromYmlStr(t, fmt.Sprintf(`
apiVersion: %[1]s
kind: Bucket
metadata:
name: rucket-1
spec:
associations:
- kind: Label
name: label-1
`, APIVersion)))
pkgs = append(pkgs, newPkgFromYmlStr(t, fmt.Sprintf(`
apiVersion: %[1]s
kind: Bucket
metadata:
name: rucket-2
spec:
associations:
- kind: Label
name: label-2
`, APIVersion)))
pkgs = append(pkgs, newPkgFromYmlStr(t, fmt.Sprintf(`
apiVersion: %[1]s
kind: Bucket
metadata:
name: rucket-3
spec:
associations:
- kind: Label
name: label-1
- kind: Label
name: label-2
`, APIVersion)))
combinedPkg, err := Combine(pkgs)
require.NoError(t, err)
sum := combinedPkg.Summary()
require.Len(t, sum.Labels, numLabels)
for i := 0; i < numLabels; i++ {
assert.Equal(t, fmt.Sprintf("label-%d", i), sum.Labels[i].Name)
}
require.Len(t, sum.Labels, numLabels)
for i := 0; i < numLabels; i++ {
assert.Equal(t, fmt.Sprintf("label-%d", i), sum.Labels[i].Name)
}
require.Len(t, sum.Buckets, 3)
assert.Equal(t, "rucket-1", sum.Buckets[0].Name)
associationsEqual(t, sum.Buckets[0].LabelAssociations, "label-1")
assert.Equal(t, "rucket-2", sum.Buckets[1].Name)
associationsEqual(t, sum.Buckets[1].LabelAssociations, "label-2")
assert.Equal(t, "rucket-3", sum.Buckets[2].Name)
associationsEqual(t, sum.Buckets[2].LabelAssociations, "label-1", "label-2")
})
} | explode_data.jsonl/50596 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1101
} | [
2830,
3393,
81114,
1155,
353,
8840,
836,
8,
341,
8638,
47,
7351,
3830,
56,
1014,
2580,
1669,
2915,
1155,
353,
8840,
836,
11,
24793,
2580,
914,
8,
353,
47,
7351,
341,
197,
3244,
69282,
741,
197,
853,
501,
82959,
47,
7351,
1155,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLanguageDetection(t *testing.T) {
var langs = []struct {
Code, ELang, ERegion string
DLang, DRegion string
Detected bool
EError error
Switch int
}{
{
Switch: 1,
Code: "en_US.UTF_8",
ELang: "en",
ERegion: "US",
Detected: true,
}, {
Switch: 2,
Code: "es_CL.ISO_8859_1",
ELang: "es",
ERegion: "CL",
Detected: true,
}, {
Switch: 1,
Code: "co_CR",
ELang: "co",
ERegion: "CR",
Detected: true,
}, {
Switch: 2,
Code: "es_419.iso_8859_15",
ELang: "es",
ERegion: "419",
Detected: true,
}, {
Switch: 1,
Code: "this_is_not_valid1",
Detected: false,
EError: ErrNoLangDetected,
}, {
Switch: 1,
Code: "this_is_not_valid2",
ELang: "en",
ERegion: "US",
DLang: "en",
DRegion: "US",
Detected: false,
EError: nil,
},
}
for _, tv := range langs {
validateLang(t, tv.Switch, tv.Code, tv.DLang, tv.DRegion, tv.ELang, tv.ERegion, tv.Detected, tv.EError)
}
} | explode_data.jsonl/53397 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 623
} | [
2830,
3393,
13806,
54817,
1155,
353,
8840,
836,
8,
341,
2405,
63952,
284,
3056,
1235,
341,
197,
90774,
11,
17258,
524,
11,
468,
14091,
914,
198,
197,
10957,
26223,
11,
422,
14091,
981,
914,
198,
197,
10957,
295,
1569,
1797,
1807,
198,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGrpc_GetSequenceByHash(t *testing.T) {
qapi.On("GetSequenceByHash", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
_, err := g.GetSequenceByHash(getOkCtx(), &pb.ReqHash{})
assert.NoError(t, err)
} | explode_data.jsonl/335 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 89
} | [
2830,
3393,
6464,
3992,
13614,
14076,
1359,
6370,
1155,
353,
8840,
836,
8,
341,
18534,
2068,
8071,
445,
1949,
14076,
1359,
6370,
497,
7860,
13311,
1596,
11,
7860,
13311,
1596,
11,
7860,
13311,
1596,
568,
5598,
27907,
11,
2092,
340,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSource_ExtractVideoId3(t *testing.T) {
_, err := ytsrc.ExtractVideoId(rick3)
assert.NotNil(t, err, "the error is supposed to not be nil")
} | explode_data.jsonl/3643 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 59
} | [
2830,
3393,
3608,
62,
28959,
10724,
764,
18,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1848,
1669,
379,
2576,
1287,
5121,
2144,
10724,
764,
2601,
865,
18,
340,
6948,
93882,
1155,
11,
1848,
11,
330,
1782,
1465,
374,
9966,
311,
537,
38... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestPullImagesToArchive(t *testing.T) {
impl := spdxDefaultImplementation{}
// First. If the tag does not represent an image, expect an error
_, err := impl.PullImagesToArchive("k8s.gcr.io/pause:0.0", "/tmp")
require.Error(t, err)
// Create a temp workdir
dir, err := os.MkdirTemp("", "extract-image-")
require.NoError(t, err)
defer os.RemoveAll(dir)
// The pause 1.0 image is a single image
images, err := impl.PullImagesToArchive("k8s.gcr.io/pause:1.0", dir)
require.NoError(t, err)
require.Len(t, images, 1)
require.FileExists(t, filepath.Join(dir, "a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f.tar"))
foundFiles := []string{}
expectedFiles := []string{
"sha256:350b164e7ae1dcddeffadd65c76226c9b6dc5553f5179153fb0e36b78f2a5e06",
"a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4.tar.gz",
"4964c72cd0245a7f77da38425dc98b472b2699ba6c49d5a9221fb32b972bc06b.tar.gz",
"manifest.json",
}
tarFile, err := os.Open(filepath.Join(dir, "a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f.tar"))
require.NoError(t, err)
defer tarFile.Close()
tarReader := tar.NewReader(tarFile)
for {
header, err := tarReader.Next()
if err != nil {
if err == io.EOF {
break
}
}
if header == nil {
break
}
foundFiles = append(foundFiles, header.Name)
}
require.Equal(t, expectedFiles, foundFiles)
} | explode_data.jsonl/7742 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 629
} | [
2830,
3393,
36068,
14228,
1249,
42502,
1155,
353,
8840,
836,
8,
341,
197,
6383,
1669,
978,
12719,
3675,
36850,
31483,
197,
322,
5512,
13,
1416,
279,
4772,
1558,
537,
4009,
458,
2168,
11,
1720,
458,
1465,
198,
197,
6878,
1848,
1669,
11... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestWorkflowValidateInputOutputNames(t *testing.T) {
workflow1 := &Workflow{
Name: "testWorkflow1",
Version: "v0.0.0.0.1",
TaskTemplates: []TaskTemplate{
TaskTemplate{
TaskName: "GetHTML",
Initial: true,
ActionTemplates: []ActionTemplate{
ActionTemplate{
Name: "HTTP1",
StructName: "HTTPAction",
},
},
DataPipeTemplates: []DataPipeTemplate{
DataPipeTemplate{
TaskInputName: "url_params",
DestActionName: "HTTP1",
DestInputName: "params",
},
},
},
},
}
err := workflow1.validateInputOutputNames()
assert.NotNil(t, err)
workflow2 := &Workflow{
Name: "testWorkflow2",
Version: "v0.0.0.0.1",
TaskTemplates: []TaskTemplate{
TaskTemplate{
TaskName: "GetHTML",
Initial: true,
ActionTemplates: []ActionTemplate{
ActionTemplate{
Name: "HTTP1",
StructName: "HTTPAction",
},
},
DataPipeTemplates: []DataPipeTemplate{
DataPipeTemplate{
TaskInputName: "url_params",
DestActionName: "HTTP1",
DestInputName: HTTPActionInputURLParams,
},
DataPipeTemplate{
SourceActionName: "HTTTP1",
SourceOutputName: "body",
TaskOutputName: "body",
},
},
},
},
}
err = workflow2.validateInputOutputNames()
assert.NotNil(t, err)
workflow3 := &Workflow{
Name: "testWorkflow3",
Version: "v0.0.0.0.1",
TaskTemplates: []TaskTemplate{
TaskTemplate{
TaskName: "GetHTML",
Initial: true,
ActionTemplates: []ActionTemplate{
ActionTemplate{
Name: "HTTP1",
StructName: "HTTPAction",
},
},
DataPipeTemplates: []DataPipeTemplate{
DataPipeTemplate{
TaskInputName: "url_params",
DestActionName: "HTTP1",
DestInputName: HTTPActionInputURLParams,
},
DataPipeTemplate{
SourceActionName: "HTTP1",
SourceOutputName: HTTPActionOutputBody,
TaskOutputName: "body",
},
},
},
},
}
err = workflow3.validateInputOutputNames()
assert.Nil(t, err)
} | explode_data.jsonl/19594 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 990
} | [
2830,
3393,
62768,
17926,
2505,
5097,
7980,
1155,
353,
8840,
836,
8,
341,
197,
56249,
16,
1669,
609,
62768,
515,
197,
21297,
25,
262,
330,
1944,
62768,
16,
756,
197,
77847,
25,
330,
85,
15,
13,
15,
13,
15,
13,
15,
13,
16,
756,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMain(t *testing.T) {
RegisterFailHandler(Fail)
pluginbuilder.BuildTestBinary(filepath.Join("..", "fixtures", "plugins"), "test_1")
pluginbuilder.BuildTestBinary(filepath.Join("..", "fixtures", "plugins"), "test_2")
pluginbuilder.BuildTestBinary(filepath.Join("..", "fixtures", "plugins"), "test_with_push")
pluginbuilder.BuildTestBinary(filepath.Join("..", "fixtures", "plugins"), "test_with_push_short_name")
pluginbuilder.BuildTestBinary(filepath.Join("..", "fixtures", "plugins"), "test_with_help")
pluginbuilder.BuildTestBinary(filepath.Join("..", "fixtures", "plugins"), "my_say")
pluginbuilder.BuildTestBinary(filepath.Join("..", "fixtures", "plugins"), "call_core_cmd")
pluginbuilder.BuildTestBinary(filepath.Join("..", "fixtures", "plugins"), "input")
pluginbuilder.BuildTestBinary(filepath.Join("..", "fixtures", "plugins"), "panics")
//compile plugin examples to ensure they're up to date
pluginbuilder.BuildTestBinary(filepath.Join("..", "plugin_examples"), "basic_plugin")
pluginbuilder.BuildTestBinary(filepath.Join("..", "plugin_examples"), "echo")
pluginbuilder.BuildTestBinary(filepath.Join("..", "plugin_examples"), "interactive")
RunSpecs(t, "Main Suite")
} | explode_data.jsonl/50144 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 398
} | [
2830,
3393,
6202,
1155,
353,
8840,
836,
8,
341,
79096,
19524,
3050,
7832,
604,
692,
197,
9138,
17850,
25212,
2271,
21338,
34793,
22363,
95032,
497,
330,
45247,
497,
330,
18716,
3975,
330,
1944,
62,
16,
1138,
197,
9138,
17850,
25212,
227... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetServingRuntime(t *testing.T) {
g := gomega.NewGomegaWithT(t)
namespace := "default"
tfRuntime := "tf-runtime"
sklearnRuntime := "sklearn-runtime"
servingRuntimeSpecs := map[string]v1alpha1.ServingRuntimeSpec{
tfRuntime: {
SupportedModelFormats: []v1alpha1.SupportedModelFormat{
{
Name: "tensorflow",
Version: proto.String("1"),
},
},
ServingRuntimePodSpec: v1alpha1.ServingRuntimePodSpec{
Containers: []v1.Container{
{
Name: "kserve-container",
Image: tfRuntime + "-image:latest",
},
},
},
Disabled: proto.Bool(false),
},
sklearnRuntime: {
SupportedModelFormats: []v1alpha1.SupportedModelFormat{
{
Name: "sklearn",
Version: proto.String("0"),
},
},
ServingRuntimePodSpec: v1alpha1.ServingRuntimePodSpec{
Containers: []v1.Container{
{
Name: "kserve-container",
Image: sklearnRuntime + "-image:latest",
},
},
},
Disabled: proto.Bool(false),
},
}
runtimes := &v1alpha1.ServingRuntimeList{
Items: []v1alpha1.ServingRuntime{
{
ObjectMeta: metav1.ObjectMeta{
Name: tfRuntime,
Namespace: namespace,
},
Spec: servingRuntimeSpecs[tfRuntime],
},
},
}
clusterRuntimes := &v1alpha1.ClusterServingRuntimeList{
Items: []v1alpha1.ClusterServingRuntime{
{
ObjectMeta: metav1.ObjectMeta{
Name: sklearnRuntime,
},
Spec: servingRuntimeSpecs[sklearnRuntime],
},
},
}
scenarios := map[string]struct {
runtimeName string
expected v1alpha1.ServingRuntimeSpec
}{
"NamespaceServingRuntime": {
runtimeName: tfRuntime,
expected: servingRuntimeSpecs[tfRuntime],
},
"ClusterServingRuntime": {
runtimeName: sklearnRuntime,
expected: servingRuntimeSpecs[sklearnRuntime],
},
}
s := runtime.NewScheme()
v1alpha1.AddToScheme(s)
mockClient := fake.NewClientBuilder().WithLists(runtimes, clusterRuntimes).WithScheme(s).Build()
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
res, _ := GetServingRuntime(mockClient, scenario.runtimeName, namespace)
if !g.Expect(res).To(gomega.Equal(&scenario.expected)) {
t.Errorf("got %v, want %v", res, &scenario.expected)
}
})
}
// Check invalid case
t.Run("InvalidServingRuntime", func(t *testing.T) {
res, err := GetServingRuntime(mockClient, "foo", namespace)
if !g.Expect(res).To(gomega.BeNil()) {
t.Errorf("got %v, want %v", res, nil)
}
g.Expect(err.Error()).To(gomega.ContainSubstring("No ServingRuntimes or ClusterServingRuntimes with the name"))
})
} | explode_data.jsonl/54486 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1112
} | [
2830,
3393,
1949,
50,
19505,
15123,
1155,
353,
8840,
836,
8,
341,
3174,
1669,
342,
32696,
7121,
38,
32696,
2354,
51,
1155,
692,
56623,
1669,
330,
2258,
1837,
3244,
69,
15123,
1669,
330,
8935,
68912,
698,
1903,
74,
12675,
15123,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_datastoreRestGet(t *testing.T) {
c := mustNewContext(nil)
defer c.Close()
var cfg Config
status, err := datastoreRestGet(c, configKey(c), &cfg)
assert.Equal(t, http.StatusInternalServerError, status)
assert.Error(t, err)
cfg.ClientSecret = "foo"
mustPut(t, c, &cfg)
status, err = datastoreRestGet(c, configKey(c), &cfg)
assert.Equal(t, http.StatusOK, status)
assert.NoError(t, err)
} | explode_data.jsonl/10297 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 163
} | [
2830,
3393,
1769,
4314,
12416,
1949,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
1969,
3564,
1972,
27907,
340,
16867,
272,
10421,
741,
2405,
13286,
5532,
198,
23847,
11,
1848,
1669,
64986,
12416,
1949,
1337,
11,
2193,
1592,
1337,
701,
609... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRemoveAllPushNotificationsBuildPath(t *testing.T) {
assert := assert.New(t)
opts := &removeAllPushChannelsForDeviceOpts{
DeviceIDForPush: "deviceId",
PushType: PNPushTypeAPNS,
pubnub: pubnub,
}
str, err := opts.buildPath()
assert.Equal("/v1/push/sub-key/sub_key/devices/deviceId/remove", str)
assert.Nil(err)
} | explode_data.jsonl/51096 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 152
} | [
2830,
3393,
13021,
2403,
16644,
34736,
11066,
1820,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
64734,
1669,
609,
5399,
2403,
16644,
35925,
2461,
6985,
43451,
515,
197,
197,
6985,
915,
2461,
16644,
25,
330,
94202,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPongBIP0031(t *testing.T) {
// Use the protocol version just prior to BIP0031Version changes.
pver := wire.BIP0031Version
nonce, err := wire.RandomUint64()
if err != nil {
t.Errorf("Error generating nonce: %v", err)
}
msg := wire.NewMsgPong(nonce)
if msg.Nonce != nonce {
t.Errorf("Should get same nonce back out.")
}
// Ensure max payload is expected value for old protocol version.
size := msg.MaxPayloadLength(pver)
if size != 0 {
t.Errorf("Max length should be 0 for pong protocol version %d.",
pver)
}
// Test encode with old protocol version.
var buf bytes.Buffer
err = msg.BtcEncode(&buf, pver)
if err == nil {
t.Errorf("encode of MsgPong succeeded when it shouldn't have %v",
msg)
}
// Test decode with old protocol version.
readmsg := wire.NewMsgPong(0)
err = readmsg.BtcDecode(&buf, pver)
if err == nil {
t.Errorf("decode of MsgPong succeeded when it shouldn't have %v",
spew.Sdump(buf))
}
// Since this protocol version doesn't support pong, make sure the
// nonce didn't get encoded and decoded back out.
if msg.Nonce == readmsg.Nonce {
t.Errorf("Should not get same nonce for protocol version %d", pver)
}
return
} | explode_data.jsonl/6620 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 431
} | [
2830,
3393,
47,
644,
33,
3298,
15,
15,
18,
16,
1155,
353,
8840,
836,
8,
341,
197,
322,
5443,
279,
11507,
2319,
1101,
4867,
311,
425,
3298,
15,
15,
18,
16,
5637,
4344,
624,
3223,
423,
1669,
9067,
1785,
3298,
15,
15,
18,
16,
5637,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestIndexOf(t *testing.T) {
items := []int{23, 24, 2, 5, 10}
interfaceItems := make([]interface{}, len(items))
for i, v := range items {
interfaceItems[i] = v
}
a := New(interfaceItems)
result := a.IndexOf(5)
if result != 3 {
t.Log("IndexOf should return the position of specified item")
t.Log("Expected", 3, "\n Got", result)
t.Fail()
}
nonExistentIndex := a.IndexOf(0)
if nonExistentIndex != -1 {
t.Log("IndexOf should return -1 if item does not exist in array")
t.Log("Expected", -1, "\n Got", nonExistentIndex)
t.Fail()
}
} | explode_data.jsonl/47100 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 220
} | [
2830,
3393,
27376,
1155,
353,
8840,
836,
8,
341,
46413,
1669,
3056,
396,
90,
17,
18,
11,
220,
17,
19,
11,
220,
17,
11,
220,
20,
11,
220,
16,
15,
532,
58915,
1564,
4353,
1669,
1281,
10556,
4970,
22655,
2422,
24337,
4390,
2023,
600,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func Test_isFailedLoginCountSet(t *testing.T) {
var username string
var object, schema types.M
var accountLockout *AccountLockout
var isSet bool
var err error
/*****************************************************************/
initEnv()
username = "joe"
schema = types.M{
"fields": types.M{
"username": types.M{"type": "String"},
"password": types.M{"type": "String"},
},
}
orm.Adapter.CreateClass("_User", schema)
object = types.M{
"objectId": "01",
"username": username,
}
orm.Adapter.CreateObject("_User", schema, object)
accountLockout = NewAccountLockout(username)
isSet, err = accountLockout.isFailedLoginCountSet()
if err != nil || isSet != false {
t.Error("expect:", false, "result:", isSet, err)
}
orm.TomatoDBController.DeleteEverything()
/*****************************************************************/
initEnv()
username = "joe"
schema = types.M{
"fields": types.M{
"username": types.M{"type": "String"},
"password": types.M{"type": "String"},
},
}
orm.Adapter.CreateClass("_User", schema)
object = types.M{
"objectId": "01",
"username": username,
"_failed_login_count": 3,
}
orm.Adapter.CreateObject("_User", schema, object)
accountLockout = NewAccountLockout(username)
isSet, err = accountLockout.isFailedLoginCountSet()
if err != nil || isSet != true {
t.Error("expect:", true, "result:", isSet, err)
}
orm.TomatoDBController.DeleteEverything()
} | explode_data.jsonl/73721 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 527
} | [
2830,
3393,
6892,
9408,
6231,
2507,
1649,
1155,
353,
8840,
836,
8,
341,
2405,
5934,
914,
198,
2405,
1633,
11,
10802,
4494,
1321,
198,
2405,
2692,
11989,
411,
353,
7365,
11989,
411,
198,
2405,
374,
1649,
1807,
198,
2405,
1848,
1465,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestIssue41(t *testing.T) {
r, w := io.Pipe()
zw := lz4.NewWriter(w)
zr := lz4.NewReader(r)
data := "x"
go func() {
_, _ = fmt.Fprint(zw, data)
_ = zw.Flush()
_ = zw.Close()
_ = w.Close()
}()
var buf bytes.Buffer
_, _ = buf.ReadFrom(zr)
if got, want := buf.String(), data; got != want {
t.Fatal("uncompressed data does not match original")
}
} | explode_data.jsonl/40567 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 175
} | [
2830,
3393,
42006,
19,
16,
1155,
353,
8840,
836,
8,
341,
7000,
11,
289,
1669,
6399,
1069,
3444,
741,
20832,
86,
1669,
95982,
19,
7121,
6492,
3622,
340,
20832,
81,
1669,
95982,
19,
68587,
2601,
692,
8924,
1669,
330,
87,
698,
30680,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
parser := NewAlterTableParser()
{
alterStatement := "add column e enum('a','b','c')"
strippedStatement := parser.sanitizeQuotesFromAlterStatement(alterStatement)
test.S(t).ExpectEquals(strippedStatement, "add column e enum('','','')")
}
{
alterStatement := "change column i int 'some comment, with comma'"
strippedStatement := parser.sanitizeQuotesFromAlterStatement(alterStatement)
test.S(t).ExpectEquals(strippedStatement, "change column i int ''")
}
} | explode_data.jsonl/71264 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 175
} | [
2830,
3393,
23729,
26310,
43780,
3830,
74290,
8636,
1155,
353,
8840,
836,
8,
341,
55804,
1669,
1532,
74290,
2556,
6570,
741,
197,
515,
197,
197,
37277,
8636,
1669,
330,
718,
3250,
384,
7618,
492,
64,
1844,
65,
1844,
66,
84212,
197,
18... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSetTagErrors(t *testing.T) {
tcs := []struct {
name string
tag string
revision string
webhooksBefore admit_v1.MutatingWebhookConfigurationList
namespaces corev1.NamespaceList
outputMatches []string
error string
}{
{
name: "TestErrorWhenRevisionWithNameCollision",
tag: "revision",
revision: "revision",
webhooksBefore: admit_v1.MutatingWebhookConfigurationList{
Items: []admit_v1.MutatingWebhookConfiguration{revisionCanonicalWebhook},
},
namespaces: corev1.NamespaceList{},
outputMatches: []string{},
error: "cannot create revision tag \"revision\"",
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
var out bytes.Buffer
client := fake.NewSimpleClientset(tc.webhooksBefore.DeepCopyObject(), tc.namespaces.DeepCopyObject())
mockClient := kube.MockClient{
Interface: client,
}
err := setTag(context.Background(), mockClient, tc.tag, tc.revision, &out)
if tc.error == "" && err != nil {
t.Fatalf("expected no error, got %v", err)
}
if tc.error != "" {
if err == nil {
t.Fatalf("expected error to include \"%s\" but got none", tc.error)
}
if !strings.Contains(err.Error(), tc.error) {
t.Fatalf("expected \"%s\" in error, got %v", tc.error, err)
}
}
})
}
} | explode_data.jsonl/15895 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 588
} | [
2830,
3393,
1649,
5668,
13877,
1155,
353,
8840,
836,
8,
341,
3244,
4837,
1669,
3056,
1235,
341,
197,
11609,
1843,
914,
198,
197,
60439,
310,
914,
198,
197,
197,
28342,
981,
914,
198,
197,
97250,
38560,
10227,
16698,
2273,
16,
1321,
33... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestLocalityMatchScore(t *testing.T) {
defer leaktest.AfterTest(t)()
testCases := []struct {
locality string
constraints string
leasePrefs string
expected float64
}{
{locality: "region=us,dc=east", constraints: "[]", expected: 0.0},
{locality: "region=us,dc=east", constraints: "[+region=eu,+dc=uk,+dc=de]", expected: 0.0},
{locality: "region=us,dc=east", constraints: "[-region=us,+dc=east]", expected: 0.0},
{locality: "region=us,dc=east", constraints: "[+region=eu,+dc=east]", expected: 0.0},
{locality: "region=us,dc=east", constraints: "[-region=eu]", expected: 0.0},
{locality: "region=us,dc=east", constraints: "[+region=us]", expected: 0.5},
{locality: "region=us,dc=east", constraints: "[+region=us,+region=eu]", expected: 0.5},
{locality: "region=us,dc=east", constraints: "[+region=eu,+region=ap,+region=us]", expected: 0.5},
{locality: "region=us,dc=east", constraints: "[+region=us,-dc=east]", expected: 0.5},
{locality: "region=us,dc=east", constraints: "[+region=us,+dc=west]", expected: 0.5},
{locality: "region=us,dc=east", constraints: "[+region=us,+dc=east]", expected: 1.0},
{locality: "region=us,dc=east", constraints: "[+dc=east]", expected: 1.0},
{locality: "region=us,dc=east", constraints: "[+dc=west,+dc=east]", expected: 1.0},
{locality: "region=us,dc=east", constraints: "[-region=eu,+dc=east]", expected: 1.0},
{locality: "region=us,dc=east", constraints: "[+region=eu,+dc=east,+region=us,+dc=west]", expected: 1.0},
{locality: "region=us,dc=east", constraints: "[+region=us,+dc=east,+rack=1,-ssd]", expected: 1.0},
{locality: "region=us,dc=east", constraints: `{"+region=us,+dc=east":3,"-dc=east":2}`, expected: 0.0},
{locality: "region=us,dc=east", constraints: `{"+region=us,+dc=east":3,"+region=eu,+dc=east":2}`, expected: 0.0},
{locality: "region=us,dc=east", constraints: `{"+region=us,+dc=east":3,"+region=us,+region=eu":2}`, expected: 0.5},
{locality: "region=us,dc=east", constraints: `{"+region=us,+dc=east":3,"+dc=east,+dc=west":2}`, expected: 1.0},
{locality: "region=us,dc=east", leasePrefs: "[[]]", expected: 0.0},
{locality: "region=us,dc=east", leasePrefs: "[[+dc=west]]", expected: 0.0},
{locality: "region=us,dc=east", leasePrefs: "[[+region=us]]", expected: 0.17},
{locality: "region=us,dc=east", leasePrefs: "[[+region=us,+dc=east]]", expected: 0.33},
{locality: "region=us,dc=east", constraints: "[+region=eu]", leasePrefs: "[[+dc=west]]", expected: 0.0},
{locality: "region=us,dc=east", constraints: "[+region=eu]", leasePrefs: "[[+region=us]]", expected: 0.17},
{locality: "region=us,dc=east", constraints: "[+region=eu]", leasePrefs: "[[+dc=east]]", expected: 0.33},
{locality: "region=us,dc=east", constraints: "[+region=us]", leasePrefs: "[[+dc=west]]", expected: 0.33},
{locality: "region=us,dc=east", constraints: "[+region=us]", leasePrefs: "[[+region=us]]", expected: 0.50},
{locality: "region=us,dc=east", constraints: "[+region=us]", leasePrefs: "[[+dc=east]]", expected: 0.67},
{locality: "region=us,dc=east", constraints: "[+dc=east]", leasePrefs: "[[+region=us]]", expected: 0.83},
{locality: "region=us,dc=east", constraints: "[+dc=east]", leasePrefs: "[[+dc=east]]", expected: 1.0},
{locality: "region=us,dc=east", constraints: "[+region=us,+dc=east]", leasePrefs: "[[+region=us,+dc=east]]", expected: 1.0},
}
for _, tc := range testCases {
zone := &config.ZoneConfig{}
var locality roachpb.Locality
if err := locality.Set(tc.locality); err != nil {
t.Fatal(err)
}
if tc.constraints != "" {
constraintsList := &config.ConstraintsList{}
if err := yaml.UnmarshalStrict([]byte(tc.constraints), constraintsList); err != nil {
t.Fatal(err)
}
zone.Constraints = constraintsList.Constraints
}
if tc.leasePrefs != "" {
if err := yaml.UnmarshalStrict([]byte(tc.leasePrefs), &zone.LeasePreferences); err != nil {
t.Fatal(err)
}
}
actual := math.Round(localityMatchScore(zone, locality)*100) / 100
if actual != tc.expected {
t.Errorf("locality=%v, constraints=%v, leasePrefs=%v: expected %v, got %v",
tc.locality, tc.constraints, tc.leasePrefs, tc.expected, actual)
}
}
} | explode_data.jsonl/64339 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1731
} | [
2830,
3393,
9152,
2719,
8331,
10570,
1155,
353,
8840,
836,
8,
341,
16867,
23352,
1944,
36892,
2271,
1155,
8,
2822,
18185,
37302,
1669,
3056,
1235,
341,
197,
8854,
487,
262,
914,
198,
197,
37203,
7458,
914,
198,
197,
197,
1623,
31434,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func Test_RemoveSet(t *testing.T) {
a := makeSet([]int{6, 3, 1})
a.Remove(3)
if a.Cardinality() != 2 {
t.Error("RemoveSet should only have 2 items in the set")
}
if !(a.Contains(6) && a.Contains(1)) {
t.Error("RemoveSet should have only items 6 and 1 in the set")
}
a.Remove(6)
a.Remove(1)
if a.Cardinality() != 0 {
t.Error("RemoveSet should be an empty set after removing 6 and 1")
}
} | explode_data.jsonl/172 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 167
} | [
2830,
3393,
66843,
1649,
1155,
353,
8840,
836,
8,
341,
11323,
1669,
1281,
1649,
10556,
396,
90,
21,
11,
220,
18,
11,
220,
16,
8824,
11323,
13270,
7,
18,
692,
743,
264,
48613,
80777,
368,
961,
220,
17,
341,
197,
3244,
6141,
445,
13... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestExpectStatefulSetMinReadySeconds(t *testing.T) {
a := monitoringv1.Alertmanager{}
replicas := int32(3)
a.Spec.Version = operator.DefaultAlertmanagerVersion
a.Spec.Replicas = &replicas
// assert defaults to zero if nil
statefulSet, err := makeStatefulSetSpec(&a, defaultTestConfig)
if err != nil {
t.Fatal(err)
}
if statefulSet.MinReadySeconds != 0 {
t.Fatalf("expected MinReadySeconds to be zero but got %d", statefulSet.MinReadySeconds)
}
// assert set correctly if not nil
var expect uint32 = 5
a.Spec.MinReadySeconds = &expect
statefulSet, err = makeStatefulSetSpec(&a, defaultTestConfig)
if err != nil {
t.Fatal(err)
}
if statefulSet.MinReadySeconds != int32(expect) {
t.Fatalf("expected MinReadySeconds to be %d but got %d", expect, statefulSet.MinReadySeconds)
}
} | explode_data.jsonl/25267 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 291
} | [
2830,
3393,
17536,
1397,
1262,
1649,
6217,
19202,
15343,
1155,
353,
8840,
836,
8,
341,
11323,
1669,
16558,
85,
16,
40143,
13297,
16094,
73731,
52210,
1669,
526,
18,
17,
7,
18,
340,
11323,
36473,
35842,
284,
5675,
13275,
9676,
13297,
563... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestMultiFileInitOrder(t *testing.T) {
fset := token.NewFileSet()
mustParse := func(src string) *ast.File {
f, err := parser.ParseFile(fset, "main", src, 0)
if err != nil {
t.Fatal(err)
}
return f
}
fileA := mustParse(`package main; var a = 1`)
fileB := mustParse(`package main; var b = 2`)
// The initialization order must not depend on the parse
// order of the files, only on the presentation order to
// the type-checker.
for _, test := range []struct {
files []*ast.File
want string
}{
{[]*ast.File{fileA, fileB}, "[a = 1 b = 2]"},
{[]*ast.File{fileB, fileA}, "[b = 2 a = 1]"},
} {
var info Info
if _, err := new(Config).Check("main", fset, test.files, &info); err != nil {
t.Fatal(err)
}
if got := fmt.Sprint(info.InitOrder); got != test.want {
t.Fatalf("got %s; want %s", got, test.want)
}
}
} | explode_data.jsonl/55544 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 356
} | [
2830,
3393,
20358,
1703,
3803,
4431,
1155,
353,
8840,
836,
8,
341,
1166,
746,
1669,
3950,
7121,
1703,
1649,
741,
2109,
590,
14463,
1669,
2915,
14705,
914,
8,
353,
559,
8576,
341,
197,
1166,
11,
1848,
1669,
6729,
8937,
1703,
955,
746,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestBinEval(t *testing.T) {
cases := []struct {
op ItemType
lhs interface{}
rhs interface{}
pass bool
}{
{
op: GT,
lhs: int64(4),
rhs: int64(3),
pass: true,
},
{
op: GTE,
lhs: int64(4),
rhs: int64(3),
pass: true,
},
{
op: GTE,
lhs: int64(4),
rhs: int64(4),
pass: true,
},
{
op: EQ,
lhs: int64(3),
rhs: int64(3),
pass: true,
},
{
op: EQ,
lhs: "abc",
rhs: "def",
pass: false,
},
{
op: LT,
lhs: "abc",
rhs: "def",
pass: true,
},
{
op: LT,
lhs: "abc",
rhs: 123.4,
pass: false,
},
{
op: LT,
lhs: 123.4,
rhs: "abc",
pass: false,
},
{
op: EQ,
lhs: 123.4,
rhs: "abc",
pass: false,
},
{
op: NEQ,
lhs: 123.4,
rhs: "abc",
pass: false,
},
{
op: IN,
lhs: 123.4,
rhs: "abc",
pass: false,
},
}
for _, tc := range cases {
tu.Equals(t, tc.pass, binEval(tc.op, tc.lhs, tc.rhs))
t.Logf("[ok] %v %s %v => %v", tc.lhs, tc.op, tc.rhs, tc.pass)
}
} | explode_data.jsonl/57796 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 696
} | [
2830,
3393,
28794,
54469,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
39703,
256,
73081,
198,
197,
8810,
4997,
220,
3749,
16094,
197,
7000,
4997,
220,
3749,
16094,
197,
41431,
1807,
198,
197,
59403,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestKazaamTransformThreeOpWithOverRequire(t *testing.T) {
spec := `[{
"operation": "shift",
"spec":{"a": "key.array1[0].array2[*]"},
"require": true
},
{
"operation": "concat",
"over": "a",
"spec": {"sources": [{"path": "foo"}, {"value": "KEY"}], "targetPath": "url", "delim": ":" }
}, {
"operation": "shift",
"spec": {"urls": "a[*].url" }
}]`
jsonIn := `{"key":{"not_array1":[{"array2":[{"foo": 0}, {"foo": 1}, {"foo": 2}]}]}}`
kazaamTransform, _ := kazaam.NewKazaam(spec)
_, err := kazaamTransform.TransformJSONStringToString(jsonIn)
if err == nil {
t.Error("Transform path does not exist in message and should throw an error")
t.FailNow()
}
} | explode_data.jsonl/11865 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 279
} | [
2830,
3393,
42,
12707,
309,
8963,
19641,
7125,
2354,
1918,
17959,
1155,
353,
8840,
836,
8,
341,
98100,
1669,
77644,
515,
197,
197,
1,
9262,
788,
330,
13418,
756,
197,
197,
1,
9535,
22317,
64,
788,
330,
792,
7234,
16,
58,
15,
936,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestContextRenderString(t *testing.T) {
w := httptest.NewRecorder()
c, _ := CreateTestContext(w)
c.String(http.StatusCreated, "test %s %d", "string", 2)
assert.Equal(t, http.StatusCreated, w.Code)
assert.Equal(t, "test string 2", w.Body.String())
assert.Equal(t, "text/plain; charset=utf-8", w.Header().Get("Content-Type"))
} | explode_data.jsonl/26786 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 131
} | [
2830,
3393,
1972,
6750,
703,
1155,
353,
8840,
836,
8,
341,
6692,
1669,
54320,
70334,
7121,
47023,
741,
1444,
11,
716,
1669,
4230,
2271,
1972,
3622,
692,
1444,
6431,
19886,
10538,
11694,
11,
330,
1944,
1018,
82,
1018,
67,
497,
330,
917... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTLSServerNameOverwrite(t *testing.T) {
overwriteServerName := "over.write.server.name"
creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", overwriteServerName)
if err != nil {
t.Fatalf("Failed to create credentials %v", err)
}
conn, err := Dial("Non-Existent.Server:80", WithTransportCredentials(creds))
if err != nil {
t.Fatalf("Dial(_, _) = _, %v, want _, <nil>", err)
}
conn.Close()
if conn.authority != overwriteServerName {
t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName)
}
} | explode_data.jsonl/6659 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 212
} | [
2830,
3393,
13470,
1220,
2836,
675,
1918,
4934,
1155,
353,
8840,
836,
8,
341,
197,
64915,
5475,
675,
1669,
330,
1975,
3836,
12638,
2644,
698,
197,
85734,
11,
1848,
1669,
16387,
7121,
2959,
45439,
43633,
1155,
4730,
6184,
5172,
924,
4937... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestChanSyncUnableToSync(t *testing.T) {
t.Parallel()
// Create a test channel which will be used for the duration of this
// unittest. The channel will be funded evenly with Alice having 5 BTC,
// and Bob having 5 BTC.
aliceChannel, bobChannel, cleanUp, err := CreateTestChannels()
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
defer cleanUp()
// If we immediately send both sides a "bogus" ChanSync message, then
// they both should conclude that they're unable to synchronize the
// state.
badChanSync := &lnwire.ChannelReestablish{
ChanID: lnwire.NewChanIDFromOutPoint(
&aliceChannel.channelState.FundingOutpoint,
),
NextLocalCommitHeight: 1000,
RemoteCommitTailHeight: 9000,
}
_, _, _, err = bobChannel.ProcessChanSyncMsg(badChanSync)
if err != ErrCannotSyncCommitChains {
t.Fatalf("expected error instead have: %v", err)
}
_, _, _, err = aliceChannel.ProcessChanSyncMsg(badChanSync)
if err != ErrCannotSyncCommitChains {
t.Fatalf("expected error instead have: %v", err)
}
} | explode_data.jsonl/46440 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 359
} | [
2830,
3393,
46019,
12154,
17075,
1249,
12154,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
197,
322,
4230,
264,
1273,
5496,
892,
686,
387,
1483,
369,
279,
8090,
315,
419,
198,
197,
322,
19905,
13,
576,
5496,
686,
387,
23906,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestDownload_route(t *testing.T) {
t.Parallel()
check := func(t *testing.T, repo *sqliteDownloadRepository) {
w := httptest.NewRecorder()
req := httptest.NewRequest("GET", "/downloads", nil)
req.Header.Set("x-user-id", "test")
repo.recordStats(&DownloadStats{SDNs: 1, Alts: 421, Addresses: 1511, DeniedPersons: 731, SectoralSanctions: 289, BISEntities: 189})
router := mux.NewRouter()
addDownloadRoutes(log.NewNopLogger(), router, repo)
router.ServeHTTP(w, req)
w.Flush()
if w.Code != http.StatusOK {
t.Errorf("bogus status code: %d", w.Code)
}
var downloads []DownloadStats
if err := json.NewDecoder(w.Body).Decode(&downloads); err != nil {
t.Error(err)
}
if len(downloads) != 1 {
t.Errorf("got %d downloads: %v", len(downloads), downloads)
}
}
// SQLite tests
sqliteDB := database.CreateTestSqliteDB(t)
defer sqliteDB.Close()
check(t, &sqliteDownloadRepository{sqliteDB.DB, log.NewNopLogger()})
// MySQL tests
mysqlDB := database.TestMySQLConnection(t)
check(t, &sqliteDownloadRepository{mysqlDB, log.NewNopLogger()})
} | explode_data.jsonl/29465 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 437
} | [
2830,
3393,
11377,
28109,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
25157,
1669,
2915,
1155,
353,
8840,
836,
11,
15867,
353,
37042,
11377,
4624,
8,
341,
197,
6692,
1669,
54320,
70334,
7121,
47023,
741,
197,
24395,
1669,
543... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestCheckPointNil(t *testing.T) {
setup()
assert := require.New(t)
// With wrong log level
r := strings.NewReader(`2021-01-14T12:16:54.579+0800 ERROR CheckPoint {"host": "172.16.5.140", "port": "22", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`)
c, err := NewCheckPoint(r)
assert.Nil(err)
ctx := NewContext(context.Background())
p := c.Acquire(ctx, map[string]interface{}{
"host": "172.16.5.140",
"port": 22,
"user": "tidb",
"cmd": "test cmd",
})
assert.Nil(p.Hit())
p.Release(nil)
// With wrong log title
r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO XXXCommand {"host": "172.16.5.140", "port": "22", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`)
c, err = NewCheckPoint(r)
assert.Nil(err)
p = c.Acquire(ctx, map[string]interface{}{
"host": "172.16.5.140",
"port": 22,
"user": "tidb",
"cmd": "test cmd",
})
assert.Nil(p.Hit())
p.Release(nil)
// With wrong log host
r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.141", "port": "22", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`)
c, err = NewCheckPoint(r)
assert.Nil(err)
p = c.Acquire(ctx, map[string]interface{}{
"host": "172.16.5.140",
"port": 22,
"user": "tidb",
"cmd": "test cmd",
})
assert.Nil(p.Hit())
p.Release(nil)
// With wrong port
r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.140", "port": "23", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`)
c, err = NewCheckPoint(r)
assert.Nil(err)
p = c.Acquire(ctx, map[string]interface{}{
"host": "172.16.5.140",
"port": 22,
"user": "tidb",
"cmd": "test cmd",
})
assert.Nil(p.Hit())
p.Release(nil)
// With wrong user
r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.140", "port": "22", "user": "yidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`)
c, err = NewCheckPoint(r)
assert.Nil(err)
p = c.Acquire(ctx, map[string]interface{}{
"host": "172.16.5.140",
"port": 22,
"user": "tidb",
"cmd": "test cmd",
})
assert.Nil(p.Hit())
p.Release(nil)
// With wrong cmd
r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.140", "port": "22", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`)
c, err = NewCheckPoint(r)
assert.Nil(err)
p = c.Acquire(ctx, map[string]interface{}{
"host": "172.16.5.140",
"port": 22,
"user": "tidb",
"cmd": "test cmd",
})
assert.Nil(p.Hit())
assert.True(p.acquired)
p.Release(nil)
} | explode_data.jsonl/27337 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1278
} | [
2830,
3393,
3973,
2609,
19064,
1155,
353,
8840,
836,
8,
341,
84571,
2822,
6948,
1669,
1373,
7121,
1155,
340,
197,
322,
3085,
4969,
1487,
2188,
198,
7000,
1669,
9069,
68587,
5809,
17,
15,
17,
16,
12,
15,
16,
12,
16,
19,
51,
16,
17,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIgnoresMirrorPod(t *testing.T) {
pod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
kubelet.ConfigMirrorAnnotationKey: "true",
},
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{VolumeSource: api.VolumeSource{}},
},
},
}
attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)
err := NewServiceAccount().Admit(attrs)
if err != nil {
t.Errorf("Expected mirror pod without service account or secrets allowed, got err: %v", err)
}
} | explode_data.jsonl/61339 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 237
} | [
2830,
3393,
40,
70,
2152,
416,
54216,
23527,
1155,
353,
8840,
836,
8,
341,
3223,
347,
1669,
609,
2068,
88823,
515,
197,
23816,
12175,
25,
77520,
16,
80222,
515,
298,
197,
21418,
25,
2415,
14032,
30953,
515,
571,
16463,
3760,
1149,
107... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGenerateMetricTasksIncomplete(t *testing.T) {
run := &v1alpha1.AnalysisRun{
Spec: v1alpha1.AnalysisRunSpec{
Metrics: []v1alpha1.Metric{{
Name: "success-rate",
}},
},
Status: v1alpha1.AnalysisRunStatus{
Phase: v1alpha1.AnalysisPhaseRunning,
MetricResults: []v1alpha1.MetricResult{{
Name: "success-rate",
Phase: v1alpha1.AnalysisPhaseRunning,
Measurements: []v1alpha1.Measurement{{
Value: "99",
Phase: v1alpha1.AnalysisPhaseSuccessful,
StartedAt: timePtr(metav1.NewTime(time.Now().Add(-50 * time.Second))),
}},
}},
},
}
{
// ensure we don't take measurement when interval is not specified and we already took measurement
tasks := generateMetricTasks(run, run.Spec.Metrics)
assert.Equal(t, 1, len(tasks))
assert.NotNil(t, tasks[0].incompleteMeasurement)
}
} | explode_data.jsonl/75810 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 352
} | [
2830,
3393,
31115,
54310,
25449,
96698,
1155,
353,
8840,
836,
8,
341,
56742,
1669,
609,
85,
16,
7141,
16,
8624,
9092,
6727,
515,
197,
7568,
992,
25,
348,
16,
7141,
16,
8624,
9092,
6727,
8327,
515,
298,
9209,
13468,
25,
3056,
85,
16,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestApplyPatch(t *testing.T) {
t.Parallel()
fName := "TestApplyPatch"
defer cleanup(fName)
writeOldFile(fName, t)
patch := new(bytes.Buffer)
err := binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(newFile), patch)
if err != nil {
t.Fatalf("Failed to create patch: %v", err)
}
err = Apply(patch, Options{
TargetPath: fName,
Patcher: NewBSDiffPatcher(),
})
validateUpdate(fName, err, t)
} | explode_data.jsonl/73984 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 167
} | [
2830,
3393,
28497,
43622,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
1166,
675,
1669,
330,
2271,
28497,
43622,
698,
16867,
21290,
955,
675,
340,
24945,
18284,
1703,
955,
675,
11,
259,
692,
3223,
754,
1669,
501,
23158,
22622,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFindVPCID(t *testing.T) {
awsServices := NewFakeAWSServices()
c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
}
vpcID, err := c.findVPCID()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if vpcID != "vpc-mac0" {
t.Errorf("Unexpected vpcID: %s", vpcID)
}
} | explode_data.jsonl/12849 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 166
} | [
2830,
3393,
9885,
53,
4872,
915,
1155,
353,
8840,
836,
8,
341,
197,
8635,
11025,
1669,
1532,
52317,
14419,
1220,
2161,
741,
1444,
11,
1848,
1669,
501,
14419,
3540,
52178,
51442,
68587,
10937,
9752,
60,
3975,
31521,
11025,
340,
743,
1848... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestGetInt32Var(t *testing.T) {
for _, tt := range getInt32VarFlagTests {
t.Run(tt.valueIn, func(t *testing.T) {
os.Clearenv()
if err := os.Setenv(variableName, tt.valueIn); err != nil {
t.Error(err)
}
v, err := GetInt32Var(variableName, tt.fallback)
if err != nil {
t.Error(err)
}
if v != tt.valueOut {
t.Errorf("Variable %s not equal to value '%v'", variableName, tt.valueOut)
}
})
}
} | explode_data.jsonl/18927 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 208
} | [
2830,
3393,
85097,
18,
17,
3962,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
17853,
1669,
2088,
85133,
18,
17,
3962,
12135,
18200,
341,
197,
3244,
16708,
47152,
2824,
641,
11,
2915,
1155,
353,
8840,
836,
8,
341,
298,
25078,
727,
273,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestReaderOnSpecificMessage(t *testing.T) {
client, err := NewClient(ClientOptions{
URL: lookupURL,
})
assert.Nil(t, err)
defer client.Close()
topic := newTopicName()
ctx := context.Background()
// create producer
producer, err := client.CreateProducer(ProducerOptions{
Topic: topic,
DisableBatching: true,
})
assert.Nil(t, err)
defer producer.Close()
// send 10 messages
msgIDs := [10]MessageID{}
for i := 0; i < 10; i++ {
msgID, err := producer.Send(ctx, &ProducerMessage{
Payload: []byte(fmt.Sprintf("hello-%d", i)),
})
assert.NoError(t, err)
assert.NotNil(t, msgID)
msgIDs[i] = msgID
}
// create reader on 5th message (not included)
reader, err := client.CreateReader(ReaderOptions{
Topic: topic,
StartMessageID: msgIDs[4],
})
assert.Nil(t, err)
defer reader.Close()
// receive the remaining 5 messages
for i := 5; i < 10; i++ {
msg, err := reader.Next(context.Background())
assert.NoError(t, err)
expectMsg := fmt.Sprintf("hello-%d", i)
assert.Equal(t, []byte(expectMsg), msg.Payload())
}
// create reader on 5th message (included)
readerInclusive, err := client.CreateReader(ReaderOptions{
Topic: topic,
StartMessageID: msgIDs[4],
StartMessageIDInclusive: true,
})
assert.Nil(t, err)
defer readerInclusive.Close()
// receive the remaining 6 messages
for i := 4; i < 10; i++ {
msg, err := readerInclusive.Next(context.Background())
assert.NoError(t, err)
expectMsg := fmt.Sprintf("hello-%d", i)
assert.Equal(t, []byte(expectMsg), msg.Payload())
}
} | explode_data.jsonl/6384 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 637
} | [
2830,
3393,
5062,
1925,
47514,
2052,
1155,
353,
8840,
836,
8,
341,
25291,
11,
1848,
1669,
1532,
2959,
46851,
3798,
515,
197,
79055,
25,
18615,
3144,
345,
197,
8824,
6948,
59678,
1155,
11,
1848,
340,
16867,
2943,
10421,
2822,
3244,
24810... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestFormatterValueError(test *testing.T) {
formatted, err := formatter.Format("Error", error(StructValueError{"message"}))
assert.NoError(test, err)
assert.Equal(test, "Error message", formatted)
} | explode_data.jsonl/39768 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 65
} | [
2830,
3393,
14183,
1130,
1454,
8623,
353,
8840,
836,
8,
341,
37410,
12127,
11,
1848,
1669,
24814,
9978,
445,
1454,
497,
1465,
7,
9422,
1130,
1454,
4913,
1994,
9207,
4390,
6948,
35699,
8623,
11,
1848,
340,
6948,
12808,
8623,
11,
330,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestFindNotificationChannels(t *testing.T) {
tests := []struct {
resp map[string]interface{} // the response of the mackerel api
want []NotificationChannel
}{
// email type
{
resp: map[string]interface{}{
"channels": []interface{}{
map[string]interface{}{
"id": "ch-foobar",
"name": "notification-test",
"type": "email",
"emails": []interface{}{"john.doe@example.com"},
"userIds": []interface{}{"user-john-doe"},
"events": []interface{}{"alert", "alertGroup"},
},
},
},
want: []NotificationChannel{
&NotificationChannelEmail{
ID: "ch-foobar",
Name: "notification-test",
Type: NotificationChannelTypeEmail,
Emails: []string{"john.doe@example.com"},
UserIDs: []string{"user-john-doe"},
Events: []NotificationEvent{NotificationEventAlert, NotificationEventAlertGroup},
},
},
},
// slack
{
resp: map[string]interface{}{
"channels": []interface{}{
map[string]interface{}{
"id": "ch-foobar",
"name": "notification-test",
"type": "slack",
"url": "http://example.com",
"enabledGraphImage": true,
"events": []interface{}{"alert", "alertGroup", "hostStatus", "hostRegister", "hostRetire", "monitor"},
},
},
},
want: []NotificationChannel{
&NotificationChannelSlack{
ID: "ch-foobar",
Name: "notification-test",
Type: NotificationChannelTypeSlack,
URL: "http://example.com",
EnabledGraphImage: true,
Events: []NotificationEvent{
NotificationEventAlert, NotificationEventAlertGroup, NotificationEventHostStatus, NotificationEventHostRegister, NotificationEventHostRetire, NotificationEventMonitor,
},
},
},
},
// webhook
{
resp: map[string]interface{}{
"channels": []interface{}{
map[string]interface{}{
"id": "ch-foobar",
"name": "notification-test",
"type": "webhook",
"url": "http://example.com",
"events": []interface{}{"alert", "alertGroup", "hostStatus", "hostRegister", "hostRetire", "monitor"},
},
},
},
want: []NotificationChannel{
&NotificationChannelWebHook{
ID: "ch-foobar",
Name: "notification-test",
Type: NotificationChannelTypeWebHook,
URL: "http://example.com",
Events: []NotificationEvent{
NotificationEventAlert, NotificationEventAlertGroup, NotificationEventHostStatus, NotificationEventHostRegister, NotificationEventHostRetire, NotificationEventMonitor,
},
},
},
},
// other notification channel types
{
resp: map[string]interface{}{
"channels": []interface{}{
map[string]interface{}{
"id": "ch-foobar",
"name": "notification-test",
"type": "line",
},
},
},
want: []NotificationChannel{
&NotificationChannelBase{
ID: "ch-foobar",
Name: "notification-test",
Type: NotificationChannelType("line"),
},
},
},
}
for i, tc := range tests {
t.Run(fmt.Sprintf("FindNotificationChannel-%d", i), func(t *testing.T) {
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
t.Errorf("unexpected method: want %s, got %s", http.MethodGet, r.Method)
}
if r.URL.Path != "/api/v0/channels" {
t.Errorf("unexpected path, want %s, got %s", "/api/v0/channels", r.URL.Path)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
enc := json.NewEncoder(w)
enc.Encode(tc.resp)
}))
defer ts.Close()
u, err := url.Parse(ts.URL)
if err != nil {
t.Fatal(err)
}
c := &Client{
BaseURL: u,
APIKey: "DUMMY-API-KEY",
HTTPClient: ts.Client(),
}
got, err := c.FindNotificationChannels(context.Background())
if err != nil {
t.Error(err)
return
}
if diff := cmp.Diff(got, tc.want); diff != "" {
t.Errorf("FindNotificationChannels differs: (-got +want)\n%s", diff)
}
})
}
} | explode_data.jsonl/61828 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1897
} | [
2830,
3393,
9885,
11196,
35925,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
34653,
2415,
14032,
31344,
6257,
442,
279,
2033,
315,
279,
296,
9683,
301,
6330,
198,
197,
50780,
3056,
11196,
9629,
198,
197,
59403,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestLDSIngressHTTPUseProxyProtocol(t *testing.T) {
rh, cc, done := setup(t, func(reh *contour.ResourceEventHandler) {
reh.CacheHandler.UseProxyProto = true
})
defer done()
// assert that without any ingress objects registered
// there is only a static listener
assertEqual(t, &v2.DiscoveryResponse{
VersionInfo: "0",
Resources: []types.Any{
any(t, staticListener()),
},
TypeUrl: listenerType,
Nonce: "0",
}, streamLDS(t, cc))
// i1 is a simple ingress, no hostname, no tls.
i1 := &v1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "simple",
Namespace: "default",
},
Spec: v1beta1.IngressSpec{
Backend: backend("backend", intstr.FromInt(80)),
},
}
// add it and assert that we now have a ingress_http listener using
// the proxy protocol (the true param to filterchain)
rh.OnAdd(i1)
assertEqual(t, &v2.DiscoveryResponse{
VersionInfo: "1",
Resources: []types.Any{
any(t, &v2.Listener{
Name: "ingress_http",
Address: *envoy.SocketAddress("0.0.0.0", 8080),
ListenerFilters: []listener.ListenerFilter{
envoy.ProxyProtocol(),
},
FilterChains: envoy.FilterChains(envoy.HTTPConnectionManager("ingress_http", "/dev/stdout")),
}),
any(t, staticListener()),
},
TypeUrl: listenerType,
Nonce: "1",
}, streamLDS(t, cc))
} | explode_data.jsonl/22823 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 543
} | [
2830,
3393,
43,
5936,
641,
2483,
9230,
10253,
16219,
20689,
1155,
353,
8840,
836,
8,
341,
7000,
71,
11,
12527,
11,
2814,
1669,
6505,
1155,
11,
2915,
5801,
71,
353,
772,
413,
20766,
17945,
8,
341,
197,
197,
11063,
46130,
3050,
9046,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestZeroX50_IntegrityHash(t *testing.T) {
assert := assert.New(t)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
type fields struct {
Version int32
EncryptedURL []byte
DecryptedHash []byte
}
type args struct {
decrypter cipher.Decrypter
}
tests := []struct {
name string
fields fields
args args
want []byte
wantErr bool
}{
{
"success",
fields{
0,
[]byte("EncryptedURL"),
[]byte("DecryptedHash"),
},
args{
func() cipher.Decrypter {
m := ciphertest.NewMockDecrypter(mockCtrl)
m.EXPECT().Decrypt(cipher.EncryptedContent([]byte("EncryptedURL"))).Return(cipher.PlainContent("https://domain.com/resource-220455078214"), nil)
return m
}(),
},
encodingtest.MustDecodeHex("220455078214"),
false,
},
{
"err-invalid-hash",
fields{
0,
[]byte("EncryptedURL"),
[]byte("DecryptedHash"),
},
args{
func() cipher.Decrypter {
m := ciphertest.NewMockDecrypter(mockCtrl)
m.EXPECT().Decrypt(cipher.EncryptedContent([]byte("EncryptedURL"))).Return(cipher.PlainContent("https://domain.com/resource-h22055078214"), nil)
return m
}(),
},
[]byte{},
true,
},
{
"err-parts",
fields{
0,
[]byte("EncryptedURL"),
[]byte("DecryptedHash"),
},
args{
func() cipher.Decrypter {
m := ciphertest.NewMockDecrypter(mockCtrl)
m.EXPECT().Decrypt(cipher.EncryptedContent([]byte("EncryptedURL"))).Return(cipher.PlainContent("https://domain.com/resource"), nil)
return m
}(),
},
nil,
true,
},
{
"err-decrypt",
fields{
0,
[]byte("EncryptedURL"),
[]byte("DecryptedHash"),
},
args{
func() cipher.Decrypter {
m := ciphertest.NewMockDecrypter(mockCtrl)
m.EXPECT().Decrypt(cipher.EncryptedContent([]byte("EncryptedURL"))).Return(nil, errors.Errorf("failed"))
return m
}(),
},
nil,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
d := &ZeroX50{
Version: tt.fields.Version,
EncryptedURL: tt.fields.EncryptedURL,
DecryptedHash: tt.fields.DecryptedHash,
}
got, err := d.IntegrityHash(tt.args.decrypter)
if (err != nil) != tt.wantErr {
t.Errorf("ZeroX50.IntegrityHash() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !assert.Equal(tt.want, got) {
t.Errorf("ZeroX50.IntegrityHash() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/48054 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1160
} | [
2830,
3393,
17999,
55,
20,
15,
32054,
67212,
6370,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
340,
77333,
15001,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
16867,
7860,
15001,
991,
18176,
741,
13158,
5043,
2036,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestTOOLS2403 is a regression test for TOOLS-2403: closing a collection
// receiver before the demultiplexer has read through to EOF on the archive
// must not deadlock, and the demultiplexer goroutine must still terminate
// without error.
func TestTOOLS2403(t *testing.T) {
	testtype.SkipUnlessTestType(t, testtype.UnitTestType)

	// Build an archive containing a single intent and wire a receiver to it.
	singleIntent := testIntents[0]
	demux := &Demultiplexer{
		In:              buildSingleIntentArchive(t, singleIntent),
		NamespaceStatus: make(map[string]int),
	}
	muxOut := &RegularCollectionReceiver{
		Intent: singleIntent,
		Demux:  demux,
		Origin: singleIntent.Namespace(),
	}
	muxOut.Open()

	// Run the demultiplexer concurrently; its error is inspected after Wait.
	var demuxErr error
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		demuxErr = demux.Run()
	}()

	// Read all the documents, but don't read past into EOF.
	bs := make([]byte, db.MaxBSONSize)
	for i := 0; i < testDocCount; i++ {
		if _, err := muxOut.Read(bs); err != nil {
			t.Fatal(err)
		}
	}

	// Closing the intent before reading EOF should not deadlock.
	muxOut.Close()
	wg.Wait()
	if demuxErr != nil {
		t.Fatalf("unexpected error: %v", demuxErr)
	}
} | explode_data.jsonl/48802 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 382
} | [
2830,
3393,
93932,
17,
19,
15,
18,
1155,
353,
8840,
836,
8,
341,
18185,
1313,
57776,
35587,
2271,
929,
1155,
11,
1273,
1313,
25159,
2271,
929,
692,
1903,
2173,
11536,
1669,
1273,
1072,
805,
58,
15,
2533,
2698,
336,
2200,
1669,
609,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestExtractPoilcyNumber verifies that the policy number embedded in an
// object key of the form "unprocessed/<number>.pdf" is extracted correctly.
// (Note: "Poilcy" is a typo for "Policy" in the test name; kept as-is so the
// test's identity is stable.)
func TestExtractPoilcyNumber(t *testing.T) {
	const key = "unprocessed/1234567890.pdf"
	// Drop everything up to and including the first "/" ...
	_, rest, _ := strings.Cut(key, "/")
	// ... then keep what precedes the first "." of the remainder.
	polNumber, _, _ := strings.Cut(rest, ".")
	if polNumber != "1234567890" {
		t.Errorf("policy number not found")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 96
} | [
2830,
3393,
28959,
32904,
321,
11130,
2833,
1155,
353,
8840,
836,
8,
972,
23634,
1669,
330,
359,
34022,
14,
16,
17,
18,
19,
20,
21,
22,
23,
24,
15,
15995,
5031,
3223,
337,
2833,
1669,
1376,
17303,
819,
18338,
4857,
11,
3521,
70329,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestShellCompletionForIncompleteFlags verifies that when the command line
// ends with the completion trigger flag, the custom ShellComplete callback is
// invoked instead of the Action (the Action returns an error precisely so any
// accidental invocation fails the test), and that app.Run reports no error.
func TestShellCompletionForIncompleteFlags(t *testing.T) {
	app := &App{
		Flags: []Flag{
			&IntFlag{
				Name: "test-completion",
			},
		},
		EnableShellCompletion: true,
		// Custom completion: print every visible command name, then every
		// flag name prefixed with "-" (single letter) or "--" (longer).
		ShellComplete: func(ctx *Context) {
			for _, command := range ctx.App.Commands {
				if command.Hidden {
					continue
				}
				for _, name := range command.Names() {
					fmt.Fprintln(ctx.App.Writer, name)
				}
			}
			for _, flag := range ctx.App.Flags {
				for _, name := range flag.Names() {
					// Skip the completion trigger flag itself.
					if name == genCompName() {
						continue
					}
					switch name = strings.TrimSpace(name); len(name) {
					case 0:
						// Empty names produce no output.
					case 1:
						fmt.Fprintln(ctx.App.Writer, "-"+name)
					default:
						fmt.Fprintln(ctx.App.Writer, "--"+name)
					}
				}
			}
		},
		// Must never run when completion is being requested.
		Action: func(ctx *Context) error {
			return fmt.Errorf("should not get here")
		},
	}
	// "--test-completion" is left incomplete (no int value follows) and the
	// final argument is the completion trigger flag.
	err := app.Run([]string{"", "--test-completion", "--" + genCompName()})
	if err != nil {
		t.Errorf("app should not return an error: %s", err)
	}
} | explode_data.jsonl/52599 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 449
} | [
2830,
3393,
25287,
33190,
2461,
96698,
9195,
1155,
353,
8840,
836,
8,
341,
28236,
1669,
609,
2164,
515,
197,
197,
9195,
25,
3056,
12135,
515,
298,
197,
5,
1072,
12135,
515,
571,
21297,
25,
330,
1944,
11476,
14386,
756,
298,
197,
1583,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
// TestNodeUpdate runs the "node update" command against a fake client for a
// set of flag combinations. Each case's nodeUpdateFunc stub receives the
// NodeSpec produced by the command and returns an error (failing the test via
// cmd.Execute) if the expected change is not present.
func TestNodeUpdate(t *testing.T) {
	testCases := []struct {
		args []string
		flags map[string]string
		nodeInspectFunc func() (swarm.Node, []byte, error)
		nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error
	}{
		// --role=manager sets the node role.
		{
			args: []string{"nodeID"},
			flags: map[string]string{
				"role": "manager",
			},
			nodeInspectFunc: func() (swarm.Node, []byte, error) {
				return *Node(), []byte{}, nil
			},
			nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error {
				if node.Role != swarm.NodeRoleManager {
					return errors.Errorf("expected role manager, got %s", node.Role)
				}
				return nil
			},
		},
		// --availability=drain sets the node availability.
		{
			args: []string{"nodeID"},
			flags: map[string]string{
				"availability": "drain",
			},
			nodeInspectFunc: func() (swarm.Node, []byte, error) {
				return *Node(), []byte{}, nil
			},
			nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error {
				if node.Availability != swarm.NodeAvailabilityDrain {
					return errors.Errorf("expected drain availability, got %s", node.Availability)
				}
				return nil
			},
		},
		// --label-add with a bare key adds a key-only label.
		{
			args: []string{"nodeID"},
			flags: map[string]string{
				"label-add": "lbl",
			},
			nodeInspectFunc: func() (swarm.Node, []byte, error) {
				return *Node(), []byte{}, nil
			},
			nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error {
				if _, present := node.Annotations.Labels["lbl"]; !present {
					return errors.Errorf("expected 'lbl' label, got %v", node.Annotations.Labels)
				}
				return nil
			},
		},
		// --label-add key=value adds a key/value label.
		{
			args: []string{"nodeID"},
			flags: map[string]string{
				"label-add": "key=value",
			},
			nodeInspectFunc: func() (swarm.Node, []byte, error) {
				return *Node(), []byte{}, nil
			},
			nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error {
				if value, present := node.Annotations.Labels["key"]; !present || value != "value" {
					return errors.Errorf("expected 'key' label to be 'value', got %v", node.Annotations.Labels)
				}
				return nil
			},
		},
		// --label-rm removes an existing label (node starts with key=value).
		{
			args: []string{"nodeID"},
			flags: map[string]string{
				"label-rm": "key",
			},
			nodeInspectFunc: func() (swarm.Node, []byte, error) {
				return *Node(NodeLabels(map[string]string{
					"key": "value",
				})), []byte{}, nil
			},
			nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error {
				if len(node.Annotations.Labels) > 0 {
					return errors.Errorf("expected no labels, got %v", node.Annotations.Labels)
				}
				return nil
			},
		},
	}
	// Drive each case through the real cobra command wired to the fake client.
	for _, tc := range testCases {
		cmd := newUpdateCommand(
			test.NewFakeCli(&fakeClient{
				nodeInspectFunc: tc.nodeInspectFunc,
				nodeUpdateFunc: tc.nodeUpdateFunc,
			}))
		cmd.SetArgs(tc.args)
		for key, value := range tc.flags {
			cmd.Flags().Set(key, value)
		}
		assert.NilError(t, cmd.Execute())
	}
} | explode_data.jsonl/54561 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1224
} | [
2830,
3393,
1955,
4289,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
31215,
310,
3056,
917,
198,
197,
59516,
1843,
2415,
14032,
30953,
198,
197,
20831,
58533,
9626,
2915,
368,
320,
2280,
2178,
21714,
11,
3056,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestKubectlDownload stubs lookPathDir so "kubectl" is never resolved from
// the local PATH, then runs both a bare "kubectl" invocation and one with an
// argument ("kubectl -h") through ExecuteShellCommand, expecting both to
// succeed. NOTE(review): this presumably exercises an auto-download/fallback
// path for the missing binary — confirm against ExecuteShellCommand.
func TestKubectlDownload(t *testing.T) {
	// Save and restore the package-level stub so the "not found" behavior
	// does not leak into other tests in this package.
	origLookPath := lookPathDir
	lookPathDir = func(cwd string, env expand.Environ, file string) (string, error) {
		return "", errors.New("not found")
	}
	defer func() { lookPathDir = origLookPath }()

	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	if err := ExecuteShellCommand("kubectl", nil, ".", stdout, stderr, nil); err != nil {
		t.Fatal(err)
	}

	stdout1 := &bytes.Buffer{}
	if err := ExecuteShellCommand("kubectl -h", nil, ".", stdout1, stderr, nil); err != nil {
		t.Fatal(err)
	}
} | explode_data.jsonl/31032 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 192
} | [
2830,
3393,
42,
53380,
11377,
1155,
353,
8840,
836,
8,
341,
197,
7201,
1820,
6184,
284,
2915,
1337,
6377,
914,
11,
6105,
9225,
22834,
2772,
11,
1034,
914,
8,
320,
917,
11,
1465,
8,
341,
197,
853,
7342,
5975,
7121,
445,
1921,
1730,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestDefaultSigner exercises the default secp256k1 signer: a signature over
// a payload must recover to the signer's public key; recovery against a
// different payload must yield a different key; and a too-short signature
// must fail with crypto.ErrInvalidLength.
func TestDefaultSigner(t *testing.T) {
	payload := []byte("test string")

	privKey, err := crypto.GenerateSecp256k1Key()
	if err != nil {
		t.Fatal(err)
	}

	signer := crypto.NewDefaultSigner(privKey)
	signature, err := signer.Sign(payload)
	if err != nil {
		t.Fatal(err)
	}

	t.Run("OK - sign & recover", func(t *testing.T) {
		// Recovering from the original payload must reproduce the key pair's
		// public point exactly (both coordinates).
		recovered, err := crypto.Recover(signature, payload)
		if err != nil {
			t.Fatal(err)
		}
		sameX := recovered.X.Cmp(privKey.PublicKey.X) == 0
		sameY := recovered.Y.Cmp(privKey.PublicKey.Y) == 0
		if !(sameX && sameY) {
			t.Fatalf("wanted %v but got %v", recovered, &privKey.PublicKey)
		}
	})

	t.Run("OK - recover with invalid data", func(t *testing.T) {
		// A mismatched payload recovers to some key, but not ours.
		recovered, err := crypto.Recover(signature, []byte("invalid"))
		if err != nil {
			t.Fatal(err)
		}
		if recovered.X.Cmp(privKey.PublicKey.X) == 0 && recovered.Y.Cmp(privKey.PublicKey.Y) == 0 {
			t.Fatal("expected different public key")
		}
	})

	t.Run("OK - recover with short signature", func(t *testing.T) {
		// A 7-byte blob is not a valid signature length.
		_, err := crypto.Recover([]byte("invalid"), payload)
		if err == nil {
			t.Fatal("expected invalid length error but got none")
		}
		if !errors.Is(err, crypto.ErrInvalidLength) {
			t.Fatalf("expected invalid length error but got %v", err)
		}
	})
} | explode_data.jsonl/31473 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 502
} | [
2830,
3393,
3675,
7264,
261,
1155,
353,
8840,
836,
8,
341,
18185,
7078,
1669,
3056,
3782,
445,
1944,
914,
1138,
71170,
1592,
11,
1848,
1669,
19028,
57582,
8430,
79,
17,
20,
21,
74,
16,
1592,
741,
743,
1848,
961,
2092,
341,
197,
3244... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestToKubeContainer verifies that the runtime manager's toKubeContainer
// maps a CRI runtimeapi.Container onto the internal kubecontainer.Container:
// ID (with the fake runtime type), name, image/imageRef, running state, and
// the container hash taken from the containerHashLabel annotation.
func TestToKubeContainer(t *testing.T) {
	c := &runtimeapi.Container{
		Id: "test-id",
		Metadata: &runtimeapi.ContainerMetadata{
			Name: "test-name",
			Attempt: 1,
		},
		Image: &runtimeapi.ImageSpec{Image: "test-image"},
		ImageRef: "test-image-ref",
		State: runtimeapi.ContainerState_CONTAINER_RUNNING,
		Annotations: map[string]string{
			containerHashLabel: "1234",
		},
	}
	expect := &kubecontainer.Container{
		ID: kubecontainer.ContainerID{
			Type: runtimetesting.FakeRuntimeName,
			ID: "test-id",
		},
		Name: "test-name",
		ImageID: "test-image-ref",
		Image: "test-image",
		// The annotation string "1234" is expected to parse as hex 0x1234.
		Hash: uint64(0x1234),
		State: kubecontainer.ContainerStateRunning,
	}
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)
	got, err := m.toKubeContainer(c)
	assert.NoError(t, err)
	assert.Equal(t, expect, got)
} | explode_data.jsonl/22801 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 363
} | [
2830,
3393,
1249,
42,
3760,
4502,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
609,
22255,
2068,
33672,
515,
197,
67211,
25,
330,
1944,
12897,
756,
197,
9209,
7603,
25,
609,
22255,
2068,
33672,
14610,
515,
298,
21297,
25,
262,
330,
1944,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestScaleBounds verifies PodAutoscaler.ScaleBounds: min/max are read from
// the autoscaling min/max-scale annotations, default to 0 when an annotation
// is absent, and (per the "malformed" and "too small" cases) fall back to 0
// when a value does not parse or is negative.
func TestScaleBounds(t *testing.T) {
	cases := []struct {
		name string
		pa *PodAutoscaler
		wantMin int32
		wantMax int32
	}{{
		name: "present",
		pa: pa(map[string]string{
			autoscaling.MinScaleAnnotationKey: "1",
			autoscaling.MaxScaleAnnotationKey: "100",
		}),
		wantMin: 1,
		wantMax: 100,
	}, {
		name: "absent",
		pa: pa(map[string]string{}),
		wantMin: 0,
		wantMax: 0,
	}, {
		name: "only min",
		pa: pa(map[string]string{
			autoscaling.MinScaleAnnotationKey: "1",
		}),
		wantMin: 1,
		wantMax: 0,
	}, {
		name: "only max",
		pa: pa(map[string]string{
			autoscaling.MaxScaleAnnotationKey: "1",
		}),
		wantMin: 0,
		wantMax: 1,
	}, {
		name: "malformed",
		pa: pa(map[string]string{
			autoscaling.MinScaleAnnotationKey: "ham",
			autoscaling.MaxScaleAnnotationKey: "sandwich",
		}),
		wantMin: 0,
		wantMax: 0,
	}, {
		name: "too small",
		pa: pa(map[string]string{
			autoscaling.MinScaleAnnotationKey: "-1",
			autoscaling.MaxScaleAnnotationKey: "-1",
		}),
		wantMin: 0,
		wantMax: 0,
	}}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			min, max := tc.pa.ScaleBounds()
			if min != tc.wantMin {
				t.Errorf("got min: %v wanted: %v", min, tc.wantMin)
			}
			if max != tc.wantMax {
				t.Errorf("got max: %v wanted: %v", max, tc.wantMax)
			}
		})
	}
} | explode_data.jsonl/32168 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 626
} | [
2830,
3393,
6947,
11394,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
11609,
262,
914,
198,
197,
3223,
64,
414,
353,
23527,
19602,
436,
63084,
198,
197,
50780,
6217,
526,
18,
17,
198,
197,
50780,
5974,
526,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestTwoDecimalTruncate checks decimal scale handling with sql_mode
// disabled: assigning a decimal(10,5) value into a decimal(10,1) column
// stores 123.1, and multiplying two very-high-scale decimal literals yields
// a result whose fractional part is limited to 30 digits.
func TestTwoDecimalTruncate(t *testing.T) {
	store, clean := testkit.CreateMockStore(t)
	defer clean()
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Non-strict mode so the lossy assignment below is permitted.
	tk.MustExec("set sql_mode=''")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t1(a decimal(10,5), b decimal(10,1))")
	tk.MustExec("insert into t1 values(123.12345, 123.12345)")
	tk.MustExec("update t1 set b = a")
	res := tk.MustQuery("select a, b from t1")
	res.Check(testkit.Rows("123.12345 123.1"))
	// Product of two long-scale literals: fractional digits capped at 30.
	res = tk.MustQuery("select 2.00000000000000000000000000000001 * 1.000000000000000000000000000000000000000000002")
	res.Check(testkit.Rows("2.000000000000000000000000000000"))
} | explode_data.jsonl/65466 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 240
} | [
2830,
3393,
11613,
11269,
1282,
26900,
1155,
353,
8840,
836,
8,
341,
57279,
11,
4240,
1669,
1273,
8226,
7251,
11571,
6093,
1155,
340,
16867,
4240,
2822,
3244,
74,
1669,
1273,
8226,
7121,
2271,
7695,
1155,
11,
3553,
692,
3244,
74,
50463,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestQueryFunc verifies that Query accepts both raw "key=value" strings and
// a struct whose exported fields become query parameters; the test server
// asserts that both request forms carry query1=test1 and query2=test2.
func TestQueryFunc(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		if r.Header == nil {
			t.Errorf("Expected non-nil request Header")
		}
		params := r.URL.Query()
		checkQuery(t, params, "query1", "test1")
		checkQuery(t, params, "query2", "test2")
	}
	srv := httptest.NewServer(http.HandlerFunc(handler))
	defer srv.Close()

	// Form 1: two raw "key=value" strings.
	req := New().Post(srv.URL)
	req.Query("query1=test1")
	req.Query("query2=test2")
	req.End()

	// Form 2: a struct with matching fields.
	queryStruct := struct {
		Query1 string
		Query2 string
	}{
		Query1: "test1",
		Query2: "test2",
	}
	New().Post(srv.URL).Query(queryStruct).End()
} | explode_data.jsonl/25469 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 240
} | [
2830,
3393,
2859,
9626,
1155,
353,
8840,
836,
8,
341,
57441,
1669,
54320,
70334,
7121,
5475,
19886,
89164,
18552,
3622,
1758,
37508,
11,
435,
353,
1254,
9659,
8,
341,
197,
743,
435,
15753,
621,
2092,
341,
298,
3244,
13080,
445,
18896,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.