text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestProcessWikipediaDumpExplicit(t *testing.T) {
client := retryablehttp.NewClient()
client.RequestLogHook = func(logger retryablehttp.Logger, req *http.Request, retry int) {
req.Header.Set("User-Agent", testUserAgent)
}
cacheDir := t.TempDir()
dumpPath := filepath.Join(cacheDir, path.Base(wikipediaTestDump))
articleCounter := int64(0)
errE := mediawiki.ProcessWikipediaDump(
context.Background(),
&mediawiki.ProcessDumpConfig{
URL: wikipediaTestDump,
Path: dumpPath,
Client: client,
},
func(_ context.Context, a mediawiki.Article) errors.E {
atomic.AddInt64(&articleCounter, int64(1))
b, errE := x.MarshalWithoutEscapeHTML(a)
if errE != nil {
return errors.Wrapf(errE, "cannot marshal json: %+v", a)
}
var c mediawiki.Article
err := json.Unmarshal(b, &c)
if err != nil {
return errors.Wrapf(err, "cannot unmarshal json: %s", string(b))
}
d, err := x.MarshalWithoutEscapeHTML(c)
if err != nil {
return errors.Wrapf(err, "cannot marshal json again: %+v", c)
}
bStr := string(b)
dStr := string(d)
// We have to use JSONEq instead of Equal so that empty slice is equal to nil slice.
assert.JSONEq(t, bStr, dStr)
return nil
},
)
assert.NoError(t, errE)
assert.Equal(t, int64(10), articleCounter)
assert.FileExists(t, dumpPath)
info, err := os.Stat(dumpPath)
require.NoError(t, err)
assert.Equal(t, int64(64819), info.Size())
articleCounter = int64(0)
errE = mediawiki.ProcessWikipediaDump(
context.Background(),
&mediawiki.ProcessDumpConfig{
Path: dumpPath,
},
func(_ context.Context, a mediawiki.Article) errors.E {
atomic.AddInt64(&articleCounter, int64(1))
b, errE := x.MarshalWithoutEscapeHTML(a) //nolint:govet
if errE != nil {
return errors.Wrapf(errE, "cannot marshal json: %+v", a)
}
var c mediawiki.Article
err := json.Unmarshal(b, &c)
if err != nil {
return errors.Wrapf(err, "cannot unmarshal json: %s", string(b))
}
d, err := x.MarshalWithoutEscapeHTML(c)
if err != nil {
return errors.Wrapf(err, "cannot marshal json again: %+v", c)
}
bStr := string(b)
dStr := string(d)
// We have to use JSONEq instead of Equal so that empty slice is equal to nil slice.
assert.JSONEq(t, bStr, dStr)
return nil
},
)
assert.NoError(t, errE)
assert.Equal(t, int64(10), articleCounter)
} | explode_data.jsonl/58093 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 991
} | [
2830,
3393,
7423,
54,
14939,
51056,
98923,
1155,
353,
8840,
836,
8,
341,
25291,
1669,
22683,
480,
1254,
7121,
2959,
741,
25291,
9659,
2201,
31679,
284,
2915,
37833,
22683,
480,
1254,
12750,
11,
4232,
353,
1254,
9659,
11,
22683,
526,
8,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTimeoutHandlerRace(t *testing.T) {
defer afterTest(t)
delayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
ms, _ := strconv.Atoi(r.URL.Path[1:])
if ms == 0 {
ms = 1
}
for i := 0; i < ms; i++ {
w.Write([]byte("hi"))
time.Sleep(time.Millisecond)
}
})
ts := httptest.NewServer(TimeoutHandler(delayHi, 20*time.Millisecond, ""))
defer ts.Close()
var wg sync.WaitGroup
gate := make(chan bool, 10)
n := 50
if testing.Short() {
n = 10
gate = make(chan bool, 3)
}
for i := 0; i < n; i++ {
gate <- true
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-gate }()
res, err := Get(fmt.Sprintf("%s/%d", ts.URL, rand.Intn(50)))
if err == nil {
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
}
}()
}
wg.Wait()
} | explode_data.jsonl/22423 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 378
} | [
2830,
3393,
7636,
3050,
55991,
1155,
353,
8840,
836,
8,
341,
16867,
1283,
2271,
1155,
692,
55174,
13048,
1669,
19954,
9626,
18552,
3622,
5949,
6492,
11,
435,
353,
1900,
8,
341,
197,
47691,
11,
716,
1669,
33317,
67107,
2601,
20893,
17474... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
fd, err := os.Open(path.Join(r.LocalName, filePath))
if err != nil {
t.Fatalf("failed opening file %q: %v", filePath, err)
}
fi, err := fd.Stat()
o := &Object{size: fi.Size(), modTime: fi.ModTime()}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported)
in := localOpenFile{
o: o,
in: wrappedFd,
hash: hash,
fd: fd,
}
buf := make([]byte, 1)
_, err = in.Read(buf)
require.NoError(t, err)
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.Errorf(t, err, "can't copy - source file is being updated")
// turn the checking off and try again
*noCheckUpdated = true
defer func() {
*noCheckUpdated = false
}()
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.NoError(t, err)
} | explode_data.jsonl/40986 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 406
} | [
2830,
3393,
46910,
3973,
1155,
353,
8840,
836,
8,
341,
7000,
1669,
48434,
477,
7121,
6727,
1155,
340,
16867,
435,
991,
977,
1064,
741,
17661,
1820,
1669,
330,
1966,
5419,
22270,
1273,
698,
7000,
4073,
1703,
29605,
11,
330,
1796,
497,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGossipDataCoding(t *testing.T) {
// Check whether encoding and decoding the data is symmetric.
now := utcNow()
cases := []struct {
entries []*pb.MeshSilence
}{
{
entries: []*pb.MeshSilence{
{
Silence: &pb.Silence{
Id: "3be80475-e219-4ee7-b6fc-4b65114e362f",
Matchers: []*pb.Matcher{
{Name: "label1", Pattern: "val1", Type: pb.Matcher_EQUAL},
{Name: "label2", Pattern: "val.+", Type: pb.Matcher_REGEXP},
},
StartsAt: now,
EndsAt: now,
UpdatedAt: now,
},
ExpiresAt: now,
},
{
Silence: &pb.Silence{
Id: "4b1e760d-182c-4980-b873-c1a6827c9817",
Matchers: []*pb.Matcher{
{Name: "label1", Pattern: "val1", Type: pb.Matcher_EQUAL},
},
StartsAt: now.Add(time.Hour),
EndsAt: now.Add(2 * time.Hour),
UpdatedAt: now,
},
ExpiresAt: now.Add(24 * time.Hour),
},
},
},
}
for _, c := range cases {
// Create gossip data from input.
in := gossipData{}
for _, e := range c.entries {
in[e.Silence.Id] = e
}
msg := in.Encode()
require.Equal(t, 1, len(msg), "expected single message for input")
out, err := decodeGossipData(msg[0])
require.NoError(t, err, "decoding message failed")
require.Equal(t, in, out, "decoded data doesn't match encoded data")
}
} | explode_data.jsonl/2696 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 665
} | [
2830,
3393,
38,
41473,
1043,
77513,
1155,
353,
8840,
836,
8,
341,
197,
322,
4248,
3425,
11170,
323,
47116,
279,
821,
374,
54343,
624,
80922,
1669,
69596,
7039,
2822,
1444,
2264,
1669,
3056,
1235,
341,
197,
197,
12940,
29838,
16650,
5015... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestLikeWebDataList(t *testing.T) {
convey.Convey("WebDataList", t, func(ctx convey.C) {
var (
c = context.Background()
vid = int64(36)
offset = int(1)
limit = int(10)
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
list, err := d.WebDataList(c, vid, offset, limit)
ctx.Convey("Then err should be nil.list should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(list, convey.ShouldNotBeNil)
})
})
})
} | explode_data.jsonl/39211 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 220
} | [
2830,
3393,
12949,
5981,
1043,
852,
1155,
353,
8840,
836,
8,
341,
37203,
5617,
4801,
5617,
445,
5981,
1043,
852,
497,
259,
11,
2915,
7502,
20001,
727,
8,
341,
197,
2405,
2399,
298,
1444,
414,
284,
2266,
19047,
741,
298,
197,
1301,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestArgumentFilter(t *testing.T) {
want := UserCommand{
Command: "generate",
Arguments: []string{
"-t=0",
},
}
arguments := []string{
"generate",
"--t=0",
}
command := ArgumentsFilter(arguments)
if command.Command != want.Command {
t.Errorf("Command configuration error: want %s, got %s",
want.Command,
command.Command)
return
}
if len(command.Arguments) == 0 {
t.Errorf("Arguments configuration error")
return
}
if command.Arguments[0] != command.Arguments[0] {
t.Errorf("Arguments configuration error: want %s, got %s", want.Arguments[0], command.Arguments[0])
return
}
} | explode_data.jsonl/30302 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 242
} | [
2830,
3393,
9171,
5632,
1155,
353,
8840,
836,
8,
341,
50780,
1669,
2657,
4062,
515,
197,
97493,
25,
330,
19366,
756,
197,
197,
19139,
25,
3056,
917,
515,
298,
197,
34294,
83,
28,
15,
756,
197,
197,
1583,
197,
630,
197,
16370,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestSeries_Logic(t *testing.T) {
tests := []struct {
series Series
another interface{}
andExpected Series
orExpected Series
}{
{
Bools([]string{"false", "true", "false", "false", "true"}),
"true",
Bools([]string{"false", "true", "false", "false", "true"}),
Bools([]string{"true", "true", "true", "true", "true"}),
},
{
Bools([]string{"false", "true", "false", "false", "true"}),
[]string {"true", "false", "true", "false", "false"},
Bools([]string{"false", "false", "false", "false", "false"}),
Bools([]string{"true", "true", "true", "false", "true"}),
},
{
Bools([]string{"false", "true", "false", "false", "true"}),
Bools([]string{"true", "false", "true", "false", "false"}),
Bools([]string{"false", "false", "false", "false", "false"}),
Bools([]string{"true", "true", "true", "false", "true"}),
},
{
Bools([]string{"false", "true", "false", "false", "true"}),
[]string {"1", "0", "1", "0", "0"},
Bools([]string{"false", "false", "false", "false", "false"}),
Bools([]string{"true", "true", "true", "false", "true"}),
},
{
Bools([]string{"false", "true", "false", "false", "true"}),
[]float64 {1, 0, 1, 0, 0},
Bools([]string{"false", "false", "false", "false", "false"}),
Bools([]string{"true", "true", "true", "false", "true"}),
},
{
Bools([]string{"false", "true", "false", "false", "true"}),
[]int {1, 0, 1, 0, 0},
Bools([]string{"false", "false", "false", "false", "false"}),
Bools([]string{"true", "true", "true", "false", "true"}),
},
}
for testnum, test := range tests {
expected := test.andExpected.Records()
b := test.series.And(test.another)
received := b.Records()
if !reflect.DeepEqual(expected, received) {
t.Errorf(
"Test-And:%v\nExpected:\n%v\nReceived:\n%v",
testnum, expected, received,
)
}
expected = test.orExpected.Records()
b = test.series.Or(test.another)
received = b.Records()
if !reflect.DeepEqual(expected, received) {
t.Errorf(
"Test-Or:%v\nExpected:\n%v\nReceived:\n%v",
testnum, expected, received,
)
}
}
} | explode_data.jsonl/47035 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 921
} | [
2830,
3393,
25544,
62,
26751,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
197,
19880,
981,
11131,
198,
197,
197,
41963,
58915,
1564,
16094,
197,
52477,
18896,
220,
11131,
198,
197,
81166,
18896,
220,
11131,
198,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestIntegration(t *testing.T) {
setupServer := func(responseBody, expectedRequestBody string, statusCode int) *httptest.Server {
t.Helper()
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
defer req.Body.Close()
if expectedRequestBody != "" {
bodyBytes, _ := ioutil.ReadAll(req.Body)
require.Equal(t, expectedRequestBody, strings.TrimSuffix(string(bodyBytes), "\n"))
}
w.WriteHeader(statusCode)
if responseBody != "" {
w.Write([]byte(responseBody))
return
}
w.Write(nil)
}))
}
t.Run("get", func(t *testing.T) {
expectedMessage := "my message"
type Response struct {
Message string `json:"message"`
}
s := setupServer(fmt.Sprintf(`{"message": "%s"}`, expectedMessage), "", 200)
opts := Options{
BaseURL: fmt.Sprintf("%s/api/", s.URL),
}
client, err := New(opts)
require.NoError(t, err, "throws create client")
req, err := client.NewRequest(http.MethodGet, "/my-resource", nil)
require.NoError(t, err, "throws creating request")
response := Response{}
r, err := client.Do(req, &response)
require.NoError(t, err, "throws exec request")
require.Equal(t, Response{
Message: expectedMessage,
}, response)
require.Equal(t, 200, r.StatusCode, "wrong status code")
})
t.Run("post", func(t *testing.T) {
myID := "my id"
type Response struct {
ID string `json:"id"`
}
type RequestBody struct {
Name string `json:"name"`
Description string `json:"description"`
}
expectedName := "my name"
expectedDescription := "my description"
expectedRequestBody := fmt.Sprintf(`{"name":"%s","description":"%s"}`, expectedName, expectedDescription)
s := setupServer(fmt.Sprintf(`{"id": "%s"}`, myID), expectedRequestBody, 200)
opts := Options{
BaseURL: fmt.Sprintf("%s/api/", s.URL),
}
client, err := New(opts)
require.NoError(t, err, "throws create client")
requestBody := RequestBody{
Name: expectedName,
Description: expectedDescription,
}
req, err := client.NewRequest(http.MethodPost, "/my-resource", requestBody)
require.NoError(t, err, "throws creating request")
response := Response{}
r, err := client.Do(req, &response)
require.NoError(t, err, "throws exec request")
require.Equal(t, Response{
ID: myID,
}, response)
require.Equal(t, 200, r.StatusCode, "wrong status code")
})
} | explode_data.jsonl/66014 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 919
} | [
2830,
3393,
52464,
1155,
353,
8840,
836,
8,
341,
84571,
5475,
1669,
2915,
5684,
5444,
11,
3601,
33334,
914,
11,
35532,
526,
8,
353,
96336,
70334,
22997,
341,
197,
3244,
69282,
741,
197,
853,
54320,
70334,
7121,
5475,
19886,
89164,
18552... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestCmd_flagAndArgParsing_Symlink(t *testing.T) {
dir := t.TempDir()
defer testutil.Chdir(t, dir)()
err := os.MkdirAll(filepath.Join(dir, "path", "to", "pkg", "dir"), 0700)
assert.NoError(t, err)
err = os.Symlink(filepath.Join("path", "to", "pkg", "dir"), "foo")
assert.NoError(t, err)
// verify the branch ref is set to the correct value
r := cmddiff.NewRunner(fake.CtxWithDefaultPrinter(), "kpt")
r.C.RunE = NoOpRunE
r.C.SetArgs([]string{"foo" + "@refs/heads/foo"})
err = r.C.Execute()
assert.NoError(t, err)
cwd, err := os.Getwd()
assert.NoError(t, err)
assert.Equal(t, filepath.Join(cwd, "path", "to", "pkg", "dir"), r.Path)
} | explode_data.jsonl/1850 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 284
} | [
2830,
3393,
15613,
10933,
3036,
2735,
68839,
1098,
88,
44243,
1155,
353,
8840,
836,
8,
341,
48532,
1669,
259,
65009,
6184,
741,
16867,
1273,
1314,
6353,
3741,
1155,
11,
5419,
8,
2822,
9859,
1669,
2643,
1321,
12438,
2403,
34793,
22363,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUnitPostQueryHelperError(t *testing.T) {
sr := &snowflakeRestful{
FuncPost: postTestError,
TokenAccessor: getSimpleTokenAccessor(),
}
var err error
var requestID uuid.UUID
requestID = uuid.New()
_, err = postRestfulQueryHelper(context.Background(), sr, &url.Values{}, make(map[string]string), []byte{0x12, 0x34}, 0, requestID, &Config{})
if err == nil {
t.Fatalf("should have failed to post")
}
sr.FuncPost = postTestAppBadGatewayError
requestID = uuid.New()
_, err = postRestfulQueryHelper(context.Background(), sr, &url.Values{}, make(map[string]string), []byte{0x12, 0x34}, 0, requestID, &Config{})
if err == nil {
t.Fatalf("should have failed to post")
}
sr.FuncPost = postTestSuccessButInvalidJSON
requestID = uuid.New()
_, err = postRestfulQueryHelper(context.Background(), sr, &url.Values{}, make(map[string]string), []byte{0x12, 0x34}, 0, requestID, &Config{})
if err == nil {
t.Fatalf("should have failed to post")
}
} | explode_data.jsonl/44740 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 362
} | [
2830,
3393,
4562,
4133,
2859,
5511,
1454,
1155,
353,
8840,
836,
8,
341,
1903,
81,
1669,
609,
74478,
63456,
12416,
1262,
515,
197,
197,
9626,
4133,
25,
414,
1736,
2271,
1454,
345,
197,
33299,
29889,
25,
633,
16374,
3323,
29889,
3148,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestMockHypervisorCreateSandbox(t *testing.T) {
var m *mockHypervisor
assert := assert.New(t)
sandbox := &Sandbox{
config: &SandboxConfig{
ID: "mock_sandbox",
HypervisorConfig: HypervisorConfig{
KernelPath: "",
ImagePath: "",
HypervisorPath: "",
},
},
}
ctx := context.Background()
// wrong config
err := m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil)
assert.Error(err)
sandbox.config.HypervisorConfig = HypervisorConfig{
KernelPath: fmt.Sprintf("%s/%s", testDir, testKernel),
ImagePath: fmt.Sprintf("%s/%s", testDir, testImage),
HypervisorPath: fmt.Sprintf("%s/%s", testDir, testHypervisor),
}
err = m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil)
assert.NoError(err)
} | explode_data.jsonl/59366 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 335
} | [
2830,
3393,
11571,
39,
1082,
31396,
4021,
50,
31536,
1155,
353,
8840,
836,
8,
341,
2405,
296,
353,
16712,
39,
1082,
31396,
198,
6948,
1669,
2060,
7121,
1155,
692,
1903,
31536,
1669,
609,
50,
31536,
515,
197,
25873,
25,
609,
50,
31536,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetServiceHostnames(t *testing.T) {
assert := tassert.New(t)
mc := newFakeMeshCatalog()
testCases := []struct {
svc service.MeshService
sameNamespace bool
expected []string
}{
{
tests.BookstoreV1Service,
true,
[]string{
"bookstore-v1",
"bookstore-v1.default",
"bookstore-v1.default.svc",
"bookstore-v1.default.svc.cluster",
"bookstore-v1.default.svc.cluster.local",
"bookstore-v1:8888",
"bookstore-v1.default:8888",
"bookstore-v1.default.svc:8888",
"bookstore-v1.default.svc.cluster:8888",
"bookstore-v1.default.svc.cluster.local:8888",
},
},
{
tests.BookstoreV1Service,
false,
[]string{
"bookstore-v1.default",
"bookstore-v1.default.svc",
"bookstore-v1.default.svc.cluster",
"bookstore-v1.default.svc.cluster.local",
"bookstore-v1.default:8888",
"bookstore-v1.default.svc:8888",
"bookstore-v1.default.svc.cluster:8888",
"bookstore-v1.default.svc.cluster.local:8888",
},
},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("Testing hostnames for svc %s with sameNamespace=%t", tc.svc, tc.sameNamespace), func(t *testing.T) {
actual, err := mc.getServiceHostnames(tc.svc, tc.sameNamespace)
assert.Nil(err)
assert.ElementsMatch(actual, tc.expected)
})
}
} | explode_data.jsonl/69758 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 628
} | [
2830,
3393,
1949,
1860,
9296,
11400,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
259,
2207,
7121,
1155,
692,
97662,
1669,
501,
52317,
14194,
41606,
2822,
18185,
37302,
1669,
3056,
1235,
341,
197,
1903,
7362,
1843,
2473,
50155,
1860,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetLoadBalancer(t *testing.T) {
testCases := []struct {
desc string
labels map[string]string
expected *types.LoadBalancer
}{
{
desc: "should return nil when no LB labels",
labels: map[string]string{},
expected: nil,
},
{
desc: "should return a struct when labels are set",
labels: map[string]string{
TraefikBackendLoadBalancerMethod: "drr",
TraefikBackendLoadBalancerSticky: "true",
TraefikBackendLoadBalancerStickiness: "true",
TraefikBackendLoadBalancerStickinessCookieName: "foo",
},
expected: &types.LoadBalancer{
Method: "drr",
Sticky: true,
Stickiness: &types.Stickiness{
CookieName: "foo",
},
},
},
{
desc: "should return a nil Stickiness when Stickiness is not set",
labels: map[string]string{
TraefikBackendLoadBalancerMethod: "drr",
TraefikBackendLoadBalancerSticky: "true",
TraefikBackendLoadBalancerStickinessCookieName: "foo",
},
expected: &types.LoadBalancer{
Method: "drr",
Sticky: true,
Stickiness: nil,
},
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
actual := GetLoadBalancer(test.labels)
assert.Equal(t, test.expected, actual)
})
}
} | explode_data.jsonl/51859 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 628
} | [
2830,
3393,
1949,
5879,
93825,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
41653,
257,
914,
198,
197,
95143,
256,
2415,
14032,
30953,
198,
197,
42400,
353,
9242,
13969,
93825,
198,
197,
59403,
197,
197,
515,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRetryPolicyNoRetries(t *testing.T) {
srv, close := mock.NewServer()
defer close()
srv.AppendResponse(mock.WithStatusCode(http.StatusRequestTimeout))
srv.AppendResponse(mock.WithStatusCode(http.StatusInternalServerError))
srv.AppendResponse()
pl := NewPipeline(srv, NewRetryPolicy(&RetryOptions{MaxRetries: -1}))
req, err := NewRequest(context.Background(), http.MethodGet, srv.URL())
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
resp, err := pl.Do(req)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if resp.StatusCode != http.StatusRequestTimeout {
t.Fatalf("unexpected status code: %d", resp.StatusCode)
}
if r := srv.Requests(); r != 1 {
t.Fatalf("wrong try count, got %d expected %d", r, 1)
}
} | explode_data.jsonl/24381 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 287
} | [
2830,
3393,
51560,
13825,
2753,
12020,
4019,
1155,
353,
8840,
836,
8,
341,
1903,
10553,
11,
3265,
1669,
7860,
7121,
5475,
741,
16867,
3265,
741,
1903,
10553,
8982,
2582,
30389,
26124,
15872,
19886,
10538,
1900,
7636,
1171,
1903,
10553,
89... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestDeleterTempDir(t *testing.T) {
tests := map[string]struct {
expectedFailure bool
path string
}{
"just-tmp": {true, "/tmp"},
"not-tmp": {true, "/nottmp"},
"good-tmp": {false, "/tmp/scratch"},
}
for name, test := range tests {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}}
plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
deleter, _ := plug.NewDeleter(spec)
err := deleter.Delete()
if err == nil && test.expectedFailure {
t.Errorf("Expected failure for test '%s' but got nil err", name)
}
if err != nil && !test.expectedFailure {
t.Errorf("Unexpected failure for test '%s': %v", name, err)
}
}
} | explode_data.jsonl/66776 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 378
} | [
2830,
3393,
1912,
273,
465,
12151,
6184,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
2415,
14032,
60,
1235,
341,
197,
42400,
17507,
1807,
198,
197,
26781,
310,
914,
198,
197,
59403,
197,
197,
1,
4250,
2385,
1307,
788,
314,
1866,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestRSAPSSSign(t *testing.T) {
var err error
key, _ := ioutil.ReadFile("test/sample_key")
var rsaPSSKey *rsa.PrivateKey
if rsaPSSKey, err = jwt.ParseRSAPrivateKeyFromPEM(key); err != nil {
t.Errorf("Unable to parse RSA private key: %v", err)
}
for _, data := range rsaPSSTestData {
if data.valid {
parts := strings.Split(data.tokenString, ".")
method := jwt.GetSigningMethod(data.alg)
sig, err := method.Sign(strings.Join(parts[0:2], "."), rsaPSSKey)
if err != nil {
t.Errorf("[%v] Error signing token: %v", data.name, err)
}
if sig == parts[2] {
t.Errorf("[%v] Signatures shouldn't match\nnew:\n%v\noriginal:\n%v", data.name, sig, parts[2])
}
}
}
} | explode_data.jsonl/10573 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 308
} | [
2830,
3393,
11451,
2537,
1220,
7264,
1155,
353,
8840,
836,
8,
341,
2405,
1848,
1465,
271,
23634,
11,
716,
1669,
43144,
78976,
445,
1944,
69851,
3097,
1138,
2405,
68570,
47,
1220,
1592,
353,
60869,
87738,
1592,
198,
743,
68570,
47,
1220,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestThumb_FromFile(t *testing.T) {
conf := config.TestConfig()
thumbsPath := conf.CachePath() + "/_tmp"
defer os.RemoveAll(thumbsPath)
if err := conf.CreateDirectories(); err != nil {
t.Error(err)
}
t.Run("valid parameter", func(t *testing.T) {
fileModel := &entity.File{
FileName: conf.ExamplesPath() + "/elephants.jpg",
FileHash: "1234568889",
}
thumbnail, err := thumb.FromFile(fileModel.FileName, fileModel.FileHash, thumbsPath, 224, 224)
assert.Nil(t, err)
assert.FileExists(t, thumbnail)
})
t.Run("hash too short", func(t *testing.T) {
fileModel := &entity.File{
FileName: conf.ExamplesPath() + "/elephants.jpg",
FileHash: "123",
}
_, err := thumb.FromFile(fileModel.FileName, fileModel.FileHash, thumbsPath, 224, 224)
if err == nil {
t.Fatal("err should NOT be nil")
}
assert.Equal(t, "resample: file hash is empty or too short (123)", err.Error())
})
t.Run("filename too short", func(t *testing.T) {
fileModel := &entity.File{
FileName: "xxx",
FileHash: "12367890",
}
_, err := thumb.FromFile(fileModel.FileName, fileModel.FileHash, thumbsPath, 224, 224)
if err == nil {
t.FailNow()
}
assert.Equal(t, "resample: image filename is empty or too short (xxx)", err.Error())
})
} | explode_data.jsonl/1812 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 501
} | [
2830,
3393,
62699,
53157,
1703,
1155,
353,
8840,
836,
8,
341,
67850,
1669,
2193,
8787,
2648,
2822,
70479,
15775,
1820,
1669,
2335,
46130,
1820,
368,
488,
3521,
62,
5173,
1837,
16867,
2643,
84427,
24365,
15775,
1820,
692,
743,
1848,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIsReady(t *testing.T) {
cases := []struct {
name string
status PodAutoscalerStatus
isReady bool
}{{
name: "empty status should not be ready",
status: PodAutoscalerStatus{},
isReady: false,
}, {
name: "Different condition type should not be ready",
status: PodAutoscalerStatus{
Status: duckv1.Status{
Conditions: duckv1.Conditions{{
Type: PodAutoscalerConditionActive,
Status: corev1.ConditionTrue,
}},
},
},
isReady: false,
}, {
name: "False condition status should not be ready",
status: PodAutoscalerStatus{
Status: duckv1.Status{
Conditions: duckv1.Conditions{{
Type: PodAutoscalerConditionReady,
Status: corev1.ConditionFalse,
}},
},
},
isReady: false,
}, {
name: "Unknown condition status should not be ready",
status: PodAutoscalerStatus{
Status: duckv1.Status{
Conditions: duckv1.Conditions{{
Type: PodAutoscalerConditionReady,
Status: corev1.ConditionUnknown,
}},
},
},
isReady: false,
}, {
name: "Missing condition status should not be ready",
status: PodAutoscalerStatus{
Status: duckv1.Status{
Conditions: duckv1.Conditions{{
Type: PodAutoscalerConditionReady,
}},
},
},
isReady: false,
}, {
name: "True condition status should be ready",
status: PodAutoscalerStatus{
Status: duckv1.Status{
Conditions: duckv1.Conditions{{
Type: PodAutoscalerConditionReady,
Status: corev1.ConditionTrue,
}},
},
},
isReady: true,
}, {
name: "Multiple conditions with ready status should be ready",
status: PodAutoscalerStatus{
Status: duckv1.Status{
Conditions: duckv1.Conditions{{
Type: PodAutoscalerConditionActive,
Status: corev1.ConditionTrue,
}, {
Type: PodAutoscalerConditionReady,
Status: corev1.ConditionTrue,
}},
},
},
isReady: true,
}, {
name: "Multiple conditions with ready status false should not be ready",
status: PodAutoscalerStatus{
Status: duckv1.Status{
Conditions: duckv1.Conditions{{
Type: PodAutoscalerConditionActive,
Status: corev1.ConditionTrue,
}, {
Type: PodAutoscalerConditionReady,
Status: corev1.ConditionFalse,
}},
},
},
isReady: false,
}}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
pa := PodAutoscaler{Status: tc.status}
if got, want := pa.IsReady(), tc.isReady; got != want {
t.Errorf("IsReady = %v, want: %v", got, want)
}
})
}
} | explode_data.jsonl/27226 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1063
} | [
2830,
3393,
3872,
19202,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
11609,
262,
914,
198,
197,
23847,
220,
16821,
19602,
436,
63084,
2522,
198,
197,
19907,
19202,
1807,
198,
197,
15170,
515,
197,
11609,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestHandlersCanSetConnectionClose11(t *testing.T) {
testTCPConnectionCloses(t, "GET / HTTP/1.1\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Connection", "close")
}))
} | explode_data.jsonl/22401 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 80
} | [
2830,
3393,
39949,
6713,
1649,
4526,
7925,
16,
16,
1155,
353,
8840,
836,
8,
341,
18185,
49896,
4526,
34,
49341,
1155,
11,
330,
3806,
608,
10130,
14,
16,
13,
16,
12016,
1699,
12016,
1699,
497,
19954,
9626,
18552,
3622,
5949,
6492,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestCompatibility(t *testing.T) {
scheme := runtime.NewScheme()
for _, builder := range groups {
require.NoError(t, builder.AddToScheme(scheme))
}
roundtrip.NewCompatibilityTestOptions(scheme).Complete(t).Run(t)
} | explode_data.jsonl/28219 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 80
} | [
2830,
3393,
85880,
1155,
353,
8840,
836,
8,
341,
1903,
8058,
1669,
15592,
7121,
28906,
741,
2023,
8358,
7363,
1669,
2088,
5203,
341,
197,
17957,
35699,
1155,
11,
7363,
1904,
1249,
28906,
1141,
8058,
1171,
197,
532,
197,
1049,
32981,
712... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestLBServiceConversion(t *testing.T) {
serviceName := "service1"
namespace := "default"
addresses := []coreV1.LoadBalancerIngress{
{
IP: "127.68.32.112",
},
{
IP: "127.68.32.113",
},
{
Hostname: "127.68.32.114",
},
{
Hostname: "127.68.32.115",
},
}
extSvc := coreV1.Service{
ObjectMeta: metaV1.ObjectMeta{
Name: serviceName,
Namespace: namespace,
},
Spec: coreV1.ServiceSpec{
Ports: []coreV1.ServicePort{
{
Name: "http",
Port: 80,
Protocol: coreV1.ProtocolTCP,
},
},
Type: coreV1.ServiceTypeLoadBalancer,
},
Status: coreV1.ServiceStatus{
LoadBalancer: coreV1.LoadBalancerStatus{
Ingress: addresses,
},
},
}
service := ConvertService(extSvc, domainSuffix, clusterID)
if service == nil {
t.Fatalf("could not convert external service")
}
gotAddresses := service.Attributes.ClusterExternalAddresses.GetAddressesFor(clusterID)
if len(gotAddresses) == 0 {
t.Fatalf("no load balancer addresses found")
}
for i, addr := range addresses {
var want string
if len(addr.IP) > 0 {
want = addr.IP
} else {
want = addr.Hostname
}
got := gotAddresses[i]
if got != want {
t.Fatalf("Expected address %s but got %s", want, got)
}
}
} | explode_data.jsonl/73808 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 567
} | [
2830,
3393,
34068,
1860,
48237,
1155,
353,
8840,
836,
8,
341,
52934,
675,
1669,
330,
7936,
16,
698,
56623,
1669,
330,
2258,
1837,
197,
53789,
1669,
3056,
2153,
53,
16,
13969,
93825,
641,
2483,
515,
197,
197,
515,
298,
197,
3298,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestStateLock_POLSafety1 exercises proof-of-lock safety: cs1 misses the
// round-0 polka, locks on the round-1 proposal instead, and must NOT react
// (no new round step) when the stale round-0 prevotes finally arrive in
// round 2.
func TestStateLock_POLSafety1(t *testing.T) {
	config := configSetup(t)
	logger := log.NewNopLogger()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// cs1 is the validator under test; vs2-vs4 simulate the other validators.
	cs1, vss := makeState(ctx, t, makeStateArgs{config: config, logger: logger})
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
	height, round := cs1.Height, cs1.Round
	partSize := types.BlockPartSizeBytes
	// Event-bus subscriptions used to observe cs1's state transitions.
	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
	pv1, err := cs1.privValidator.GetPubKey(ctx)
	require.NoError(t, err)
	addr := pv1.Address()
	voteCh := subscribeToVoter(ctx, t, cs1, addr)
	// start round and wait for propose and prevote
	startTestRound(ctx, cs1, cs1.Height, round)
	ensureNewRound(t, newRoundCh, height, round)
	ensureNewProposal(t, proposalCh, height, round)
	rs := cs1.GetRoundState()
	propBlock := rs.ProposalBlock
	ensurePrevoteMatch(t, voteCh, height, round, propBlock.Hash())
	partSet, err := propBlock.MakePartSet(partSize)
	require.NoError(t, err)
	blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: partSet.Header()}
	// the others sign a polka but we don't see it
	prevotes := signVotes(ctx, t, tmproto.PrevoteType, config.ChainID(),
		blockID,
		vs2, vs3, vs4)
	// we do see them precommit nil
	signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4)
	// cs1 precommit nil
	ensurePrecommit(t, voteCh, height, round)
	ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds())
	incrementRound(vs2, vs3, vs4)
	round++ // moving to the next round
	// Round 1: vs2 is the proposer; build its proposal from a fresh state.
	cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication())
	prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round)
	propBlockParts, err := propBlock.MakePartSet(partSize)
	require.NoError(t, err)
	r2BlockID := types.BlockID{
		Hash: propBlock.Hash(),
		PartSetHeader: propBlockParts.Header(),
	}
	ensureNewRound(t, newRoundCh, height, round)
	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
	err = cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer")
	require.NoError(t, err)
	/*Round2
	// we timeout and prevote our lock
	// a polka happened but we didn't see it!
	*/
	ensureNewProposal(t, proposalCh, height, round)
	rs = cs1.GetRoundState()
	// cs1 never saw the round-0 polka, so it must enter round 1 unlocked.
	require.Nil(t, rs.LockedBlock, "we should not be locked!")
	// go to prevote, prevote for proposal block
	ensurePrevoteMatch(t, voteCh, height, round, r2BlockID.Hash)
	// now we see the others prevote for it, so we should lock on it
	signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), r2BlockID, vs2, vs3, vs4)
	ensurePrecommit(t, voteCh, height, round)
	// we should have precommitted
	validatePrecommit(ctx, t, cs1, round, round, vss[0], r2BlockID.Hash, r2BlockID.Hash)
	signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4)
	ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds())
	incrementRound(vs2, vs3, vs4)
	round++ // moving to the next round
	ensureNewRound(t, newRoundCh, height, round)
	/*Round3
	we see the polka from round 1 but we shouldn't unlock!
	*/
	// timeout of propose
	ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.proposeTimeout(round).Nanoseconds())
	// finish prevote
	ensurePrevoteMatch(t, voteCh, height, round, nil)
	newStepCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRoundStep)
	// before prevotes from the previous round are added
	// add prevotes from the earlier round
	addVotes(cs1, prevotes...)
	// The stale round-0 polka must not trigger any state transition.
	ensureNoNewRoundStep(t, newStepCh)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1392
} | [
2830,
3393,
1397,
11989,
33165,
73037,
16,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
2193,
21821,
1155,
340,
17060,
1669,
1487,
7121,
45,
453,
7395,
741,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
16867,
9121,
2822,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMatchOutputsNothingGivenEmptyInput(t *testing.T) {
t.Parallel()
got, err := script.NewPipe().Match("anything").String()
if err != nil {
t.Fatal(err)
}
if got != "" {
t.Error("want no output given empty input")
}
} | explode_data.jsonl/51494 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 88
} | [
2830,
3393,
8331,
61438,
23780,
22043,
3522,
2505,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
3174,
354,
11,
1848,
1669,
5316,
7121,
34077,
1005,
8331,
445,
72154,
1827,
703,
741,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestLocalhost verifies that, with socket.include_localhost enabled, the
// socket metricset reports a newly opened loopback listener and populates the
// expected event fields (action, pid, process name, uid, direction, transport,
// destination IP).
func TestLocalhost(t *testing.T) {
	config := getConfig()
	config["socket.include_localhost"] = true
	ms := mbtest.NewReportingMetricSetV2(t, config)
	// Consume first set of events - list of all currently open sockets
	events, errs := mbtest.ReportingFetchV2(ms)
	if errs != nil {
		t.Fatal("fetch", errs)
	}
	// Open a fresh TCP listener on an ephemeral loopback port; the next fetch
	// should report it as a newly opened socket.
	ln, err := net.Listen("tcp4", "127.0.0.1:")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()
	listenerPort := getPort(t, ln.Addr())
	events, errs = mbtest.ReportingFetchV2(ms)
	if len(errs) > 0 {
		t.Fatalf("received error: %+v", errs[0])
	}
	if len(events) == 0 {
		t.Fatal("no events were generated")
	}
	// Locate the event whose destination port matches our listener.
	var event *mb.Event
	for _, evt := range events {
		destinationPort, err := evt.RootFields.GetValue("destination.port")
		if assert.NoError(t, err) {
			if destinationPort == listenerPort {
				// NOTE(review): &evt aliases the loop variable on Go < 1.22;
				// safe here only because we break immediately after.
				event = &evt
				break
			}
		}
	}
	if event == nil {
		t.Fatal("socket not found")
	}
	// Field-by-field verification of the reported socket-open event.
	checkFieldValue(t, event.RootFields, "event.action", eventActionSocketOpened.String())
	checkFieldValue(t, event.RootFields, "process.pid", os.Getpid())
	checkFieldValue(t, event.RootFields, "process.name", "socket.test")
	checkFieldValue(t, event.RootFields, "user.id", os.Geteuid())
	checkFieldValue(t, event.RootFields, "network.direction", sock.Listening.String())
	checkFieldValue(t, event.RootFields, "network.transport", "tcp")
	checkFieldValue(t, event.RootFields, "destination.ip", "127.0.0.1")
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 559
} | [
2830,
3393,
7319,
3790,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
66763,
741,
25873,
1183,
9556,
29645,
62,
8301,
1341,
284,
830,
271,
47691,
1669,
10016,
1944,
7121,
70131,
54310,
1649,
53,
17,
1155,
11,
2193,
692,
197,
322,
1200,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
// TestChatSrvTeamChannelNameMentions posts, for each of several team
// channels, a message that mentions a real channel ("#<topic>") and a
// non-existent one ("#error"), then verifies the presented thread surfaces
// exactly the one valid channel-name mention.
func TestChatSrvTeamChannelNameMentions(t *testing.T) {
	runWithMemberTypes(t, func(mt chat1.ConversationMembersType) {
		// Only run this test for teams
		switch mt {
		case chat1.ConversationMembersType_TEAM:
		default:
			return
		}
		ctc := makeChatTestContext(t, "TestChatSrvTeamChannelNameMentions", 2)
		defer ctc.cleanup()
		users := ctc.users()
		ctx := ctc.as(t, users[0]).startCtx
		ctx1 := ctc.as(t, users[1]).startCtx
		// Register notification listeners for both users so the events
		// generated below can be consumed and asserted on.
		listener0 := newServerChatListener()
		ctc.as(t, users[0]).h.G().NotifyRouter.AddListener(listener0)
		listener1 := newServerChatListener()
		ctc.as(t, users[1]).h.G().NotifyRouter.AddListener(listener1)
		conv := mustCreateConversationForTest(t, ctc, users[0], chat1.TopicType_CHAT, mt,
			ctc.as(t, users[1]).user())
		consumeNewConversation(t, listener0, conv.Id)
		consumeNewConversation(t, listener1, conv.Id)
		topicNames := []string{"miketime", "random", "hi"}
		for index, topicName := range topicNames {
			// User 1 creates each channel; user 0 joins it by ID afterwards.
			// NOTE(review): this call uses user 0's ctx with user 1's handler
			// (and JoinConversationByIDLocal below does the reverse) — looks
			// intentional in this harness, but worth confirming.
			channel, err := ctc.as(t, users[1]).chatLocalHandler().NewConversationLocal(ctx,
				chat1.NewConversationLocalArg{
					TlfName: conv.TlfName,
					TopicName: &topicName,
					TopicType: chat1.TopicType_CHAT,
					TlfVisibility: keybase1.TLFVisibility_PRIVATE,
					MembersType: chat1.ConversationMembersType_TEAM,
				})
			t.Logf("conv: %s chan: %s, err: %v", conv.Id, channel.Conv.GetConvID(), err)
			require.NoError(t, err)
			assertNoNewConversation(t, listener0)
			consumeNewConversation(t, listener1, channel.Conv.GetConvID())
			consumeNewMsgRemote(t, listener1, chat1.MessageType_JOIN)
			// The team-creation SYSTEM message is only emitted for the first
			// channel created.
			if index == 0 {
				consumeNewMsgRemote(t, listener0, chat1.MessageType_SYSTEM)
				consumeNewMsgRemote(t, listener1, chat1.MessageType_SYSTEM)
			}
			_, err = ctc.as(t, users[0]).chatLocalHandler().JoinConversationByIDLocal(ctx1,
				channel.Conv.GetConvID())
			require.NoError(t, err)
			consumeNewMsgRemote(t, listener0, chat1.MessageType_JOIN)
			consumeNewMsgRemote(t, listener1, chat1.MessageType_JOIN)
			// Post a message containing one valid channel mention and one
			// bogus mention ("#error") that must be ignored.
			_, err = ctc.as(t, users[1]).chatLocalHandler().PostLocal(ctx1, chat1.PostLocalArg{
				ConversationID: channel.Conv.GetConvID(),
				Msg: chat1.MessagePlaintext{
					ClientHeader: chat1.MessageClientHeader{
						Conv: channel.Conv.Info.Triple,
						MessageType: chat1.MessageType_TEXT,
						TlfName: channel.Conv.Info.TlfName,
					},
					MessageBody: chat1.NewMessageBodyWithText(chat1.MessageText{
						Body: fmt.Sprintf("The worst channel is #%s. #error", topicName),
					}),
				},
			})
			require.NoError(t, err)
			consumeNewMsgRemote(t, listener0, chat1.MessageType_TEXT)
			consumeNewMsgRemote(t, listener1, chat1.MessageType_TEXT)
			tv, err := ctc.as(t, users[0]).chatLocalHandler().GetThreadLocal(ctx, chat1.GetThreadLocalArg{
				ConversationID: channel.Conv.GetConvID(),
				Query: &chat1.GetThreadQuery{
					MessageTypes: []chat1.MessageType{chat1.MessageType_TEXT},
				},
			})
			require.NoError(t, err)
			uid := users[0].User.GetUID().ToBytes()
			ptv := utils.PresentThreadView(ctx, ctc.as(t, users[0]).h.G(), uid, tv.Thread,
				channel.Conv.GetConvID())
			// Exactly one TEXT message, carrying exactly one valid channel
			// mention whose name matches the channel we posted into.
			require.Equal(t, 1, len(ptv.Messages))
			require.Equal(t, 1, len(ptv.Messages[0].Valid().ChannelNameMentions))
			require.Equal(t, topicName, ptv.Messages[0].Valid().ChannelNameMentions[0].Name)
		}
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1412
} | [
2830,
3393,
15672,
50,
10553,
14597,
9629,
675,
44,
63701,
1155,
353,
8840,
836,
8,
341,
56742,
2354,
9366,
4173,
1155,
11,
2915,
81618,
6236,
16,
4801,
22323,
24371,
929,
8,
341,
197,
197,
322,
8278,
1598,
419,
1273,
369,
7263,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestRelativePaths(t *testing.T) {
dockerfile := "Dockerfile_relative_copy"
t.Run("test_relative_"+dockerfile, func(t *testing.T) {
t.Parallel()
dockerfile = filepath.Join("./dockerfiles", dockerfile)
contextPath := "./context"
err := imageBuilder.buildRelativePathsImage(
config.imageRepo,
dockerfile,
config.serviceAccount,
contextPath,
)
if err != nil {
t.Fatal(err)
}
dockerImage := GetDockerImage(config.imageRepo, "test_relative_"+dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, "test_relative_"+dockerfile)
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
})
} | explode_data.jsonl/672 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 294
} | [
2830,
3393,
28442,
26901,
1155,
353,
8840,
836,
8,
1476,
2698,
13659,
1192,
1669,
330,
35,
13659,
1192,
29286,
16096,
1837,
3244,
16708,
445,
1944,
29286,
33415,
28648,
1192,
11,
2915,
1155,
353,
8840,
836,
8,
341,
197,
3244,
41288,
795... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestK8SServiceEnvVarConfigMapsSecretsEmptyDoesNotAddEnvVarsAndVolumesToPod(t *testing.T) {
t.Parallel()
deployment := renderK8SServiceDeploymentWithSetValues(t, map[string]string{})
// Verify that there is only one container
renderedPodContainers := deployment.Spec.Template.Spec.Containers
require.Equal(t, len(renderedPodContainers), 1)
appContainer := renderedPodContainers[0]
// ... and that there are no environments
environments := appContainer.Env
assert.Equal(t, len(environments), 0)
// ... or volumes configured
renderedPodVolumes := deployment.Spec.Template.Spec.Volumes
assert.Equal(t, len(renderedPodVolumes), 0)
} | explode_data.jsonl/59752 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 206
} | [
2830,
3393,
42,
23,
1220,
1017,
14359,
3962,
2648,
36562,
19773,
82,
3522,
21468,
2623,
2212,
14359,
28305,
3036,
96325,
1249,
23527,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
197,
82213,
1669,
3141,
42,
23,
1220,
1017,
752... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStrSlice(t *testing.T) {
val := string("hello")
m := map[string]interface{}{"value": []string{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").StrSlice()[0])
assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0])
assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice())
assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0])
assert.Panics(t, func() {
New(m).Get("nothing").MustStrSlice()
})
} | explode_data.jsonl/23409 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 192
} | [
2830,
3393,
2580,
33236,
1155,
353,
8840,
836,
8,
1476,
19302,
1669,
914,
445,
14990,
1138,
2109,
1669,
2415,
14032,
31344,
6257,
4913,
957,
788,
3056,
917,
90,
831,
2137,
330,
41212,
788,
2092,
532,
6948,
12808,
1155,
11,
1044,
11,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTPRKinds(t *testing.T) {
var serviceInstanceSample = v1beta1.ThirdPartyResource{
TypeMeta: metav1.TypeMeta{
Kind: "ThirdPartyResource",
APIVersion: "v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: withGroupName("service-instance"),
},
Versions: []v1beta1.APIVersion{
{Name: "v1alpha1"},
},
}
var serviceBrokerSample = v1beta1.ThirdPartyResource{
TypeMeta: metav1.TypeMeta{
Kind: "ThirdPartyResource",
APIVersion: "v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: withGroupName("service-broker"),
},
Versions: []v1beta1.APIVersion{
{Name: "v1alpha1"},
},
}
var serviceClassSample = v1beta1.ThirdPartyResource{
TypeMeta: metav1.TypeMeta{
Kind: "ThirdPartyResource",
APIVersion: "v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: withGroupName("service-class"),
},
Versions: []v1beta1.APIVersion{
{Name: "v1alpha1"},
},
}
var serviceInstanceCredentialSample = v1beta1.ThirdPartyResource{
TypeMeta: metav1.TypeMeta{
Kind: "ThirdPartyResource",
APIVersion: "v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: withGroupName("service-instance-credential"),
},
Versions: []v1beta1.APIVersion{
{Name: "v1alpha1"},
},
}
if !reflect.DeepEqual(serviceInstanceSample, serviceInstanceTPR) {
t.Errorf("Unexpected ServiceInstance TPR structure")
}
if !reflect.DeepEqual(serviceInstanceCredentialSample, serviceInstanceCredentialTPR) {
t.Errorf("Unexpected ServiceBroker TPR structure")
}
if !reflect.DeepEqual(serviceBrokerSample, serviceBrokerTPR) {
t.Errorf("Unexpected ServiceInstanceCredential TPR structure")
}
if !reflect.DeepEqual(serviceClassSample, serviceClassTPR) {
t.Errorf("Unexpected Service Class TPR structure")
}
} | explode_data.jsonl/71773 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 713
} | [
2830,
3393,
4239,
77020,
8673,
1155,
353,
8840,
836,
8,
341,
2405,
2473,
2523,
17571,
284,
348,
16,
19127,
16,
5111,
2603,
37361,
4783,
515,
197,
27725,
12175,
25,
77520,
16,
10184,
12175,
515,
298,
197,
10629,
25,
981,
330,
36975,
37... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestCopyObjectWithSpecialChars(t *testing.T) {
ts := newTestServer(t)
defer ts.Close()
svc := ts.s3Client()
srcMeta := map[string]string{
"Content-Type": "text/plain",
}
srcKey := "src+key,with special;chars!?="
content := "contents"
ts.backendPutString(defaultBucket, srcKey, srcMeta, content)
copySource := "/" + defaultBucket + "/" + url.QueryEscape(srcKey)
_, err := svc.CopyObject(&s3.CopyObjectInput{
Bucket: aws.String(defaultBucket),
Key: aws.String("dst-key"),
CopySource: aws.String(copySource),
})
ts.OK(err)
obj, err := svc.GetObject(&s3.GetObjectInput{
Bucket: aws.String(defaultBucket),
Key: aws.String(srcKey),
})
if err != nil {
t.Fatalf("object not found with key %v", srcKey)
}
objContent, err := ioutil.ReadAll(obj.Body)
ts.OK(err)
if !bytes.Equal([]byte(content), objContent) {
ts.Fatalf("object contents are different %v!=%v", content, objContent)
}
} | explode_data.jsonl/22257 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 382
} | [
2830,
3393,
12106,
1190,
2354,
20366,
32516,
1155,
353,
8840,
836,
8,
341,
57441,
1669,
501,
2271,
5475,
1155,
340,
16867,
10591,
10421,
741,
1903,
7362,
1669,
10591,
514,
18,
2959,
2822,
41144,
12175,
1669,
2415,
14032,
30953,
515,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestAddStaticRouteDoesNotMatchForInvalidMethod(t *testing.T) {
r := NewRouter()
r.AddStaticRoute("test", http.MethodGet, "/test", dummyHandler)
req := events.APIGatewayProxyRequest{
Path: "/test",
HTTPMethod: http.MethodPost,
}
assert.False(t, r.Routes["test"].Match(req), "Expected static route matching to return false")
} | explode_data.jsonl/4487 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 125
} | [
2830,
3393,
2212,
11690,
4899,
21468,
2623,
8331,
2461,
7928,
3523,
1155,
353,
8840,
836,
8,
341,
7000,
1669,
1532,
9523,
741,
7000,
1904,
11690,
4899,
445,
1944,
497,
1758,
20798,
1949,
11,
3521,
1944,
497,
17292,
3050,
692,
24395,
166... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestPullerDataNotAvailable checks the pull path when the selected peer has
// no private data: p1 pulls only from p2 (per collection policy), p2 answers
// with an empty RWSet, p3 must never be contacted, and the fetch completes
// without error but yields no available elements.
func TestPullerDataNotAvailable(t *testing.T) {
	// Scenario: p1 pulls from p2 and not from p3
	// but the data in p2 doesn't exist
	gn := &gossipNetwork{}
	// col1's policy maps to p2, so p1 will select p2 as the pull target.
	policyStore := newCollectionStore().withPolicy("col1", uint64(100)).thatMapsTo("p2")
	factoryMock := &mocks.CollectionAccessFactory{}
	factoryMock.On("AccessPolicy", mock.Anything, mock.Anything).Return(&mocks.CollectionAccessPolicy{}, nil)
	p1 := gn.newPuller("p1", policyStore, factoryMock, membership(peerData{"p2", uint64(1)}, peerData{"p3", uint64(1)})...)
	policyStore = newCollectionStore().withPolicy("col1", uint64(100)).thatMapsTo("p1")
	p2 := gn.newPuller("p2", policyStore, factoryMock)
	dig := &proto.PvtDataDigest{
		TxId: "txID1",
		Collection: "col1",
		Namespace: "ns1",
	}
	// p2's canned answer: the digest resolves to an empty private RWSet.
	store := Dig2PvtRWSetWithConfig{
		privdatacommon.DigKey{
			TxId: "txID1",
			Collection: "col1",
			Namespace: "ns1",
		}: &util.PrivateRWSetWithConfig{
			RWSet: []util.PrivateRWSet{},
		},
	}
	p2.PrivateDataRetriever.(*dataRetrieverMock).On("CollectionRWSet", mock.MatchedBy(protoMatcher(dig)), mock.Anything).Return(store, true, nil)
	// p3 must never be asked; its retriever fails the test if invoked.
	p3 := gn.newPuller("p3", newCollectionStore(), factoryMock)
	p3.PrivateDataRetriever.(*dataRetrieverMock).On("CollectionRWSet", mock.MatchedBy(protoMatcher(dig)), mock.Anything).Run(func(_ mock.Arguments) {
		t.Fatal("p3 shouldn't have been selected for pull")
	})
	dasf := &digestsAndSourceFactory{}
	fetchedMessages, err := p1.fetch(dasf.mapDigest(toDigKey(dig)).toSources().create())
	// The fetch succeeds but no private data is available.
	assert.Empty(t, fetchedMessages.AvailableElements)
	assert.NoError(t, err)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 601
} | [
2830,
3393,
36068,
261,
1043,
2623,
16485,
1155,
353,
8840,
836,
8,
341,
197,
322,
58663,
25,
281,
16,
33045,
504,
281,
17,
323,
537,
504,
281,
18,
198,
197,
322,
714,
279,
821,
304,
281,
17,
3171,
944,
3000,
198,
3174,
77,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// Test_All is entirely commented out: it sketches a full transaction
// round-trip (sign -> marshal binary -> unmarshal -> field-by-field compare
// -> proof-of-work verify) but is currently disabled.
// NOTE(review): dead code — either re-enable it (fixing whatever broke) or
// delete it so the test suite reflects reality.
func Test_All(t *testing.T) {
	// key := crypto.GenerateKey()
	// transaction := NewTransaction([]byte(key.PublicKey), nil, []byte(tool.RandomString(100)))
	// // Simulate mining difficulty
	// prefixMatch := tool.GenerateBytes(1, 0)
	// transaction.Header.Nonce = transaction.GenerateNonce(prefixMatch)
	// transaction.Signature = transaction.Sign(key.PrivateKey)
	// data, err := transaction.MarshalBinary()
	// if err != nil {
	// 	t.Error(err)
	// }
	// // Restore the transaction from data
	// tr2 := &Transaction{}
	// _, err = tr2.UnmarshalBinary(data)
	// if err != nil {
	// 	t.Error(err)
	// }
	// // Compare field by field for equality
	// if !reflect.DeepEqual(tr2.Signature, transaction.Signature) ||
	// 	!reflect.DeepEqual(tr2.Payload, transaction.Payload) ||
	// 	!reflect.DeepEqual(tr2.Header.From, transaction.Header.From) ||
	// 	!reflect.DeepEqual(tr2.Header.To, transaction.Header.To) ||
	// 	tr2.Header.Nonce != transaction.Header.Nonce ||
	// 	tr2.Header.Timestamp != transaction.Header.Timestamp ||
	// 	!reflect.DeepEqual(tr2.Header.PayloadHash, transaction.Header.PayloadHash) ||
	// 	tr2.Header.PayloadLen != transaction.Header.PayloadLen {
	// 	fmt.Println(reflect.DeepEqual(tr2.Signature, transaction.Signature))
	// 	fmt.Println(reflect.DeepEqual(tr2.Payload, transaction.Payload))
	// 	fmt.Println(reflect.DeepEqual(tr2.Header.From, transaction.Header.From))
	// 	fmt.Println(reflect.DeepEqual(tr2.Header.To, transaction.Header.To))
	// 	fmt.Println(tr2.Header.Nonce != transaction.Header.Nonce)
	// 	fmt.Println(tr2.Header.Timestamp != transaction.Header.Timestamp)
	// 	fmt.Println(tr2.Header.Timestamp)
	// 	fmt.Println(transaction.Header.Timestamp)
	// 	fmt.Println("tr2.Signature: " + string(tr2.Signature))
	// 	fmt.Println("transaction.Signature: " + string(transaction.Signature))
	// 	t.Error("error")
	// }
	// // Check whether the transaction is valid
	// verifyResult := transaction.VerifyTransaction(prefixMatch)
	// if !verifyResult {
	// 	t.Error("verify failed")
	// }
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 770
} | [
2830,
3393,
53629,
1155,
353,
8840,
836,
8,
341,
197,
322,
1376,
1669,
19028,
57582,
1592,
741,
197,
322,
7745,
1669,
1532,
8070,
10556,
3782,
4857,
49139,
1592,
701,
2092,
11,
3056,
3782,
48950,
26709,
703,
7,
16,
15,
15,
19235,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetHostsWithFilter(t *testing.T) {
filterHostIP := net.ParseIP("127.0.0.3")
cluster := createCluster()
// Filter to remove one of the localhost nodes
cluster.HostFilter = HostFilterFunc(func(host *HostInfo) bool {
if host.ConnectAddress().Equal(filterHostIP) {
return false
}
return true
})
session := createSessionFromCluster(cluster, t)
hosts, partitioner, err := session.hostSource.GetHosts()
assertTrue(t, "err == nil", err == nil)
assertTrue(t, "len(hosts) == 2", len(hosts) == 2)
assertTrue(t, "len(partitioner) != 0", len(partitioner) != 0)
for _, host := range hosts {
if host.ConnectAddress().Equal(filterHostIP) {
t.Fatal(fmt.Sprintf("Did not expect to see '%q' in host list", filterHostIP))
}
}
} | explode_data.jsonl/37535 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 281
} | [
2830,
3393,
1949,
9296,
16056,
5632,
1155,
353,
8840,
836,
8,
341,
50108,
9296,
3298,
1669,
4179,
8937,
3298,
445,
16,
17,
22,
13,
15,
13,
15,
13,
18,
1138,
197,
18855,
1669,
1855,
28678,
2822,
197,
322,
12339,
311,
4057,
825,
315,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestVectorJSON(t *testing.T) {
input := []struct {
plain string
value Vector
}{
{
plain: `[]`,
value: Vector{},
},
{
plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`,
value: Vector{&Sample{
Metric: Metric{
MetricNameLabel: "test_metric",
},
Value: 123.1,
Timestamp: 1234567,
}},
},
{
plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`,
value: Vector{
&Sample{
Metric: Metric{
MetricNameLabel: "test_metric",
},
Value: 123.1,
Timestamp: 1234567,
},
&Sample{
Metric: Metric{
"foo": "bar",
},
Value: SampleValue(math.Inf(1)),
Timestamp: 1234,
},
},
},
}
for _, test := range input {
b, err := json.Marshal(test.value)
if err != nil {
t.Error(err)
continue
}
if string(b) != test.plain {
t.Errorf("encoding error: expected %q, got %q", test.plain, b)
continue
}
var vec Vector
err = json.Unmarshal(b, &vec)
if err != nil {
t.Error(err)
continue
}
if !reflect.DeepEqual(vec, test.value) {
t.Errorf("decoding error: expected %v, got %v", test.value, vec)
}
}
} | explode_data.jsonl/45164 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 622
} | [
2830,
3393,
3781,
5370,
1155,
353,
8840,
836,
8,
341,
22427,
1669,
3056,
1235,
341,
197,
197,
20772,
914,
198,
197,
16309,
4196,
198,
197,
59403,
197,
197,
515,
298,
197,
20772,
25,
1565,
1294,
12892,
298,
16309,
25,
4196,
38837,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestLocationAreaByName(t *testing.T) {
result, _ := pokeapi.LocationArea("canalave-city-area")
assert.Equal(t, "canalave-city-area", result.Name,
"Expect to receive Canalave City area.")
} | explode_data.jsonl/63736 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 71
} | [
2830,
3393,
4707,
8726,
16898,
1155,
353,
8840,
836,
8,
341,
9559,
11,
716,
1669,
51551,
2068,
4515,
8726,
445,
4814,
278,
523,
53329,
29022,
1138,
6948,
12808,
1155,
11,
330,
4814,
278,
523,
53329,
29022,
497,
1102,
2967,
345,
197,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestEnvironmentVpcUpserter drives the VPC upsert workflow step for an
// environment named "foo" against a mocked stack manager, then verifies both
// the parameters the step publishes and the mock call counts.
func TestEnvironmentVpcUpserter(t *testing.T) {
	assert := assert.New(t)
	workflow := new(environmentWorkflow)
	workflow.environment = &common.Environment{
		Name: "foo",
	}
	workflow.environment.Cluster.KeyName = "mykey"
	vpcInputParams := make(map[string]string)
	// Mock: the upsert succeeds, the stack ends in CREATE_COMPLETE, and image
	// lookup / AZ count return canned values.
	stackManager := new(mockedStackManagerForUpsert)
	stackManager.On("AwaitFinalStatus", "mu-vpc-foo").Return(&common.Stack{Status: common.StackStatusCreateComplete})
	stackManager.On("UpsertStack", "mu-vpc-foo", mock.AnythingOfType("map[string]string")).Return(nil)
	stackManager.On("FindLatestImageID").Return("ami-00000", nil)
	stackManager.On("CountAZs").Return(3)
	// The same mock is passed for all four manager dependencies the step takes.
	err := workflow.environmentVpcUpserter("mu", vpcInputParams, vpcInputParams, stackManager, stackManager, stackManager, stackManager)()
	assert.Nil(err)
	// The step exports stack-output references for downstream workflow steps.
	assert.Equal("mu-vpc-foo-VpcId", vpcInputParams["VpcId"])
	assert.Equal("mu-vpc-foo-InstanceSubnetIds", vpcInputParams["InstanceSubnetIds"])
	stackManager.AssertExpectations(t)
	stackManager.AssertNumberOfCalls(t, "AwaitFinalStatus", 1)
	stackManager.AssertNumberOfCalls(t, "UpsertStack", 1)
	stackManager.AssertNumberOfCalls(t, "FindLatestImageID", 1)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 410
} | [
2830,
3393,
12723,
53,
3992,
2324,
90727,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
197,
56249,
1669,
501,
67591,
62768,
340,
197,
56249,
62145,
284,
609,
5464,
45651,
515,
197,
21297,
25,
330,
7975,
756,
197,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestCookie exercises the cookie middleware end-to-end — set, read, delete,
// and absence of a cookie — using an in-memory cookie jar to mimic a browser.
func TestCookie(t *testing.T) {
	root := mux.NewRouter()
	a := assertions.New(t)
	// Random keys for the secure-cookie codec (block cipher + HMAC hash).
	blockKey := random.Bytes(32)
	hashKey := random.Bytes(64)
	root.Use(mux.MiddlewareFunc(webmiddleware.Cookies(hashKey, blockKey)))
	root.Path("/set").HandlerFunc(testSetCookie(t, "test_value")).Methods(http.MethodGet)
	root.Path("/get").HandlerFunc(testGetCookie(t, "test_value", true)).Methods(http.MethodGet)
	root.Path("/del").HandlerFunc(testDeleteCookie()).Methods(http.MethodGet)
	root.Path("/no_cookie").HandlerFunc(testGetCookie(t, "", false)).Methods(http.MethodGet)
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		panic(err)
	}
	// doGET issues an in-process GET, replaying jar cookies on the request and
	// storing any Set-Cookie headers from the response back into the jar.
	doGET := func(path string) *http.Response {
		req := httptest.NewRequest(http.MethodGet, path, nil)
		req.URL.Scheme, req.URL.Host = "http", req.Host
		for _, c := range jar.Cookies(req.URL) {
			req.AddCookie(c)
		}
		rec := httptest.NewRecorder()
		root.ServeHTTP(rec, req)
		resp := rec.Result()
		resp.Request = req
		if cookies := resp.Cookies(); len(cookies) > 0 {
			jar.SetCookies(req.URL, cookies)
		}
		return resp
	}
	// Nothing set yet: the jar stays empty.
	resp := doGET("/no_cookie")
	cookies := jar.Cookies(resp.Request.URL)
	a.So(cookies, should.BeEmpty)
	// Deleting before setting leaves the jar empty.
	resp = doGET("/del")
	cookies = jar.Cookies(resp.Request.URL)
	a.So(cookies, should.BeEmpty)
	// Setting stores exactly one cookie named "test_cookie".
	resp = doGET("/set")
	cookies = jar.Cookies(resp.Request.URL)
	if a.So(cookies, should.HaveLength, 1) {
		cookie := cookies[0]
		a.So(cookie.Name, should.Equal, "test_cookie")
	}
	// Reading does not remove the cookie.
	resp = doGET("/get")
	cookies = jar.Cookies(resp.Request.URL)
	if a.So(cookies, should.HaveLength, 1) {
		cookie := cookies[0]
		a.So(cookie.Name, should.Equal, "test_cookie")
	}
	// Deleting clears it again.
	resp = doGET("/del")
	cookies = jar.Cookies(resp.Request.URL)
	a.So(cookies, should.BeEmpty)
	resp = doGET("/no_cookie")
	cookies = jar.Cookies(resp.Request.URL)
	a.So(cookies, should.BeEmpty)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 756
} | [
2830,
3393,
20616,
1155,
353,
8840,
836,
8,
341,
33698,
1669,
59807,
7121,
9523,
2822,
11323,
1669,
54836,
7121,
1155,
340,
47996,
1592,
1669,
4194,
36868,
7,
18,
17,
340,
50333,
1592,
1669,
4194,
36868,
7,
21,
19,
692,
33698,
9046,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSKUUpdate(t *testing.T) {
sku, err := Update("sku_123", &stripe.SKUParams{
Inventory: &stripe.InventoryParams{
Type: stripe.String(string(stripe.SKUInventoryTypeBucket)),
Value: stripe.String(string(stripe.SKUInventoryValueInStock)),
},
})
assert.Nil(t, err)
assert.NotNil(t, sku)
} | explode_data.jsonl/11292 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 131
} | [
2830,
3393,
89755,
4289,
1155,
353,
8840,
836,
8,
341,
1903,
12133,
11,
1848,
1669,
5549,
445,
39929,
62,
16,
17,
18,
497,
609,
61233,
90929,
52,
4870,
515,
197,
197,
22319,
25,
609,
61233,
74260,
4870,
515,
298,
27725,
25,
220,
455... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestResolvingCorrelatedAggregate checks that correlated aggregates in scalar
// subqueries are pulled up into the outer query block: each case pairs a SQL
// statement with the expected logical plan string after optimization.
func TestResolvingCorrelatedAggregate(t *testing.T) {
	tests := []struct {
		sql  string
		best string
	}{
		{
			sql:  "select (select count(a)) from t",
			best: "Apply{DataScan(t)->Aggr(count(test.t.a))->Dual->Projection->MaxOneRow}->Projection",
		},
		{
			sql:  "select (select count(n.a) from t) from t n",
			best: "Apply{DataScan(n)->Aggr(count(test.t.a))->DataScan(t)->Projection->MaxOneRow}->Projection",
		},
		{
			sql:  "select (select sum(count(a))) from t",
			best: "Apply{DataScan(t)->Aggr(count(test.t.a))->Dual->Aggr(sum(Column#13))->MaxOneRow}->Projection",
		},
		{
			sql:  "select (select sum(count(n.a)) from t) from t n",
			best: "Apply{DataScan(n)->Aggr(count(test.t.a))->DataScan(t)->Aggr(sum(Column#25))->MaxOneRow}->Projection",
		},
		{
			sql:  "select (select cnt from (select count(a) as cnt) n) from t",
			best: "Apply{DataScan(t)->Aggr(count(test.t.a))->Dual->Projection->MaxOneRow}->Projection",
		},
		{
			sql:  "select sum(a), sum(a), count(a), (select count(a)) from t",
			best: "Apply{DataScan(t)->Aggr(sum(test.t.a),count(test.t.a))->Dual->Projection->MaxOneRow}->Projection",
		},
	}
	s := createPlannerSuite()
	ctx := context.TODO()
	for i, tt := range tests {
		// The comment carries the failing case's SQL into every assertion.
		comment := fmt.Sprintf("case:%v sql:%s", i, tt.sql)
		stmt, err := s.p.ParseOneStmt(tt.sql, "", "")
		require.NoError(t, err, comment)
		err = Preprocess(s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is}))
		require.NoError(t, err, comment)
		p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is)
		require.NoError(t, err, comment)
		// Run only the rule flags relevant to this test so the expected plan
		// strings stay stable across unrelated optimizer changes.
		p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagEliminateProjection|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan))
		require.NoError(t, err, comment)
		require.Equal(t, tt.best, ToString(p), comment)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 781
} | [
2830,
3393,
1061,
19648,
10580,
9721,
64580,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
30633,
220,
914,
198,
197,
92410,
914,
198,
197,
59403,
197,
197,
515,
298,
30633,
25,
220,
330,
1742,
320,
1742,
1760,
2877... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestAccPermissionsNotebooks is an acceptance test covering permissions on
// workspace directories and notebooks: it creates a directory + notebook,
// grants access at each level, and verifies the resulting ACL entities.
func TestAccPermissionsNotebooks(t *testing.T) {
	permissionsTestHelper(t, func(permissionsAPI PermissionsAPI, user, group string,
		ef func(string) PermissionsEntity) {
		workspaceAPI := workspace.NewNotebooksAPI(context.Background(), permissionsAPI.client)
		// Create a scratch directory namespaced by the test group.
		notebookDir := fmt.Sprintf("/Testing/%s/something", group)
		err := workspaceAPI.Mkdirs(notebookDir)
		require.NoError(t, err)
		// Import a minimal Python notebook ("MSsx" is base64 content) into it.
		notebookPath := fmt.Sprintf("%s/Dummy", notebookDir)
		err = workspaceAPI.Create(workspace.ImportRequest{
			Path:      notebookPath,
			Content:   "MSsx",
			Format:    "SOURCE",
			Language:  "PYTHON",
			Overwrite: true,
		})
		require.NoError(t, err)
		defer func() {
			assert.NoError(t, workspaceAPI.Delete(notebookDir, true))
		}()
		// Directory-level permissions use the "/directories/<id>" object path.
		folder, err := workspaceAPI.Read(fmt.Sprintf("/Testing/%s", group))
		require.NoError(t, err)
		directoryID := fmt.Sprintf("/directories/%d", folder.ObjectID)
		require.NoError(t, permissionsAPI.Update(directoryID, AccessControlChangeList{
			AccessControlList: []AccessControlChange{
				{
					GroupName:       "users",
					PermissionLevel: "CAN_READ",
				},
			},
		}))
		entity := ef(directoryID)
		assert.Equal(t, "directory", entity.ObjectType)
		assert.Len(t, entity.AccessControlList, 1)
		// Notebook-level permissions use the "/notebooks/<id>" object path.
		notebook, err := workspaceAPI.Read(notebookPath)
		require.NoError(t, err)
		notebookID := fmt.Sprintf("/notebooks/%d", notebook.ObjectID)
		require.NoError(t, permissionsAPI.Update(notebookID, AccessControlChangeList{
			AccessControlList: []AccessControlChange{
				{
					UserName:        user,
					PermissionLevel: "CAN_MANAGE",
				},
				{
					GroupName:       group,
					PermissionLevel: "CAN_EDIT",
				},
			},
		}))
		entity = ef(notebookID)
		assert.Equal(t, "notebook", entity.ObjectType)
		assert.Len(t, entity.AccessControlList, 2)
		// Deleting the directory grant leaves the directory entity with no ACLs.
		require.NoError(t, permissionsAPI.Delete(directoryID))
		entity = ef(directoryID)
		assert.Len(t, entity.AccessControlList, 0)
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 761
} | [
2830,
3393,
14603,
23851,
9112,
12110,
1155,
353,
8840,
836,
8,
341,
197,
29900,
2271,
5511,
1155,
11,
2915,
7,
29900,
7082,
53357,
7082,
11,
1196,
11,
1874,
914,
345,
197,
197,
823,
2915,
3609,
8,
53357,
3030,
8,
341,
197,
197,
429... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestCatalog_ListServices_Stale verifies AllowStale semantics on a follower:
// non-stale reads must be forwarded to a leader (and fail when none exists),
// while stale reads are served locally even after the leader is gone.
func TestCatalog_ListServices_Stale(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.Bootstrap = false     // Disable bootstrap
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	args := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	args.AllowStale = true
	var out structs.IndexedServices
	// Inject a node
	if err := s1.fsm.State().EnsureNode(3, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
		t.Fatalf("err: %v", err)
	}
	// All queries go through s2, which is not (yet) joined to the leader.
	codec := rpcClient(t, s2)
	defer codec.Close()
	// Run the query, do not wait for leader, never any contact with leader, should fail
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err == nil || err.Error() != structs.ErrNoLeader.Error() {
		t.Fatalf("expected %v but got err: %v and %v", structs.ErrNoLeader, err, out)
	}
	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) { r.Check(wantRaft([]*Server{s1, s2})) })
	waitForLeader(s1, s2)
	testrpc.WaitForLeader(t, s2.RPC, "dc1")
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Should find the services
	if len(out.Services) != 1 {
		t.Fatalf("bad: %#v", out.Services)
	}
	if !out.KnownLeader {
		t.Fatalf("should have a leader: %v", out)
	}
	// Take the leader down so only stale reads can be served.
	s1.Leave()
	s1.Shutdown()
	testrpc.WaitUntilNoLeader(t, s2.RPC, "dc1")
	args.AllowStale = false
	// Since the leader is now down, non-stale query should fail now
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err == nil || err.Error() != structs.ErrNoLeader.Error() {
		t.Fatalf("expected %v but got err: %v and %v", structs.ErrNoLeader, err, out)
	}
	// With stale, request should still work
	args.AllowStale = true
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Should find old service
	if len(out.Services) != 1 {
		t.Fatalf("bad: %#v", out)
	}
	if out.KnownLeader {
		t.Fatalf("should not have a leader anymore: %#v", out)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 945
} | [
2830,
3393,
41606,
27104,
11025,
70645,
1574,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
48532,
16,
11,
274,
16,
1669,
1273,
5475,
2354,
2648,
1155,
11,
2915,
1337,
353,
2648,
8,
341,
197,
1444,
875,
3140,
1043,
3057,
284,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestGenerateCommonStubs compares the interface stub emitted for a service
// descriptor against a golden snippet, ignoring leading/trailing whitespace.
func TestGenerateCommonStubs(t *testing.T) {
	cases := []struct {
		Service *descriptor.ServiceDescriptorProto
		Output  string
	}{
		{
			Service: &descriptor.ServiceDescriptorProto{
				Name: proto.String("math"),
				Method: []*descriptor.MethodDescriptorProto{
					{
						Name:       proto.String("Sqrt"),
						InputType:  proto.String("SqrtInput"),
						OutputType: proto.String("SqrtOutput"),
					},
					{
						Name:       proto.String("Add"),
						InputType:  proto.String("AddInput"),
						OutputType: proto.String("AddOutput"),
					},
				},
			},
			Output: `
// Math is an interface satisfied by the generated client and
// which must be implemented by the object wrapped by the server.
type Math interface {
	Sqrt(in *SqrtInput, out *SqrtOutput) error
	Add(in *AddInput, out *AddOutput) error
}
`,
		},
	}
	for _, c := range cases {
		// Capture generator output in an in-memory buffer.
		buf := new(bytes.Buffer)
		p := Plugin{compileGen: fakeCompileGen{&generator.Generator{Buffer: buf}}}
		p.GenerateCommonStubs(c.Service)
		// TrimSpace + "\n" normalizes the golden string's surrounding blank lines.
		if got, want := buf.String(), strings.TrimSpace(c.Output)+"\n"; got != want {
			t.Fail()
			t.Logf("GenerateCommonStubs")
			t.Logf("  Input: %s", c.Service)
			t.Logf("  Got:\n%s", got)
			t.Logf("  Want:\n%s", want)
		}
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 523
} | [
2830,
3393,
31115,
10839,
623,
15738,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
91619,
353,
53132,
13860,
11709,
31549,
198,
197,
80487,
220,
914,
198,
197,
59403,
197,
197,
515,
298,
91619,
25,
609,
53132,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestRuleNoWhere(t *testing.T) {
common.Log.Debug("Entering function: %s", common.GetFunctionName())
sqls := [][]string{
{"select col from tbl",
"delete from tbl",
"update tbl set col=1",
"insert into city (country_id) select country_id from country",
},
{
`select 1;`,
},
}
for _, sql := range sqls[0] {
q, err := NewQuery4Audit(sql)
if err == nil {
rule := q.RuleNoWhere()
if rule.Item != "CLA.001" && rule.Item != "CLA.014" && rule.Item != "CLA.015" {
t.Error("Rule not match:", rule.Item, "Expect : CLA.001/CLA.014/CLA.015")
}
} else {
t.Error("sqlparser.Parse Error:", err)
}
}
for _, sql := range sqls[1] {
q, err := NewQuery4Audit(sql)
if err == nil {
rule := q.RuleNoWhere()
if rule.Item != "OK" {
t.Error("Rule not match:", rule.Item, "Expect : OK")
}
} else {
t.Error("sqlparser.Parse Error:", err)
}
}
common.Log.Debug("Exiting function: %s", common.GetFunctionName())
} | explode_data.jsonl/76745 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 420
} | [
2830,
3393,
11337,
2753,
9064,
1155,
353,
8840,
836,
8,
341,
83825,
5247,
20345,
445,
82867,
729,
25,
1018,
82,
497,
4185,
2234,
5152,
675,
2398,
30633,
82,
1669,
52931,
917,
515,
197,
197,
4913,
1742,
1375,
504,
21173,
756,
298,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func Test_byteToString(t *testing.T) {
type args struct {
bts []byte
}
tests := []struct {
name string
args args
want string
}{
{name: "nil", args: args{bts: []byte{}}, want: "0"},
{name: "100", args: args{bts: []byte{1, 0, 0}}, want: "010000"},
{name: "a1", args: args{bts: []byte{0xa1}}, want: "a1"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := byteToString(tt.args.bts); got != tt.want {
t.Errorf("byteToInt64() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/14398 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 247
} | [
2830,
3393,
19737,
5870,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
2233,
2576,
3056,
3782,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
31215,
2827,
198,
197,
50780,
914,
198,
197,
59403,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_replace(t *testing.T) {
type args struct {
key string
}
tests := []struct {
name string
args args
want string
}{
{name: "case", args: args{key: "mysql.select"}, want: "mysql_select"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := replace(tt.args.key); got != tt.want {
t.Errorf("replace() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/14406 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 177
} | [
2830,
3393,
10633,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
23634,
914,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
31215,
2827,
198,
197,
50780,
914,
198,
197,
59403,
197,
197,
47006,
25,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestDaemonSetWithNodeSelectorLaunchesPods runs the DaemonSet controller plus
// scheduler against an API server and checks that node affinity (a label term
// OR a node-name field term) schedules pods on exactly the matching nodes.
func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateNamespaceOrDie(clientset, "simple-daemonset-test", t)
		defer framework.DeleteNamespaceOrDie(clientset, ns, t)
		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		informers.Start(ctx.Done())
		go dc.Run(ctx, 2)
		// Start Scheduler
		setupScheduler(ctx, t, clientset, informers)
		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		// Two NodeSelectorTerms are ORed: pods land on nodes labeled
		// zone=test OR on the node literally named "node-1".
		ds.Spec.Template.Spec.Affinity = &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      "zone",
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"test"},
								},
							},
						},
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      metav1.ObjectNameField,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"node-1"},
								},
							},
						},
					},
				},
			},
		}
		_, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)
		// Nodes 0-1: unlabeled (only "node-1" matches via the field term).
		addNodes(nodeClient, 0, 2, nil, t)
		// Two nodes with labels
		addNodes(nodeClient, 2, 2, map[string]string{
			"zone": "test",
		}, t)
		// Nodes 4-5: unlabeled, match nothing.
		addNodes(nodeClient, 4, 2, nil, t)
		// Expect 3 pods total: node-1 plus the two zone=test nodes.
		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 3, t)
		validateDaemonSetStatus(dsClient, ds.Name, 3, t)
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 893
} | [
2830,
3393,
89177,
1649,
2354,
1955,
5877,
32067,
288,
23527,
82,
1155,
353,
8840,
836,
8,
341,
2023,
4854,
19816,
1155,
11,
2915,
1155,
353,
8840,
836,
11,
8282,
353,
27635,
909,
64,
7291,
1649,
4289,
19816,
8,
341,
197,
27873,
24911... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestPointerLoops(t *testing.T) {
// Pointer loops through map entries, pointers and slices
// Regression test for issue #341
protest.AllowRecording(t)
withTestProcess("testvariables2", t, func(p *proc.Target, fixture protest.Fixture) {
assertNoError(p.Continue(), t, "Continue()")
for _, expr := range []string{"mapinf", "ptrinf", "sliceinf"} {
t.Logf("requesting %s", expr)
v := evalVariable(p, t, expr)
t.Logf("%s: %v\n", expr, v)
}
})
} | explode_data.jsonl/56237 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 178
} | [
2830,
3393,
9084,
4262,
3721,
1155,
353,
8840,
836,
8,
341,
197,
322,
21635,
29753,
1526,
2415,
10695,
11,
27454,
323,
34254,
198,
197,
322,
47470,
1273,
369,
4265,
671,
18,
19,
16,
198,
197,
776,
1944,
29081,
52856,
1155,
340,
46948,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestSupportsDTypeWithFType1XFS checks d_type support detection on an XFS
// filesystem created with ftype=1 (dirent type info recorded), expecting true.
func TestSupportsDTypeWithFType1XFS(t *testing.T) {
	testSupportsDType(t, true, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=1")
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 59
} | [
2830,
3393,
7916,
82,
35,
929,
2354,
37,
929,
16,
55,
8485,
1155,
353,
8840,
836,
8,
341,
18185,
7916,
82,
35,
929,
1155,
11,
830,
11,
330,
24452,
3848,
1993,
3848,
497,
6523,
76,
497,
330,
66083,
28,
15,
497,
6523,
77,
497,
330... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestModeGetCurve maps each supported key mode to its elliptic curve and
// checks that a non-EC mode (RSA) is rejected with an error.
func TestModeGetCurve(t *testing.T) {
	type args struct {
		mode int
	}
	tests := []struct {
		name    string
		args    args
		want    elliptic.Curve
		wantErr bool
	}{
		{name: "sm2", args: args{mode: crypto.Sm2p256v1}, want: gm.GetSm2Curve(), wantErr: false},
		{name: "k1", args: args{mode: crypto.Secp256k1}, want: secp256k1.S256(), wantErr: false},
		{name: "r1", args: args{mode: crypto.Secp256r1}, want: elliptic.P256(), wantErr: false},
		{name: "384", args: args{mode: crypto.Secp384r1}, want: elliptic.P384(), wantErr: false},
		{name: "521", args: args{mode: crypto.Secp521r1}, want: elliptic.P521(), wantErr: false},
		// RSA has no curve; the function is expected to fail.
		{name: "err", args: args{mode: crypto.Rsa4096}, want: nil, wantErr: true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := ModeGetCurve(tt.args.mode)
			if (err != nil) != tt.wantErr {
				t.Errorf("ModeGetCurve() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ModeGetCurve() got = %v, want %v", got, tt.want)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 485
} | [
2830,
3393,
3636,
1949,
31325,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
60247,
526,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
262,
914,
198,
197,
31215,
262,
2827,
198,
197,
50780,
262,
77783,
292,
727... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSubscribe(t *testing.T) {
var tr *http.Request
var b []byte
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
tr = r
b, _ = ioutil.ReadAll(r.Body)
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("{\"results\": [{}, {\"error\": \"error_reason\"}]}"))
}))
defer ts.Close()
ctx := context.Background()
client, err := NewClient(ctx, testMessagingConfig)
if err != nil {
t.Fatal(err)
}
client.iidEndpoint = ts.URL
resp, err := client.SubscribeToTopic(ctx, []string{"id1", "id2"}, "test-topic")
if err != nil {
t.Fatal(err)
}
checkIIDRequest(t, b, tr, iidSubscribe)
checkTopicMgtResponse(t, resp)
} | explode_data.jsonl/3632 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 277
} | [
2830,
3393,
28573,
1155,
353,
8840,
836,
8,
341,
2405,
489,
353,
1254,
9659,
198,
2405,
293,
3056,
3782,
198,
57441,
1669,
54320,
70334,
7121,
5475,
19886,
89164,
18552,
3622,
1758,
37508,
11,
435,
353,
1254,
9659,
8,
341,
197,
25583,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestPNGWriter_QRFile writes a QR code PNG through a writer built with
// recovery level L, then checks the package-level PNG helper fails.
func TestPNGWriter_QRFile(t *testing.T) {
	w := NewPNGWriter(L)
	assert.NoError(t, w.QRFile("file_qr_2.png", "Hello world!"))
	// NOTE(review): this is expected to error, presumably because the
	// "test/" directory is absent or unwritable — confirm against the suite.
	assert.Error(t, PNG("test/file_qr.png", L, "Hello world!"))
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 85
} | [
2830,
3393,
59824,
6492,
13337,
49,
1703,
1155,
353,
8840,
836,
8,
341,
6692,
1669,
1532,
59824,
6492,
4957,
340,
6948,
35699,
1155,
11,
289,
10003,
49,
1703,
445,
1192,
8976,
81,
62,
17,
3508,
497,
330,
9707,
1879,
0,
28075,
6948,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestRejectsHS256 ensures checkAlgorithm rejects a JWS signed with the
// symmetric HS256 algorithm, and that the rejection message lists the
// supported asymmetric algorithms.
func TestRejectsHS256(t *testing.T) {
	hs256JWSBody := `
     {
       "header": {
         "alg": "HS256",
         "jwk": {
           "kty": "RSA",
           "n": "vrjT",
           "e": "AQAB"
         }
       },
       "payload": "aGkK",
       "signature": "ghTIjrhiRl2pQ09vAkUUBbF5KziJdhzOTB-okM9SPRzU8Hyj0W1H5JA1Zoc-A-LuJGNAtYYHWqMw1SeZbT0l9FHcbMPeWDaJNkHS9jz5_g_Oyol8vcrWur2GDtB2Jgw6APtZKrbuGATbrF7g41Wijk6Kk9GXDoCnlfOQOhHhsrFFcWlCPLG-03TtKD6EBBoVBhmlp8DRLs7YguWRZ6jWNaEX-1WiRntBmhLqoqQFtvZxCBw_PRuaRw_RZBd1x2_BNYqEdOmVNC43UHMSJg3y_3yrPo905ur09aUTscf-C_m4Sa4M0FuDKn3bQ_pFrtz-aCCq6rcTIyxYpDqNvHMT2Q"
     }
	`
	hs256JWS, err := jose.ParseSigned(hs256JWSBody)
	if err != nil {
		t.Fatal("Unable to parse hs256JWSBody")
	}
	hs256JWK := hs256JWS.Signatures[0].Header.JSONWebKey
	err = checkAlgorithm(hs256JWK, hs256JWS)
	if err == nil {
		t.Fatalf("checkAlgorithm did not reject JWS with alg: 'HS256'")
	}
	// The error must enumerate the accepted signature algorithms verbatim.
	expected := "signature type 'HS256' in JWS header is not supported, expected one of RS256, ES256, ES384 or ES512"
	if err.Error() != expected {
		t.Fatalf("checkAlgorithm rejected JWS with alg: 'none', but for wrong reason: got '%s', wanted %s", err.Error(), expected)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 631
} | [
2830,
3393,
78413,
82,
11961,
17,
20,
21,
1155,
353,
8840,
836,
8,
341,
81692,
17,
20,
21,
41,
7433,
5444,
1669,
22074,
197,
197,
515,
298,
197,
1,
2708,
788,
341,
571,
197,
1,
23881,
788,
330,
11961,
17,
20,
21,
756,
571,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestAlternative builds a message with plain-text and HTML alternatives and
// compares the full rendered MIME output (multipart/alternative with
// quoted-printable encoding of non-ASCII text) against a golden string.
func TestAlternative(t *testing.T) {
	m := NewMessage()
	m.SetHeader("From", "from@example.com")
	m.SetHeader("To", "to@example.com")
	// Non-ASCII characters exercise the quoted-printable encoder below.
	m.SetBody("text/plain", "¡Hola, señor!")
	m.AddAlternative("text/html", "¡<b>Hola</b>, <i>señor</i>!</h1>")
	want := &message{
		from: "from@example.com",
		to:   []string{"to@example.com"},
		content: "From: from@example.com\r\n" +
			"To: to@example.com\r\n" +
			"Content-Type: multipart/alternative;\r\n" +
			" boundary=_BOUNDARY_1_\r\n" +
			"\r\n" +
			"--_BOUNDARY_1_\r\n" +
			"Content-Type: text/plain; charset=UTF-8\r\n" +
			"Content-Transfer-Encoding: quoted-printable\r\n" +
			"\r\n" +
			"=C2=A1Hola, se=C3=B1or!\r\n" +
			"--_BOUNDARY_1_\r\n" +
			"Content-Type: text/html; charset=UTF-8\r\n" +
			"Content-Transfer-Encoding: quoted-printable\r\n" +
			"\r\n" +
			"=C2=A1<b>Hola</b>, <i>se=C3=B1or</i>!</h1>\r\n" +
			"--_BOUNDARY_1_--\r\n",
	}
	testMessage(t, m, 1, want)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 497
} | [
2830,
3393,
75763,
1155,
353,
8840,
836,
8,
341,
2109,
1669,
1532,
2052,
741,
2109,
4202,
4047,
445,
3830,
497,
330,
1499,
35487,
905,
1138,
2109,
4202,
4047,
445,
1249,
497,
330,
983,
35487,
905,
1138,
2109,
4202,
5444,
445,
1318,
36... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestExistingPasswordAndKeyfile_AreUsedWhenTheSecretExists(t *testing.T) {
mdb := newScramReplicaSet()
mgr := client.NewManager(&mdb)
c := mgr.Client
keyFileNsName := mdb.GetAgentKeyfileSecretNamespacedName()
err := secret.CreateOrUpdate(c,
secret.Builder().
SetName(keyFileNsName.Name).
SetNamespace(keyFileNsName.Namespace).
SetField(scram.AgentKeyfileKey, "my-keyfile").
Build(),
)
assert.NoError(t, err)
passwordNsName := mdb.GetAgentPasswordSecretNamespacedName()
err = secret.CreateOrUpdate(c,
secret.Builder().
SetName(passwordNsName.Name).
SetNamespace(passwordNsName.Namespace).
SetField(scram.AgentPasswordKey, "my-pass").
Build(),
)
assert.NoError(t, err)
r := NewReconciler(mgr)
res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}})
assertReconciliationSuccessful(t, res, err)
currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace})
assert.NoError(t, err)
assert.NotEmpty(t, currentAc.Auth.KeyFileWindows)
assert.False(t, currentAc.Auth.Disabled)
assert.Equal(t, "my-keyfile", currentAc.Auth.Key)
assert.NotEmpty(t, currentAc.Auth.KeyFileWindows)
assert.Equal(t, "my-pass", currentAc.Auth.AutoPwd)
} | explode_data.jsonl/80682 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 504
} | [
2830,
3393,
53067,
4876,
3036,
1592,
1192,
1566,
265,
22743,
4498,
785,
19773,
15575,
1155,
353,
8840,
836,
8,
341,
2109,
1999,
1669,
501,
3326,
2396,
18327,
15317,
1649,
741,
2109,
901,
1669,
2943,
7121,
2043,
2099,
78127,
692,
1444,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNewStdWriter(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
if writer == nil {
t.Fatalf("NewStdWriter with an invalid StdType should not return nil.")
}
} | explode_data.jsonl/52152 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 69
} | [
2830,
3393,
3564,
22748,
6492,
1155,
353,
8840,
836,
8,
341,
38959,
1669,
1532,
22748,
6492,
1956,
30158,
909,
47560,
11,
42517,
411,
340,
743,
6916,
621,
2092,
341,
197,
3244,
30762,
445,
3564,
22748,
6492,
448,
458,
8318,
42517,
929,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestCAgetUserAttrValue(t *testing.T) {
testDirClean(t)
ca, err := newCA(configFile, &CAConfig{}, &srv, false)
if err != nil {
t.Fatal("newCA failed: ", err)
}
_, err = ca.getUserAttrValue("maryjokopechne", "delmont")
t.Log("getUserAttrValue err: ", err)
if err == nil {
t.Error("getUserAttrValue sould have failed: no such user")
}
CAclean(ca, t)
} | explode_data.jsonl/82700 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 157
} | [
2830,
3393,
5049,
28440,
13371,
1130,
1155,
353,
8840,
836,
8,
341,
18185,
6184,
27529,
1155,
340,
197,
924,
11,
1848,
1669,
501,
5049,
8754,
1703,
11,
609,
5049,
2648,
22655,
609,
40020,
11,
895,
340,
743,
1848,
961,
2092,
341,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestContainsWrites(t *testing.T) {
// Scenario I: Nil HashedRwSet in collection
col := &rwsetutil.CollHashedRwSet{
CollectionName: "col1",
}
assert.False(t, containsWrites("tx", "ns", col))
// Scenario II: No writes in collection
col.HashedRwSet = &kvrwset.HashedRWSet{}
assert.False(t, containsWrites("tx", "ns", col))
// Scenario III: Some writes in collection
col.HashedRwSet.HashedWrites = append(col.HashedRwSet.HashedWrites, &kvrwset.KVWriteHash{})
assert.True(t, containsWrites("tx", "ns", col))
} | explode_data.jsonl/36190 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 203
} | [
2830,
3393,
23805,
93638,
1155,
353,
8840,
836,
8,
341,
197,
322,
58663,
358,
25,
32274,
6531,
291,
49,
86,
1649,
304,
4426,
198,
46640,
1669,
609,
31768,
746,
1314,
727,
965,
6370,
291,
49,
86,
1649,
515,
197,
94120,
675,
25,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReproduceTaskRequestCommand(t *testing.T) {
t.Parallel()
Convey(`Make sure we can execute commands.`, t, func() {
c := reproduceRun{}
c.init(&testAuthFlags{})
ctx := context.Background()
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
cmd = exec.CommandContext(ctx, "cmd", "/c", "echo", "chicken")
} else {
cmd = exec.CommandContext(ctx, "echo", "chicken")
}
var stdout bytes.Buffer
cmd.Stdout = &stdout
err := c.executeTaskRequestCommand(ctx, &swarming.SwarmingRpcsTaskRequest{}, cmd)
So(err, ShouldBeNil)
if runtime.GOOS == "windows" {
So(stdout.String(), ShouldEqual, "chicken\r\n")
} else {
So(stdout.String(), ShouldEqual, "chicken\n")
}
})
} | explode_data.jsonl/74873 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 284
} | [
2830,
3393,
693,
97274,
6262,
1900,
4062,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
93070,
5617,
5809,
8078,
2704,
582,
646,
9026,
11293,
13,
7808,
259,
11,
2915,
368,
341,
197,
1444,
1669,
22800,
6727,
16094,
197,
1444,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestM(t *testing.T) {
const want = "mamimumemo"
for _, v := range [2]string{"まみむめも", "マミムメモ"} {
got, err := KanaToRomaji(v)
assert.Equal(t, want, got)
assert.Nil(t, err)
}
} | explode_data.jsonl/11306 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 98
} | [
2830,
3393,
44,
1155,
353,
8840,
836,
8,
341,
4777,
1366,
284,
330,
76,
309,
3295,
6726,
1837,
2023,
8358,
348,
1669,
2088,
508,
17,
30953,
4913,
16714,
63021,
125471,
60904,
31877,
497,
330,
67659,
124486,
89962,
38750,
125046,
9207,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestDB_Consistency(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
}); err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
if err := db.Update(func(tx *bolt.Tx) error {
if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
if err := db.Update(func(tx *bolt.Tx) error {
if p, _ := tx.Page(0); p == nil {
t.Fatal("expected page")
} else if p.Type != "meta" {
t.Fatalf("unexpected page type: %s", p.Type)
}
if p, _ := tx.Page(1); p == nil {
t.Fatal("expected page")
} else if p.Type != "meta" {
t.Fatalf("unexpected page type: %s", p.Type)
}
if p, _ := tx.Page(2); p == nil {
t.Fatal("expected page")
} else if p.Type != "free" {
t.Fatalf("unexpected page type: %s", p.Type)
}
if p, _ := tx.Page(3); p == nil {
t.Fatal("expected page")
} else if p.Type != "free" {
t.Fatalf("unexpected page type: %s", p.Type)
}
if p, _ := tx.Page(4); p == nil {
t.Fatal("expected page")
} else if p.Type != "leaf" {
t.Fatalf("unexpected page type: %s", p.Type)
}
if p, _ := tx.Page(5); p == nil {
t.Fatal("expected page")
} else if p.Type != "freelist" {
t.Fatalf("unexpected page type: %s", p.Type)
}
if p, _ := tx.Page(6); p != nil {
t.Fatal("unexpected page")
}
return nil
}); err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/27488 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 710
} | [
2830,
3393,
3506,
920,
2382,
47094,
1155,
353,
8840,
836,
8,
341,
20939,
1669,
15465,
5002,
3506,
741,
16867,
2927,
50463,
7925,
741,
743,
1848,
1669,
2927,
16689,
18552,
27301,
353,
52433,
81362,
8,
1465,
341,
197,
197,
6878,
1848,
166... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestKeyTable(t *testing.T) {
table := types.NewKeyTable()
require.Panics(t, func() { table.RegisterType(types.ParamSetPair{[]byte(""), nil, nil}) })
require.Panics(t, func() { table.RegisterType(types.ParamSetPair{[]byte("!@#$%"), nil, nil}) })
require.Panics(t, func() { table.RegisterType(types.ParamSetPair{[]byte("hello,"), nil, nil}) })
require.Panics(t, func() { table.RegisterType(types.ParamSetPair{[]byte("hello"), nil, nil}) })
require.NotPanics(t, func() {
table.RegisterType(types.ParamSetPair{keyBondDenom, string("stake"), validateBondDenom})
})
require.NotPanics(t, func() {
table.RegisterType(types.ParamSetPair{keyMaxValidators, uint16(100), validateMaxValidators})
})
require.Panics(t, func() {
table.RegisterType(types.ParamSetPair{keyUnbondingTime, time.Duration(1), nil})
})
require.NotPanics(t, func() {
table.RegisterType(types.ParamSetPair{keyUnbondingTime, time.Duration(1), validateMaxValidators})
})
require.NotPanics(t, func() {
newTable := types.NewKeyTable()
newTable.RegisterParamSet(¶ms{})
})
require.Panics(t, func() { table.RegisterParamSet(¶ms{}) })
require.Panics(t, func() { types.NewKeyTable(types.ParamSetPair{[]byte(""), nil, nil}) })
require.NotPanics(t, func() {
types.NewKeyTable(
types.ParamSetPair{[]byte("test"), string("stake"), validateBondDenom},
types.ParamSetPair{[]byte("test2"), uint16(100), validateMaxValidators},
)
})
} | explode_data.jsonl/47788 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 529
} | [
2830,
3393,
1592,
2556,
1155,
353,
8840,
836,
8,
341,
26481,
1669,
4494,
7121,
1592,
2556,
2822,
17957,
1069,
276,
1211,
1155,
11,
2915,
368,
314,
1965,
81703,
52613,
9580,
1649,
12443,
90,
1294,
3782,
86076,
2092,
11,
2092,
5410,
2751,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDatabaseUpdateNamespace(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
d, mapCh, _ := newTestDatabase(t, ctrl, Bootstrapped)
require.NoError(t, d.Open())
defer func() {
close(mapCh)
require.NoError(t, d.Close())
leaktest.CheckTimeout(t, time.Second)()
}()
// retrieve the update channel to track propatation
updateCh := d.opts.NamespaceInitializer().(*mockNsInitializer).updateCh
// check initial namespaces
nses := d.Namespaces()
require.Len(t, nses, 2)
// construct new namespace Map
ropts := defaultTestNs1Opts.RetentionOptions().SetRetentionPeriod(2000 * time.Hour)
md1, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts.SetRetentionOptions(ropts))
require.NoError(t, err)
md2, err := namespace.NewMetadata(defaultTestNs2ID, defaultTestNs2Opts)
require.NoError(t, err)
nsMap, err := namespace.NewMap([]namespace.Metadata{md1, md2})
require.NoError(t, err)
// update the database watch with new Map
mapCh <- nsMap
// wait till the update has propagated
<-updateCh
<-updateCh
time.Sleep(10 * time.Millisecond)
// ensure the namespaces have old properties
nses = d.Namespaces()
require.Len(t, nses, 2)
ns1, ok := d.Namespace(defaultTestNs1ID)
require.True(t, ok)
require.Equal(t, defaultTestNs1Opts, ns1.Options())
ns2, ok := d.Namespace(defaultTestNs2ID)
require.True(t, ok)
require.Equal(t, defaultTestNs2Opts, ns2.Options())
} | explode_data.jsonl/46537 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 528
} | [
2830,
3393,
5988,
4289,
22699,
1155,
353,
8840,
836,
8,
341,
84381,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
16867,
23743,
991,
18176,
2822,
2698,
11,
2415,
1143,
11,
716,
1669,
501,
2271,
5988,
1155,
11,
23743,
11,
15004,
495,
56... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUserNamespaces(t *testing.T) {
t.Parallel()
t.Run("WritableRootFS", func(t *testing.T) { testUserNamespaces(t, false) })
// see #1373 and runc#1572
t.Run("ReadonlyRootFS", func(t *testing.T) { testUserNamespaces(t, true) })
} | explode_data.jsonl/39645 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 95
} | [
2830,
3393,
1474,
7980,
27338,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
3244,
16708,
445,
39623,
8439,
8485,
497,
2915,
1155,
353,
8840,
836,
8,
314,
1273,
1474,
7980,
27338,
1155,
11,
895,
8,
2751,
197,
322,
1490,
671,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTimeoutHandlerStartTimerWhenServing(t *testing.T) {
if testing.Short() {
t.Skip("skipping sleeping test in -short mode")
}
defer afterTest(t)
var handler HandlerFunc = func(w ResponseWriter, _ *Request) {
w.WriteHeader(StatusNoContent)
}
timeout := 300 * time.Millisecond
ts := httptest.NewServer(TimeoutHandler(handler, timeout, ""))
defer ts.Close()
// Issue was caused by the timeout handler starting the timer when
// was created, not when the request. So wait for more than the timeout
// to ensure that's not the case.
time.Sleep(2 * timeout)
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
if res.StatusCode != StatusNoContent {
t.Errorf("got res.StatusCode %d, want %v", res.StatusCode, StatusNoContent)
}
} | explode_data.jsonl/22426 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 267
} | [
2830,
3393,
7636,
3050,
3479,
10105,
4498,
50,
19505,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
3244,
57776,
445,
4886,
5654,
21127,
1273,
304,
481,
8676,
3856,
1138,
197,
532,
16867,
1283,
2271,
1155,
340,
2405,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMultiPortServiceBindAddr(t *testing.T) {
ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
service1 := makeTestService("ns1", "svc1", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "TCP", 1234, 0, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "TCP", 1235, 0, 0)
})
service2 := makeTestService("ns1", "svc1", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "TCP", 1234, 0, 0)
})
service3 := makeTestService("ns1", "svc1", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "TCP", 1234, 0, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "TCP", 1235, 0, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port3", "UDP", 1236, 0, 0)
})
fp.servicesSynced = true
// first, add multi-port service1
fp.OnServiceAdd(service1)
fp.syncProxyRules()
remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
// should only remain address "172.16.55.4"
if len(remainingAddrs) != 1 {
t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
}
if remainingAddrs[0] != "172.16.55.4" {
t.Errorf("Expected remaining address should be %s, got %s", "172.16.55.4", remainingAddrs[0])
}
// update multi-port service1 to single-port service2
fp.OnServiceUpdate(service1, service2)
fp.syncProxyRules()
remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
// should still only remain address "172.16.55.4"
if len(remainingAddrs) != 1 {
t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
} else if remainingAddrs[0] != "172.16.55.4" {
t.Errorf("Expected remaining address should be %s, got %s", "172.16.55.4", remainingAddrs[0])
}
// update single-port service2 to multi-port service3
fp.OnServiceUpdate(service2, service3)
fp.syncProxyRules()
remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
// should still only remain address "172.16.55.4"
if len(remainingAddrs) != 1 {
t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
} else if remainingAddrs[0] != "172.16.55.4" {
t.Errorf("Expected remaining address should be %s, got %s", "172.16.55.4", remainingAddrs[0])
}
// delete multi-port service3
fp.OnServiceDelete(service3)
fp.syncProxyRules()
remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
// all addresses should be unbound
if len(remainingAddrs) != 0 {
t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 0, len(remainingAddrs))
}
} | explode_data.jsonl/44374 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1215
} | [
2830,
3393,
20358,
7084,
1860,
9950,
13986,
1155,
353,
8840,
836,
8,
341,
8230,
417,
1669,
66068,
480,
267,
477,
7121,
52317,
741,
46531,
11562,
1669,
45475,
267,
477,
7121,
52317,
741,
46531,
746,
1669,
5997,
746,
1944,
7121,
52317,
86... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMetricsServiceV2DeleteLogMetric(t *testing.T) {
var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{}
mockMetrics.err = nil
mockMetrics.reqs = nil
mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse)
var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]")
var request = &loggingpb.DeleteLogMetricRequest{
MetricName: formattedMetricName,
}
c, err := NewMetricsClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
err = c.DeleteLogMetric(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
} | explode_data.jsonl/77785 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 278
} | [
2830,
3393,
27328,
1860,
53,
17,
6435,
2201,
54310,
1155,
353,
8840,
836,
8,
341,
2405,
3601,
2582,
353,
17485,
22357,
18464,
11180,
284,
609,
17485,
22357,
18464,
11180,
31483,
77333,
27328,
18441,
284,
2092,
198,
77333,
27328,
1327,
263... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
for _, strategy := range updateStrategies() {
podSpec := v1.PodSpec{
NodeName: "port-conflict",
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 666,
}},
}},
}
manager, podControl, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
node := newNode("port-conflict", nil)
manager.nodeStore.Add(node)
manager.podStore.Add(&v1.Pod{
Spec: podSpec,
})
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
ds := newDaemonSet("critical")
ds.Spec.UpdateStrategy = *strategy
ds.Spec.Template.Spec = podSpec
setDaemonSetCritical(ds)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
} | explode_data.jsonl/50347 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 337
} | [
2830,
3393,
7084,
57974,
1955,
89177,
21468,
2623,
32067,
42008,
23527,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
8282,
1669,
2088,
2647,
2580,
69388,
368,
341,
197,
3223,
347,
8327,
1669,
348,
16,
88823,
8327,
515,
298,
30217,
675,
25,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestLoadPolicy(t *testing.T) {
workspace := &iottwinmaker.GetWorkspaceOutput{
S3Location: aws.String("dummyS3Location"),
Arn: aws.String("dummyArn"),
WorkspaceId: aws.String("dummyWorkspaceId"),
}
policy, err := LoadPolicy(workspace)
require.NoError(t, err)
require.NotEmpty(t, policy)
} | explode_data.jsonl/75533 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 127
} | [
2830,
3393,
5879,
13825,
1155,
353,
8840,
836,
8,
341,
197,
42909,
1669,
609,
72,
1716,
7526,
25766,
2234,
45981,
5097,
515,
197,
7568,
18,
4707,
25,
220,
31521,
6431,
445,
31390,
50,
18,
4707,
4461,
197,
197,
58331,
25,
260,
31521,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTypeChecks(t *testing.T) {
tests := []tomlTypeCheckTest{
{"bool", true, 0},
{"bool", false, 0},
{"int", int(2), 0},
{"int8", int8(2), 0},
{"int16", int16(2), 0},
{"int32", int32(2), 0},
{"int64", int64(2), 0},
{"uint", uint(2), 0},
{"uint8", uint8(2), 0},
{"uint16", uint16(2), 0},
{"uint32", uint32(2), 0},
{"uint64", uint64(2), 0},
{"float32", float32(3.14), 0},
{"float64", float64(3.14), 0},
{"string", "lorem ipsum", 0},
{"time", time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC), 0},
{"stringlist", []string{"hello", "hi"}, 1},
{"stringlistptr", &[]string{"hello", "hi"}, 1},
{"stringarray", [2]string{"hello", "hi"}, 1},
{"stringarrayptr", &[2]string{"hello", "hi"}, 1},
{"timelist", []time.Time{time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, 1},
{"timelistptr", &[]time.Time{time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, 1},
{"timearray", [1]time.Time{time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, 1},
{"timearrayptr", &[1]time.Time{time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, 1},
{"objectlist", []tomlTypeCheckTest{}, 2},
{"objectlistptr", &[]tomlTypeCheckTest{}, 2},
{"objectarray", [2]tomlTypeCheckTest{{}, {}}, 2},
{"objectlistptr", &[2]tomlTypeCheckTest{{}, {}}, 2},
{"object", tomlTypeCheckTest{}, 3},
{"objectptr", &tomlTypeCheckTest{}, 3},
}
for _, test := range tests {
expected := []bool{false, false, false, false}
expected[test.typ] = true
result := []bool{
isPrimitive(reflect.TypeOf(test.item)),
isOtherSequence(reflect.TypeOf(test.item)),
isTreeSequence(reflect.TypeOf(test.item)),
isTree(reflect.TypeOf(test.item)),
}
if !reflect.DeepEqual(expected, result) {
t.Errorf("Bad type check on %q: expected %v, got %v", test.name, expected, result)
}
}
} | explode_data.jsonl/46313 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 781
} | [
2830,
3393,
929,
49820,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
37401,
75,
929,
3973,
2271,
515,
197,
197,
4913,
2641,
497,
830,
11,
220,
15,
1583,
197,
197,
4913,
2641,
497,
895,
11,
220,
15,
1583,
197,
197,
4913,
396,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestEntry_SelectSnapLeft(t *testing.T) {
e, window := setupSelection(false)
defer teardownImageTest(window)
c := window.Canvas()
assert.Equal(t, 1, e.CursorRow)
assert.Equal(t, 5, e.CursorColumn)
test.AssertImageMatches(t, "entry/selection_initial.png", c.Capture())
typeKeys(e, keyShiftLeftUp, fyne.KeyLeft)
assert.Equal(t, 1, e.CursorRow)
assert.Equal(t, 2, e.CursorColumn)
test.AssertImageMatches(t, "entry/selection_snap_left.png", c.Capture())
} | explode_data.jsonl/57319 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 188
} | [
2830,
3393,
5874,
58073,
61871,
5415,
1155,
353,
8840,
836,
8,
341,
7727,
11,
3241,
1669,
6505,
11177,
3576,
340,
16867,
49304,
1906,
2271,
15906,
340,
1444,
1669,
3241,
54121,
2822,
6948,
12808,
1155,
11,
220,
16,
11,
384,
29929,
3102,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStashSave(t *testing.T) {
runner := oscommands.NewFakeRunner(t).
ExpectGitArgs([]string{"stash", "save", "A stash message"}, "", nil)
instance := buildStashCommands(commonDeps{runner: runner})
assert.NoError(t, instance.Save("A stash message"))
runner.CheckForMissingCalls()
} | explode_data.jsonl/41418 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 101
} | [
2830,
3393,
623,
988,
8784,
1155,
353,
8840,
836,
8,
341,
197,
41736,
1669,
2643,
24270,
7121,
52317,
19486,
1155,
4292,
197,
35911,
46562,
4117,
10556,
917,
4913,
49771,
497,
330,
6628,
497,
330,
32,
64037,
1943,
14345,
7342,
2092,
340... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCheckThread(t *testing.T) {
orgID := "ORG_ID_VALUE"
id := "ID_VALUE"
pid := "PID_VALUE"
want := &Thread{ID: id}
wantOrg := &Thread{ID: orgID}
wantPID := &Thread{ID: id, PID: pid}
wantOrgWithPID := &Thread{ID: orgID, PID: pid}
type args struct {
thread *Thread
ID string
}
tests := []struct {
name string
args args
want *Thread
}{
{"was nil", args{thread: nil, ID: id}, want},
{"was empty", args{thread: &Thread{}, ID: id}, want},
{"was pid", args{thread: &Thread{ID: "", PID: pid}, ID: id}, wantPID},
{"was org", args{thread: &Thread{ID: orgID}, ID: id}, wantOrg},
{"was org and pid", args{thread: &Thread{ID: orgID, PID: pid}, ID: id}, wantOrgWithPID},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := CheckThread(tt.args.thread, tt.args.ID); !reflect.DeepEqual(got, tt.want) {
t.Errorf("CheckThread() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/45531 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 400
} | [
2830,
3393,
3973,
6855,
1155,
353,
8840,
836,
8,
341,
87625,
915,
1669,
330,
44927,
3450,
7476,
698,
15710,
1669,
330,
915,
7476,
698,
78799,
1669,
330,
33751,
7476,
698,
50780,
1669,
609,
6855,
90,
915,
25,
877,
532,
50780,
42437,
16... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestCreateExtension(t *testing.T) {
ts := newTestServer(t)
defer ts.Stop()
// ---- CreateExtensionRequest with no Annotations fails ----
_, err := ts.Client.CreateExtension(context.Background(), &api.CreateExtensionRequest{})
assert.Error(t, err)
assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err))
// --- With no name also fails
_, err = ts.Client.CreateExtension(context.Background(),
&api.CreateExtensionRequest{
Annotations: &api.Annotations{
Name: "",
Labels: map[string]string{"foo": "bar"},
},
},
)
assert.Error(t, err)
assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err))
extensionName := "extension1"
// ---- creating an extension with a valid extension object passed in succeeds ----
validRequest := api.CreateExtensionRequest{Annotations: &api.Annotations{Name: extensionName}}
resp, err := ts.Client.CreateExtension(context.Background(), &validRequest)
assert.NoError(t, err)
assert.NotNil(t, resp)
// for sanity, check that the stored extension still has the extension data
var storedExtension *api.Extension
ts.Store.View(func(tx store.ReadTx) {
storedExtension = store.GetExtension(tx, resp.Extension.ID)
})
assert.NotNil(t, storedExtension)
assert.Equal(t, extensionName, storedExtension.Annotations.Name)
// ---- creating an extension with the same name, even if it's the exact same spec, fails due to a name conflict ----
_, err = ts.Client.CreateExtension(context.Background(), &validRequest)
assert.Error(t, err)
assert.Equal(t, codes.AlreadyExists, testutils.ErrorCode(err), testutils.ErrorDesc(err))
// creating an extension with an empty string as a name fails
hasNoName := api.CreateExtensionRequest{
Annotations: &api.Annotations{
Labels: map[string]string{"name": "nope"},
},
Description: "some text",
}
_, err = ts.Client.CreateExtension(
context.Background(), &hasNoName,
)
assert.Error(t, err)
assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err))
} | explode_data.jsonl/48202 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 686
} | [
2830,
3393,
4021,
12049,
1155,
353,
8840,
836,
8,
341,
57441,
1669,
501,
2271,
5475,
1155,
340,
16867,
10591,
30213,
2822,
197,
322,
11304,
4230,
12049,
1900,
448,
902,
93332,
14525,
11304,
198,
197,
6878,
1848,
1669,
10591,
11716,
7251,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBazelBin(t *testing.T) {
testutil.Run(t, "", func(t *testutil.T) {
t.Override(&util.DefaultExecCommand, testutil.CmdRunOut(
"bazel info bazel-bin --arg1 --arg2",
"/absolute/path/bin\n",
))
bazelBin, err := bazelBin(context.Background(), ".", &latest_v1.BazelArtifact{
BuildArgs: []string{"--arg1", "--arg2"},
})
t.CheckNoError(err)
t.CheckDeepEqual("/absolute/path/bin", bazelBin)
})
} | explode_data.jsonl/124 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 185
} | [
2830,
3393,
33,
68326,
28794,
1155,
353,
8840,
836,
8,
341,
18185,
1314,
16708,
1155,
11,
7342,
2915,
1155,
353,
1944,
1314,
836,
8,
341,
197,
3244,
90008,
2099,
1314,
13275,
10216,
4062,
11,
1273,
1314,
64512,
6727,
2662,
1006,
298,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetCurlCommand(t *testing.T) {
form := url.Values{}
form.Add("age", "10")
form.Add("name", "Hudson")
body := form.Encode()
req, _ := http.NewRequest(http.MethodPost, "http://foo.com/cats", ioutil.NopCloser(bytes.NewBufferString(body)))
req.Header.Set("API_KEY", "123")
libCommand, _ := http2curl.GetCurlCommand(req)
command, _ := GetCurlCommand(req)
if libCommand.String() != command.String() {
t.Errorf("expected library command: %s and command: %s to match", libCommand, command)
}
// Output:
// curl -X 'POST' -d 'age=10&name=Hudson' -H 'Api_key: 123' 'http://foo.com/cats'
} | explode_data.jsonl/60998 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 237
} | [
2830,
3393,
1949,
34,
1085,
4062,
1155,
353,
8840,
836,
8,
341,
37410,
1669,
2515,
35145,
16094,
37410,
1904,
445,
424,
497,
330,
16,
15,
1138,
37410,
1904,
445,
606,
497,
330,
39,
31678,
1138,
35402,
1669,
1352,
50217,
2822,
24395,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestParseStrict(t *testing.T) {
for _, th := range testParseStrict {
_, err := parseStrict(th.in)
assert.NotEqual(t, nil, err)
}
_, err := parseStrict(`{"hello"}`)
assert.NotEqual(t, nil, err)
_, err = parseStrict("2009-08-12T22:15Z")
assert.Equal(t, nil, err)
} | explode_data.jsonl/32330 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 127
} | [
2830,
3393,
14463,
41857,
1155,
353,
8840,
836,
8,
1476,
2023,
8358,
270,
1669,
2088,
1273,
14463,
41857,
341,
197,
197,
6878,
1848,
1669,
4715,
41857,
24365,
1858,
340,
197,
6948,
15000,
2993,
1155,
11,
2092,
11,
1848,
340,
197,
630,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestJob_response(t *testing.T) {
type fields struct {
session session.ServiceFormatter
info Response
}
type args struct {
request *http.Request
}
tests := []struct {
name string
fields fields
args args
want Response
wantErr bool
}{
{
name: "Passing",
fields: fields{
session: &mockSessionFormatter{
client: mockHTTPClient(func(req *http.Request) *http.Response {
resp := `{
"apiVersion": 44.0,
"columnDelimiter": "COMMA",
"concurrencyMode": "Parallel",
"contentType": "CSV",
"contentUrl": "services/v44.0/jobs",
"createdById": "1234",
"createdDate": "1/1/1970",
"externalIdFieldName": "namename",
"id": "9876",
"jobType": "V2Ingest",
"lineEnding": "LF",
"object": "Account",
"operation": "Insert",
"state": "Open",
"systemModstamp": "1/1/1980"
}`
return &http.Response{
StatusCode: http.StatusOK,
Status: "Good",
Body: ioutil.NopCloser(strings.NewReader(resp)),
Header: make(http.Header),
}
}),
},
},
args: args{
request: testNewRequest(),
},
want: Response{
APIVersion: 44.0,
ColumnDelimiter: "COMMA",
ConcurrencyMode: "Parallel",
ContentType: "CSV",
ContentURL: "services/v44.0/jobs",
CreatedByID: "1234",
CreatedDate: "1/1/1970",
ExternalIDFieldName: "namename",
ID: "9876",
JobType: "V2Ingest",
LineEnding: "LF",
Object: "Account",
Operation: "Insert",
State: "Open",
SystemModstamp: "1/1/1980",
},
wantErr: false,
},
{
name: "failing",
fields: fields{
session: &mockSessionFormatter{
client: mockHTTPClient(func(req *http.Request) *http.Response {
resp := `[
{
"fields" : [ "Id" ],
"message" : "Account ID: id value of incorrect type: 001900K0001pPuOAAU",
"errorCode" : "MALFORMED_ID"
}
]`
return &http.Response{
StatusCode: http.StatusBadRequest,
Status: "Bad",
Body: ioutil.NopCloser(strings.NewReader(resp)),
Header: make(http.Header),
}
}),
},
},
args: args{
request: testNewRequest(),
},
want: Response{},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
j := &Job{
session: tt.fields.session,
info: tt.fields.info,
}
got, err := j.response(tt.args.request)
if (err != nil) != tt.wantErr {
t.Errorf("Job.response() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("Job.response() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/19878 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1520
} | [
2830,
3393,
12245,
9655,
1155,
353,
8840,
836,
8,
341,
13158,
5043,
2036,
341,
197,
25054,
3797,
13860,
14183,
198,
197,
27043,
262,
5949,
198,
197,
532,
13158,
2827,
2036,
341,
197,
23555,
353,
1254,
9659,
198,
197,
532,
78216,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestClientAuthPublicKey(t *testing.T) {
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(testSigners["rsa"]),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err != nil {
t.Fatalf("unable to dial remote side: %s", err)
}
} | explode_data.jsonl/6931 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 117
} | [
2830,
3393,
2959,
5087,
61822,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
609,
2959,
2648,
515,
197,
31672,
25,
330,
1944,
872,
756,
197,
197,
5087,
25,
3056,
5087,
3523,
515,
298,
73146,
8850,
8623,
7264,
388,
1183,
60869,
46442,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestMemberAddInvalidRole(t *testing.T) {
tc, _, other, _, name := memberSetupMultiple(t)
defer tc.Cleanup()
if _, err := AddMember(context.TODO(), tc.G, name, other.Username, keybase1.TeamRole(8888)); err == nil {
t.Errorf("AddMember worked with invalid role")
}
assertRole(tc, name, other.Username, keybase1.TeamRole_NONE)
} | explode_data.jsonl/13514 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 126
} | [
2830,
3393,
9366,
2212,
7928,
9030,
1155,
353,
8840,
836,
8,
341,
78255,
11,
8358,
1008,
11,
8358,
829,
1669,
4462,
21821,
32089,
1155,
340,
16867,
17130,
727,
60639,
2822,
743,
8358,
1848,
1669,
2691,
9366,
5378,
90988,
1507,
17130,
12... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestAuthPushDisabled(t *testing.T) {
cfg := &conf.AdvancedConfig{
SplitUpdateQueueSize: 10000,
SegmentUpdateQueueSize: 10000,
}
logger := logging.NewLogger(nil)
synchronizer := &pushMocks.LocalSyncMock{}
authMock := &serviceMocks.MockAuthClient{
AuthenticateCall: func() (*dtos.Token, error) {
return &dtos.Token{PushEnabled: false}, nil
},
}
feedback := make(chan int64, 100)
telemetryMockStorage := mocks.MockTelemetryStorage{
RecordSyncLatencyCall: func(resource int, tm int64) {
if resource != telemetry.TokenSync {
t.Error("Resource should be token")
}
},
}
manager, err := NewManager(logger, synchronizer, cfg, feedback, authMock, telemetryMockStorage, dtos.Metadata{}, nil)
if err != nil {
t.Error("no error should be returned upon manager instantiation", err)
return
}
manager.Start()
message := <-feedback
if message != StatusNonRetryableError {
t.Error("push manager should have proapgated a non-retryable error. Got: ", message)
}
if manager.nextRefresh != nil {
t.Error("no next refresh should have been set if startup wasn't successful")
}
} | explode_data.jsonl/44440 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 392
} | [
2830,
3393,
5087,
16644,
25907,
1155,
353,
8840,
836,
8,
341,
50286,
1669,
609,
6135,
17865,
88087,
2648,
515,
197,
7568,
2292,
4289,
7554,
1695,
25,
256,
220,
16,
15,
15,
15,
15,
345,
197,
7568,
71080,
4289,
7554,
1695,
25,
220,
16... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDBUpdateInvalidID(t *testing.T) {
n := model.Name{Name: "Invalid"}
body, _ := json.Marshal(n)
request, _ := http.NewRequest("PUT", endpointPrefix+"/name/update/300000", bytes.NewReader(body))
request.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, _ := client.Do(request)
if resp.StatusCode != 404 {
t.Errorf("TestDBUpdate: response code is not 404, error: %d", resp.StatusCode)
}
} | explode_data.jsonl/64862 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 155
} | [
2830,
3393,
3506,
4289,
7928,
915,
1155,
353,
8840,
836,
8,
341,
9038,
1669,
1614,
2967,
63121,
25,
330,
7928,
16707,
35402,
11,
716,
1669,
2951,
37271,
1445,
340,
23555,
11,
716,
1669,
1758,
75274,
445,
6221,
497,
14887,
14335,
27569,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestWindowsEvalSymlinks(t *testing.T) {
testenv.MustHaveSymlink(t)
tmpDir, err := ioutil.TempDir("", "TestWindowsEvalSymlinks")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
// /tmp may itself be a symlink! Avoid the confusion, although
// it means trusting the thing we're testing.
tmpDir, err = filepath.EvalSymlinks(tmpDir)
if err != nil {
t.Fatal(err)
}
if len(tmpDir) < 3 {
t.Fatalf("tmpDir path %q is too short", tmpDir)
}
if tmpDir[1] != ':' {
t.Fatalf("tmpDir path %q must have drive letter in it", tmpDir)
}
test := EvalSymlinksTest{"test/linkabswin", tmpDir[:3]}
// Create the symlink farm using relative paths.
testdirs := append(EvalSymlinksTestDirs, test)
for _, d := range testdirs {
var err error
path := simpleJoin(tmpDir, d.path)
if d.dest == "" {
err = os.Mkdir(path, 0755)
} else {
err = os.Symlink(d.dest, path)
}
if err != nil {
t.Fatal(err)
}
}
path := simpleJoin(tmpDir, test.path)
testEvalSymlinks(t, path, test.dest)
testEvalSymlinksAfterChdir(t, path, ".", test.dest)
testEvalSymlinksAfterChdir(t,
path,
filepath.VolumeName(tmpDir)+".",
test.dest)
testEvalSymlinksAfterChdir(t,
simpleJoin(tmpDir, "test"),
simpleJoin("..", test.path),
test.dest)
testEvalSymlinksAfterChdir(t, tmpDir, test.path, test.dest)
} | explode_data.jsonl/28121 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 550
} | [
2830,
3393,
13164,
54469,
34667,
1014,
15504,
1155,
353,
8840,
836,
8,
341,
18185,
3160,
50463,
12116,
34667,
44243,
1155,
692,
20082,
6184,
11,
1848,
1669,
43144,
65009,
6184,
19814,
330,
2271,
13164,
54469,
34667,
1014,
15504,
1138,
743,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestIATNOC(t *testing.T) {
file := NewFile().SetHeader(mockFileHeader())
iatBatch := IATBatch{}
iatBatch.SetHeader(mockIATBatchHeaderFF())
iatBatch.Header.IATIndicator = IATCOR
iatBatch.Header.StandardEntryClassCode = "COR"
iatBatch.AddEntry(mockIATEntryDetail())
iatBatch.Entries[0].TransactionCode = CheckingReturnNOCCredit
iatBatch.Entries[0].Addenda10 = mockAddenda10()
iatBatch.Entries[0].Addenda11 = mockAddenda11()
iatBatch.Entries[0].Addenda12 = mockAddenda12()
iatBatch.Entries[0].Addenda13 = mockAddenda13()
iatBatch.Entries[0].Addenda14 = mockAddenda14()
iatBatch.Entries[0].Addenda15 = mockAddenda15()
iatBatch.Entries[0].Addenda16 = mockAddenda16()
iatBatch.Entries[0].Addenda98 = mockIATAddenda98()
iatBatch.Entries[0].Category = CategoryNOC
if err := iatBatch.Create(); err != nil {
t.Fatal(err)
}
file.AddIATBatch(iatBatch)
if err := file.Create(); err != nil {
t.Errorf("%T: %s", err, err)
}
if err := file.Validate(); err != nil {
t.Errorf("%T: %s", err, err)
}
b := &bytes.Buffer{}
f := NewWriter(b)
if err := f.Write(file); err != nil {
t.Errorf("%T: %s", err, err)
}
r := NewReader(strings.NewReader(b.String()))
_, err := r.Read()
if err != nil {
t.Errorf("%T: %s", err, err)
}
if err = r.File.Validate(); err != nil {
t.Errorf("%T: %s", err, err)
}
} | explode_data.jsonl/68677 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 567
} | [
2830,
3393,
40,
828,
45,
7612,
1155,
353,
8840,
836,
8,
341,
17661,
1669,
1532,
1703,
1005,
1649,
4047,
30389,
1703,
4047,
2398,
8230,
266,
21074,
1669,
358,
828,
21074,
16094,
8230,
266,
21074,
4202,
4047,
30389,
40,
828,
21074,
4047,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestConfigTx_CommitAbortFinality(t *testing.T) {
t.Skip("Add/Remove/Update node is a config update, TODO in issue: https://github.com/hyperledger-labs/orion-server/issues/40")
clientCryptoDir := testutils.GenerateTestClientCrypto(t, []string{"admin", "server"})
testServer, _, _, err := SetupTestServer(t, clientCryptoDir)
defer func() {
if testServer != nil {
_ = testServer.Stop()
}
}()
require.NoError(t, err)
StartTestServer(t, testServer)
serverPort, err := testServer.Port()
require.NoError(t, err)
bcdb := createDBInstance(t, clientCryptoDir, serverPort)
for i := 0; i < 3; i++ {
session := openUserSession(t, bcdb, "admin", clientCryptoDir)
tx, err := session.ConfigTx()
require.NoError(t, err)
config, err := tx.GetClusterConfig()
require.NoError(t, err)
node1 := config.Nodes[0]
node1.Port++
nodeId := node1.Id
nodePort := node1.Port
err = tx.UpdateClusterNode(config.Nodes[0], config.ConsensusConfig.Members[0])
require.NoError(t, err)
assertTxFinality(t, TxFinality(i), tx, session)
config, err = tx.GetClusterConfig()
require.EqualError(t, err, ErrTxSpent.Error())
require.Nil(t, config)
err = tx.AddClusterNode(&types.NodeConfig{}, nil)
require.EqualError(t, err, ErrTxSpent.Error())
err = tx.DeleteClusterNode("id")
require.EqualError(t, err, ErrTxSpent.Error())
err = tx.UpdateClusterNode(&types.NodeConfig{}, nil)
require.EqualError(t, err, ErrTxSpent.Error())
err = tx.AddAdmin(&types.Admin{})
require.EqualError(t, err, ErrTxSpent.Error())
err = tx.DeleteAdmin("id")
require.EqualError(t, err, ErrTxSpent.Error())
err = tx.UpdateAdmin(&types.Admin{})
require.EqualError(t, err, ErrTxSpent.Error())
if TxFinality(i) != TxFinalityAbort {
tx, err = session.ConfigTx()
require.NoError(t, err)
config, err := tx.GetClusterConfig()
require.NoError(t, err)
node1 := config.Nodes[0]
require.Equal(t, nodeId, node1.Id)
require.Equal(t, nodePort, node1.Port)
}
}
} | explode_data.jsonl/65333 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 797
} | [
2830,
3393,
2648,
31584,
16946,
1763,
85891,
19357,
487,
1155,
353,
8840,
836,
8,
341,
3244,
57776,
445,
2212,
14,
13021,
14,
4289,
2436,
374,
264,
2193,
2647,
11,
5343,
304,
4265,
25,
3703,
1110,
5204,
905,
7530,
39252,
50704,
2852,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestSet_Erase(t *testing.T) {
s := New()
for i := 0; i < 1000; i++ {
s.Insert(i)
}
for i := 0; i < 1000; i++ {
s.Erase(i)
}
assert.Equal(t, 0, s.Size())
} | explode_data.jsonl/71963 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 90
} | [
2830,
3393,
1649,
2089,
10632,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
1532,
741,
2023,
600,
1669,
220,
15,
26,
600,
366,
220,
16,
15,
15,
15,
26,
600,
1027,
341,
197,
1903,
23142,
1956,
340,
197,
532,
2023,
600,
1669,
220,
15,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestMimeType_MimeTypeWithParams(t *testing.T) {
t.Parallel()
for _, prov := range providerMimeTypeWithParams() {
prov := prov
t.Run(prov.name, func(t *testing.T) {
t.Parallel()
act := prov.b.StringWithParams()
if prov.exp != act {
t.Fatalf("Mime type is not equal to expected.\nExpected: %s\nActual: %s\n", prov.exp, act)
}
})
}
} | explode_data.jsonl/45420 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 161
} | [
2830,
3393,
97081,
1245,
34872,
2354,
4870,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
2023,
8358,
2543,
1669,
2088,
9109,
97081,
2354,
4870,
368,
341,
197,
197,
42947,
1669,
2543,
198,
197,
3244,
16708,
10340,
85,
2644,
11,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestServerDisconnection(t *testing.T) {
runTestWithEphemeralBackend(t, func(test *testHelper) {
client := test.newClient()
err := client.BranchWorkspace(test.ctx, quantumfs.NullSpaceName,
quantumfs.NullSpaceName, quantumfs.NullSpaceName,
"test", "test", "test")
test.AssertNoErr(err)
test.restartServer()
test.WaitFor("new server to start", func() bool {
num, err := client.NumTypespaces(test.ctx)
if err != nil || num != 1 {
return false
}
return true
})
})
} | explode_data.jsonl/9160 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 196
} | [
2830,
3393,
5475,
4839,
7742,
1155,
353,
8840,
836,
8,
341,
56742,
2271,
2354,
36,
59941,
3253,
29699,
1155,
11,
2915,
8623,
353,
1944,
5511,
8,
341,
197,
25291,
1669,
1273,
4618,
2959,
741,
197,
9859,
1669,
2943,
97249,
45981,
8623,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestRateBreakerSampleSize(t *testing.T) {
cb := NewRateBreaker(0.5, 100)
cb.Fail()
if cb.Tripped() {
t.Fatal("expected rate breaker to not be tripped yet")
}
} | explode_data.jsonl/60807 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 69
} | [
2830,
3393,
11564,
22524,
261,
17571,
1695,
1155,
353,
8840,
836,
8,
341,
63810,
1669,
1532,
11564,
22524,
261,
7,
15,
13,
20,
11,
220,
16,
15,
15,
340,
63810,
57243,
2822,
743,
9858,
836,
461,
6924,
368,
341,
197,
3244,
26133,
445,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestRootDirShouldReturnRootDir(t *testing.T) {
if buildpipeline.IsRegularBuild() {
// this test only apply to "regular build" pipeline
dir := filepath.Join(tmpDir, "TestRootDirShouldReturnRootDir")
chroot := NewChroot(dir, isExistingDir)
assert.Equal(t, dir, chroot.RootDir())
}
} | explode_data.jsonl/38793 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 105
} | [
2830,
3393,
8439,
6184,
14996,
5598,
8439,
6184,
1155,
353,
8840,
836,
8,
341,
743,
1936,
51258,
4506,
30404,
11066,
368,
341,
197,
197,
322,
419,
1273,
1172,
3796,
311,
330,
22308,
1936,
1,
15301,
198,
197,
48532,
1669,
26054,
22363,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestObjxMap(t *testing.T) {
val := (Map)(New(1))
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").ObjxMap())
assert.Equal(t, val, New(m).Get("value").MustObjxMap())
assert.Equal(t, (Map)(New(nil)), New(m).Get("nothing").ObjxMap())
assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1)))
assert.Panics(t, func() {
New(m).Get("age").MustObjxMap()
})
} | explode_data.jsonl/23392 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 181
} | [
2830,
3393,
5261,
87,
2227,
1155,
353,
8840,
836,
8,
1476,
19302,
1669,
320,
2227,
2376,
3564,
7,
16,
1171,
2109,
1669,
2415,
14032,
31344,
6257,
4913,
957,
788,
1044,
11,
330,
41212,
788,
2092,
532,
6948,
12808,
1155,
11,
1044,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEmbeddedMethods(t *testing.T) {
typ := TypeOf((*OuterInt)(nil))
if typ.NumMethod() != 1 || typ.Method(0).Func.Pointer() != ValueOf((*OuterInt).M).Pointer() {
t.Errorf("Wrong method table for OuterInt: (m=%p)", (*OuterInt).M)
for i := 0; i < typ.NumMethod(); i++ {
m := typ.Method(i)
t.Errorf("\t%d: %s %#x\n", i, m.Name, m.Func.Pointer())
}
}
i := &InnerInt{3}
if v := ValueOf(i).Method(0).Call(nil)[0].Int(); v != 3 {
t.Errorf("i.M() = %d, want 3", v)
}
o := &OuterInt{1, InnerInt{2}}
if v := ValueOf(o).Method(0).Call(nil)[0].Int(); v != 2 {
t.Errorf("i.M() = %d, want 2", v)
}
f := (*OuterInt).M
if v := f(o); v != 2 {
t.Errorf("f(o) = %d, want 2", v)
}
} | explode_data.jsonl/29569 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 343
} | [
2830,
3393,
83466,
17856,
1155,
353,
8840,
836,
8,
341,
25314,
1669,
3990,
2124,
26609,
51322,
1072,
2376,
8385,
1171,
743,
3582,
39847,
3523,
368,
961,
220,
16,
1369,
3582,
20798,
7,
15,
568,
9626,
41275,
368,
961,
5162,
2124,
26609,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestGetAppVersion(t *testing.T) {
assert := assert.New(t)
var appVersion = new(model.AppVersion)
appVersion.AppID = 1
appVersion.DbVersion = "3.69"
db := model.NewAppVersion(appVersion)
db, opt := model.GetAppVersion(appVersion.AppID)
if assert.NotNil(db) {
assert.Equal(nil, db.Error, "they should be equal")
assert.Equal("3.69", opt.DbVersion, "they should be equal")
}
} | explode_data.jsonl/53342 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 150
} | [
2830,
3393,
1949,
2164,
5637,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
340,
2405,
906,
5637,
284,
501,
7635,
5105,
5637,
340,
28236,
5637,
5105,
915,
284,
220,
16,
198,
28236,
5637,
45332,
5637,
284,
330,
18,
13,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestJSON(t *testing.T) {
if shoulSkip() {
t.Skip("API_KEY required")
return
}
setup()
parameter := map[string]string{
"api_key": os.Getenv("API_KEY"),
"q": "Coffee",
"location": "Portland"}
client := NewGoogleSearch(parameter)
rsp, err := client.GetJSON()
if err != nil {
t.Error("unexpected error", err)
return
}
result := rsp.OrganicResults[0]
if len(result.Title) == 0 {
t.Error("empty title in local results")
return
}
} | explode_data.jsonl/19462 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 195
} | [
2830,
3393,
5370,
1155,
353,
8840,
836,
8,
341,
743,
557,
10965,
35134,
368,
341,
197,
3244,
57776,
445,
7082,
6600,
2567,
1138,
197,
853,
198,
197,
630,
84571,
741,
197,
16181,
1669,
2415,
14032,
30953,
515,
197,
197,
1,
2068,
3097,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestTextDiff(t *testing.T) {
f1, f2 := filepath.Join("testdata", "textdiff.go.a"), filepath.Join("testdata", "textdiff.go.b")
a, err := ioutil.ReadFile(f1)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadFile(f2)
if err != nil {
t.Fatal(err)
}
// The diff output may differ in detail but be equivalent
// since the edits can be ambgious.
dpOutput := append([]string{}, diffOutput...)
dpOutput[1] = "14,16d15"
dpOutput[2] = "30a30,41"
myersOutput := append([]string{}, diffOutput...)
myersOutput[2] = dpOutput[2]
insertedAll, deletedAll := processDiffOutput(t, filepath.Join("testdata", "textdiff.go.a.b"))
// insertedAll[0] = strings.TrimPrefix(insertedAll[0], "\n") + "\n"
deletedAll[1] = "\n" + strings.TrimSuffix(deletedAll[1], "\n")
insertedAll[2] = "\n" + strings.TrimSuffix(insertedAll[2], "\n")
for e, tc := range []struct {
engine func(a, b interface{}) lcs.EditScript
output []string
}{
{textdiff.DP, dpOutput},
{textdiff.Myers, myersOutput},
} {
if e != 0 {
continue
}
diffs := textdiff.DiffByLinesUsing(a, b, tc.engine)
if got, want := diffs.NumGroups(), len(tc.output); got != want {
t.Errorf("%v: got %v, want %v", e, got, want)
}
for i := 0; i < diffs.NumGroups(); i++ {
dg := diffs.Group(i)
if got, want := dg.Summary(), tc.output[i]; got != want {
t.Errorf("%v.%v: got %v, want %v", e, i, got, want)
}
if got, want := dg.Inserted(), insertedAll[i]; got != want {
t.Errorf("%v.%v: got __%v__, want __%v__", e, i, got, want)
}
if got, want := dg.Deleted(), deletedAll[i]; got != want {
t.Errorf("%v.%v: got %v, want %v", e, i, got, want)
}
}
}
} | explode_data.jsonl/9741 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 740
} | [
2830,
3393,
1178,
21751,
1155,
353,
8840,
836,
8,
341,
1166,
16,
11,
282,
17,
1669,
26054,
22363,
445,
92425,
497,
330,
1318,
13490,
18002,
5849,
3975,
26054,
22363,
445,
92425,
497,
330,
1318,
13490,
18002,
948,
1138,
11323,
11,
1848,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReWriteRawTx(t *testing.T) {
txHex1 := "0a05636f696e73122c18010a281080c2d72f222131477444795771577233553637656a7663776d333867396e7a6e7a434b58434b7120a08d0630a696c0b3f78dd9ec083a2131477444795771577233553637656a7663776d333867396e7a6e7a434b58434b71"
in := &types.ReWriteRawTx{
Tx: txHex1,
Fee: 29977777777,
Expire: "130s",
To: "aabbccdd",
Index: 0,
}
data, err := g.ReWriteTx(getOkCtx(), in)
assert.Nil(t, err)
assert.NotNil(t, data.Data)
rtTx := hex.EncodeToString(data.Data)
assert.NotEqual(t, txHex1, rtTx)
tx := &types.Transaction{}
err = types.Decode(data.Data, tx)
assert.Nil(t, err)
assert.Equal(t, tx.Fee, in.Fee)
assert.Equal(t, in.To, tx.To)
} | explode_data.jsonl/321 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 368
} | [
2830,
3393,
693,
7985,
20015,
31584,
1155,
353,
8840,
836,
8,
341,
46237,
20335,
16,
1669,
330,
15,
64,
15,
20,
21,
18,
21,
69,
21,
24,
21,
68,
22,
18,
16,
17,
17,
66,
16,
23,
15,
16,
15,
64,
17,
23,
16,
15,
23,
15,
66,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestObjectLiteral(t *testing.T) {
const SCRIPT = `
var getterCalled = false;
var setterCalled = false;
var o = {get x() {getterCalled = true}, set x() {setterCalled = true}};
o.x;
o.x = 42;
getterCalled && setterCalled;
`
testScript1(SCRIPT, valueTrue, t)
} | explode_data.jsonl/75285 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 115
} | [
2830,
3393,
1190,
17350,
1155,
353,
8840,
836,
8,
341,
4777,
53679,
284,
22074,
2405,
33429,
20960,
284,
895,
280,
2405,
40388,
20960,
284,
895,
401,
2405,
297,
284,
314,
455,
856,
368,
314,
52891,
20960,
284,
830,
2137,
738,
856,
368... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRuleSysdate(t *testing.T) {
common.Log.Debug("Entering function: %s", common.GetFunctionName())
sqls := []string{
`select sysdate();`,
}
for _, sql := range sqls {
q, err := NewQuery4Audit(sql)
if err == nil {
rule := q.RuleSysdate()
if rule.Item != "FUN.004" {
t.Error("Rule not match:", rule.Item, "Expect : FUN.004")
}
} else {
t.Error("sqlparser.Parse Error:", err)
}
}
common.Log.Debug("Exiting function: %s", common.GetFunctionName())
} | explode_data.jsonl/76788 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 200
} | [
2830,
3393,
11337,
32792,
1028,
1155,
353,
8840,
836,
8,
341,
83825,
5247,
20345,
445,
82867,
729,
25,
1018,
82,
497,
4185,
2234,
5152,
675,
2398,
30633,
82,
1669,
3056,
917,
515,
197,
197,
63,
1742,
5708,
1028,
2129,
12892,
197,
532,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestRouter1LevelMatch(t *testing.T) {
// Create empty handler
h := new(Handler)
// Create empty context
c := new(Context)
c.Params = Params{}
// Create route
r := Route("/level", h)
// Matching routes
rs := []string{"/level", "level"}
// Check
for _, s := range rs {
if !r.Match(s, c) {
t.Errorf("'%s' should match against '/level'", s)
}
}
} | explode_data.jsonl/35792 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 148
} | [
2830,
3393,
9523,
16,
4449,
8331,
1155,
353,
8840,
836,
8,
341,
197,
322,
4230,
4287,
7013,
198,
9598,
1669,
501,
7,
3050,
692,
197,
322,
4230,
4287,
2266,
198,
1444,
1669,
501,
14001,
340,
1444,
58268,
284,
34352,
31483,
197,
322,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestStructOf(t *testing.T) {
// check construction and use of type not in binary
fields := []StructField{
StructField{
Name: "S",
Tag: "s",
Type: TypeOf(""),
},
StructField{
Name: "X",
Tag: "x",
Type: TypeOf(byte(0)),
},
StructField{
Name: "Y",
Type: TypeOf(uint64(0)),
},
StructField{
Name: "Z",
Type: TypeOf([3]uint16{}),
},
}
st := StructOf(fields)
v := New(st).Elem()
runtime.GC()
v.FieldByName("X").Set(ValueOf(byte(2)))
v.FieldByIndex([]int{1}).Set(ValueOf(byte(1)))
runtime.GC()
s := fmt.Sprint(v.Interface())
want := `{ 1 0 [0 0 0]}`
if s != want {
t.Errorf("constructed struct = %s, want %s", s, want)
}
// check the size, alignment and field offsets
stt := TypeOf(struct {
String string
X byte
Y uint64
Z [3]uint16
}{})
if st.Size() != stt.Size() {
t.Errorf("constructed struct size = %v, want %v", st.Size(), stt.Size())
}
if st.Align() != stt.Align() {
t.Errorf("constructed struct align = %v, want %v", st.Align(), stt.Align())
}
if st.FieldAlign() != stt.FieldAlign() {
t.Errorf("constructed struct field align = %v, want %v", st.FieldAlign(), stt.FieldAlign())
}
for i := 0; i < st.NumField(); i++ {
o1 := st.Field(i).Offset
o2 := stt.Field(i).Offset
if o1 != o2 {
t.Errorf("constructed struct field %v offset = %v, want %v", i, o1, o2)
}
}
// check duplicate names
shouldPanic(func() {
StructOf([]StructField{
StructField{Name: "string", Type: TypeOf("")},
StructField{Name: "string", Type: TypeOf("")},
})
})
shouldPanic(func() {
StructOf([]StructField{
StructField{Type: TypeOf("")},
StructField{Name: "string", Type: TypeOf("")},
})
})
shouldPanic(func() {
StructOf([]StructField{
StructField{Type: TypeOf("")},
StructField{Type: TypeOf("")},
})
})
// check that type already in binary is found
checkSameType(t, Zero(StructOf(fields[2:3])).Interface(), struct{ Y uint64 }{})
} | explode_data.jsonl/29601 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 853
} | [
2830,
3393,
9422,
2124,
1155,
353,
8840,
836,
8,
341,
197,
322,
1779,
8086,
323,
990,
315,
943,
537,
304,
7868,
198,
55276,
1669,
3056,
9422,
1877,
515,
197,
197,
9422,
1877,
515,
298,
21297,
25,
330,
50,
756,
298,
197,
5668,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.