text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestServer_Push_RejectAfterGoAway(t *testing.T) {
var readyOnce sync.Once
ready := make(chan struct{})
errc := make(chan error, 2)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
select {
case <-ready:
case <-time.After(5 * time.Second):
errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed")
}
if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
errc <- fmt.Errorf("Push()=%v, want %v", got, want)
}
errc <- nil
})
defer st.Close()
st.greet()
getSlash(st)
// Send GOAWAY and wait for it to be processed.
st.fr.WriteGoAway(1, ErrCodeNo, nil)
go func() {
for {
select {
case <-ready:
return
default:
}
st.sc.testHookCh <- func(loopNum int) {
if !st.sc.pushEnabled {
readyOnce.Do(func() { close(ready) })
}
}
}
}()
if err := <-errc; err != nil {
t.Error(err)
}
} | explode_data.jsonl/1979 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 410
} | [
2830,
3393,
5475,
1088,
1116,
50693,
583,
6025,
10850,
78284,
1155,
353,
8840,
836,
8,
341,
2405,
5527,
12522,
12811,
77946,
198,
197,
2307,
1669,
1281,
35190,
2036,
37790,
9859,
66,
1669,
1281,
35190,
1465,
11,
220,
17,
340,
18388,
166... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestConvertKubernetes(t *testing.T) {
dir, err := ioutil.TempDir("", "agent_test_legacy")
require.NoError(t, err)
defer os.RemoveAll(dir)
src := filepath.Join(dir, "kubernetes.yaml")
srcEmpty := filepath.Join(dir, "kubernetes-empty.yaml")
dst := filepath.Join(dir, "kubelet.yaml")
dstEmpty := filepath.Join(dir, "kubelet-empty.yaml")
err = ioutil.WriteFile(src, []byte(kubernetesLegacyConf), 0640)
require.NoError(t, err)
err = ioutil.WriteFile(srcEmpty, []byte(kubernetesLegacyEmptyConf), 0640)
require.NoError(t, err)
configConverter := config.NewConfigConverter()
deprecations, err := importKubernetesConfWithDeprec(src, dst, true, configConverter)
require.NoError(t, err)
require.EqualValues(t, expectedKubeDeprecations, deprecations)
newConf, err := ioutil.ReadFile(dst)
require.NoError(t, err)
assert.Equal(t, kubeletNewConf, string(newConf))
assert.Equal(t, 1234, config.Datadog.GetInt("kubernetes_http_kubelet_port"))
assert.Equal(t, 1234, config.Datadog.GetInt("kubernetes_https_kubelet_port"))
assert.Equal(t, "localhost", config.Datadog.GetString("kubernetes_kubelet_host"))
assert.Equal(t, "/path/to/client.crt", config.Datadog.GetString("kubelet_client_crt"))
assert.Equal(t, "/path/to/client.key", config.Datadog.GetString("kubelet_client_key"))
assert.Equal(t, "/path/to/ca.pem", config.Datadog.GetString("kubelet_client_ca"))
assert.Equal(t, "/path/to/token", config.Datadog.GetString("kubelet_auth_token_path"))
assert.EqualValues(t, expectedHostTags, config.Datadog.GetStringMapString("kubernetes_node_labels_as_tags"))
assert.Equal(t, false, config.Datadog.GetBool("kubelet_tls_verify"))
assert.Equal(t, true, config.Datadog.GetBool("kubernetes_collect_service_tags"))
assert.Equal(t, true, config.Datadog.GetBool("collect_kubernetes_events"))
assert.Equal(t, true, config.Datadog.GetBool("collect_kubernetes_metrics"))
assert.Equal(t, true, config.Datadog.GetBool("collect_kubernetes_topology"))
assert.Equal(t, 10, config.Datadog.GetInt("collect_kubernetes_timeout"))
assert.Equal(t, true, config.Datadog.GetBool("leader_election"))
assert.Equal(t, 1200, config.Datadog.GetInt("leader_lease_duration"))
assert.Equal(t, 3000, config.Datadog.GetInt("kubernetes_service_tag_update_freq"))
configConverter.Set("kubelet_tls_verify", true)
deprecations, err = importKubernetesConfWithDeprec(srcEmpty, dstEmpty, true, configConverter)
require.NoError(t, err)
assert.Equal(t, true, config.Datadog.GetBool("kubelet_tls_verify"))
assert.Equal(t, 0, len(deprecations))
newEmptyConf, err := ioutil.ReadFile(dstEmpty)
require.NoError(t, err)
assert.Equal(t, kubeletNewEmptyConf, string(newEmptyConf))
// test overwrite
err = ImportKubernetesConf(src, dst, false, configConverter)
require.NotNil(t, err)
_, err = os.Stat(filepath.Join(dir, "kubelet.yaml.bak"))
assert.True(t, os.IsNotExist(err))
err = ImportKubernetesConf(src, dst, true, configConverter)
require.NoError(t, err)
_, err = os.Stat(filepath.Join(dir, "kubelet.yaml.bak"))
assert.False(t, os.IsNotExist(err))
} | explode_data.jsonl/49077 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1226
} | [
2830,
3393,
12012,
42,
29827,
1155,
353,
8840,
836,
8,
341,
48532,
11,
1848,
1669,
43144,
65009,
6184,
19814,
330,
8092,
4452,
97555,
1138,
17957,
35699,
1155,
11,
1848,
340,
16867,
2643,
84427,
14161,
692,
41144,
1669,
26054,
22363,
1416... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCatalog_ListServiceNodes(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
args := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "db",
ServiceTag: "slave",
TagFilter: false,
}
var out structs.IndexedServiceNodes
err := msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &args, &out)
if err != nil {
t.Fatalf("err: %v", err)
}
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// Just add a node
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
t.Fatalf("err: %v", err)
}
if err := s1.fsm.State().EnsureService(2, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"primary"}, Address: "127.0.0.1", Port: 5000}); err != nil {
t.Fatalf("err: %v", err)
}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
if len(out.ServiceNodes) != 1 {
t.Fatalf("bad: %v", out)
}
// Try with a filter
args.TagFilter = true
out = structs.IndexedServiceNodes{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
if len(out.ServiceNodes) != 0 {
t.Fatalf("bad: %v", out)
}
} | explode_data.jsonl/49233 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 579
} | [
2830,
3393,
41606,
27104,
1860,
12288,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
48532,
16,
11,
274,
16,
1669,
1273,
5475,
1155,
340,
16867,
2643,
84427,
14161,
16,
340,
16867,
274,
16,
10849,
18452,
741,
43343,
66,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestFixedAddressUpdateParams_WithTimeout(t *testing.T) {
p := NewFixedAddressUpdateParams()
p = p.WithTimeout(time.Minute * 5)
require.NotNil(t, p.timeout)
assert.Equal(t, time.Minute*5, p.timeout)
} | explode_data.jsonl/14333 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 81
} | [
2830,
3393,
13520,
4286,
4289,
4870,
62,
2354,
7636,
1155,
353,
8840,
836,
8,
341,
3223,
1669,
1532,
13520,
4286,
4289,
4870,
741,
3223,
284,
281,
26124,
7636,
9730,
75770,
353,
220,
20,
340,
17957,
93882,
1155,
11,
281,
36110,
340,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestMemPostings_ensureOrder(t *testing.T) {
p := NewUnorderedMemPostings()
p.m["a"] = map[string][]uint64{}
for i := 0; i < 100; i++ {
l := make([]uint64, 100)
for j := range l {
l[j] = rand.Uint64()
}
v := fmt.Sprintf("%d", i)
p.m["a"][v] = l
}
p.EnsureOrder()
for _, e := range p.m {
for _, l := range e {
ok := sort.SliceIsSorted(l, func(i, j int) bool {
return l[i] < l[j]
})
if !ok {
t.Fatalf("postings list %v is not sorted", l)
}
}
}
} | explode_data.jsonl/13122 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 252
} | [
2830,
3393,
18816,
4133,
819,
62,
27289,
4431,
1155,
353,
8840,
836,
8,
341,
3223,
1669,
1532,
1806,
10544,
18816,
4133,
819,
741,
3223,
744,
1183,
64,
1341,
284,
2415,
14032,
45725,
2496,
21,
19,
31483,
2023,
600,
1669,
220,
15,
26,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestObjectType(t *testing.T) {
assert.Equal(t, `nestedObject:
anotherNestedObject:
name: cheese
`, GenerateValuesAsYaml(t, "objectType.test.schema.json",
func(console *tests.ConsoleWrapper, donec chan struct{}) {
defer close(donec)
console.ExpectString("Enter a value for name")
console.SendLine("cheese")
console.ExpectEOF()
}))
} | explode_data.jsonl/61746 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 140
} | [
2830,
3393,
49530,
1155,
353,
8840,
836,
8,
341,
6948,
12808,
1155,
11,
1565,
59271,
1190,
510,
220,
2441,
71986,
1190,
510,
262,
829,
25,
17163,
198,
7808,
19813,
6227,
2121,
56,
9467,
1155,
11,
330,
1700,
929,
5958,
30892,
4323,
756... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNoTTLStrategyFailed(t *testing.T) {
var err error
var un *unstructured.Unstructured
controller := newTTLController()
// Veirfy we do not enqueue if not completed
wf := test.LoadWorkflowFromBytes([]byte(failedWf))
wf.Status.FinishedAt = metav1.Time{Time: controller.clock.Now().Add(-5 * time.Second)}
un, err = util.ToUnstructured(wf)
assert.NoError(t, err)
controller.enqueueWF(un)
assert.Equal(t, 0, controller.workqueue.Len())
wf1 := test.LoadWorkflowFromBytes([]byte(failedWf))
wf1.Status.FinishedAt = metav1.Time{Time: controller.clock.Now().Add(-11 * time.Second)}
un, err = util.ToUnstructured(wf1)
assert.NoError(t, err)
controller.enqueueWF(un)
assert.Equal(t, 0, controller.workqueue.Len())
} | explode_data.jsonl/48744 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 279
} | [
2830,
3393,
2753,
51,
13470,
19816,
9408,
1155,
353,
8840,
836,
8,
341,
2405,
1848,
1465,
198,
2405,
650,
353,
359,
51143,
10616,
51143,
198,
61615,
1669,
501,
51,
13470,
2051,
741,
197,
322,
22584,
404,
30595,
582,
653,
537,
53757,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestHighWatermarks(t *testing.T) {
ctx := context.Background()
s := Server{logs: fakeLogs{
0: make([]mutator.LogMessage, 10),
1: make([]mutator.LogMessage, 20),
}}
for _, tc := range []struct {
desc string
batchSize int32
count int32
last spb.MapMetadata
next spb.MapMetadata
}{
{desc: "nobatch", batchSize: 30, count: 30,
next: spb.MapMetadata{Sources: []*spb.MapMetadata_SourceSlice{
{LogId: 0, HighestExclusive: 10},
{LogId: 1, HighestExclusive: 20}}}},
{desc: "exactbatch", batchSize: 20, count: 20,
next: spb.MapMetadata{Sources: []*spb.MapMetadata_SourceSlice{
{LogId: 0, HighestExclusive: 10},
{LogId: 1, HighestExclusive: 10}}}},
{desc: "batchwprev", batchSize: 20, count: 20,
last: spb.MapMetadata{Sources: []*spb.MapMetadata_SourceSlice{
{LogId: 0, HighestExclusive: 10}}},
next: spb.MapMetadata{Sources: []*spb.MapMetadata_SourceSlice{
{LogId: 0, LowestInclusive: 10, HighestExclusive: 10},
{LogId: 1, HighestExclusive: 20}}}},
// Don't drop existing watermarks.
{desc: "keep existing", batchSize: 1, count: 1,
last: spb.MapMetadata{Sources: []*spb.MapMetadata_SourceSlice{
{LogId: 1, HighestExclusive: 10}}},
next: spb.MapMetadata{Sources: []*spb.MapMetadata_SourceSlice{
{LogId: 0, HighestExclusive: 1},
{LogId: 1, LowestInclusive: 10, HighestExclusive: 10}}}},
{desc: "logs that dont move", batchSize: 0, count: 0,
last: spb.MapMetadata{Sources: []*spb.MapMetadata_SourceSlice{
{LogId: 3, HighestExclusive: 10}}},
next: spb.MapMetadata{Sources: []*spb.MapMetadata_SourceSlice{
{LogId: 0},
{LogId: 1},
{LogId: 3, LowestInclusive: 10, HighestExclusive: 10}}}},
} {
t.Run(tc.desc, func(t *testing.T) {
count, next, err := s.HighWatermarks(ctx, directoryID, &tc.last, tc.batchSize)
if err != nil {
t.Fatalf("HighWatermarks(): %v", err)
}
if count != tc.count {
t.Errorf("HighWatermarks(): count: %v, want %v", count, tc.count)
}
if !cmp.Equal(next, &tc.next) {
t.Errorf("HighWatermarks(): diff(-got, +want): %v", cmp.Diff(next, &tc.next))
}
})
}
} | explode_data.jsonl/64849 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 932
} | [
2830,
3393,
11976,
28253,
15544,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
19047,
741,
1903,
1669,
8422,
90,
22081,
25,
12418,
51053,
515,
197,
197,
15,
25,
1281,
10556,
6984,
850,
5247,
2052,
11,
220,
16,
15,
1326,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestGetCreated(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "get_created",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
} | explode_data.jsonl/76365 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 87
} | [
2830,
3393,
1949,
11694,
1155,
353,
8840,
836,
8,
341,
2084,
17376,
80254,
2271,
1155,
11,
609,
60168,
80254,
2271,
3798,
515,
197,
197,
6184,
25,
688,
330,
455,
27288,
756,
197,
197,
48303,
25,
3056,
917,
4913,
31,
79,
65482,
4322,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func Test_unmarshal_struct_of_struct(t *testing.T) {
should := require.New(t)
for _, c := range test.UnmarshalCombinations {
buf, proto := c.CreateProtocol()
proto.WriteStructBegin("hello")
proto.WriteFieldBegin("field1", thrift.STRUCT, 1)
proto.WriteStructBegin("hello")
proto.WriteFieldBegin("field1", thrift.STRING, 1)
proto.WriteString("abc")
proto.WriteFieldEnd()
proto.WriteFieldStop()
proto.WriteStructEnd()
proto.WriteFieldEnd()
proto.WriteFieldStop()
proto.WriteStructEnd()
var val struct_of_struct_test.TestObject
should.NoError(c.Unmarshal(buf.Bytes(), &val))
should.Equal(struct_of_struct_test.TestObject{
struct_of_struct_test.EmbeddedObject{"abc"},
}, val)
}
} | explode_data.jsonl/47461 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 282
} | [
2830,
3393,
4907,
27121,
15126,
3575,
15126,
1155,
353,
8840,
836,
8,
341,
197,
5445,
1669,
1373,
7121,
1155,
340,
2023,
8358,
272,
1669,
2088,
1273,
38097,
1092,
73629,
341,
197,
26398,
11,
18433,
1669,
272,
7251,
20689,
741,
197,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestExpand(t *testing.T) {
match := map[string]string{
"domain": "gowalker.org",
"subdomain": "github.com",
}
s := "http://{domain}/{subdomain}/{0}/{1}"
sR := "http://gowalker.org/yougam/libraries/Unknwon/gowalker"
if Expand(s, match, "Unknwon", "gowalker") != sR {
t.Errorf("Expand:\n Expect => %s\n Got => %s\n", sR, s)
}
} | explode_data.jsonl/68415 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 158
} | [
2830,
3393,
38946,
1155,
353,
8840,
836,
8,
341,
47706,
1669,
2415,
14032,
30953,
515,
197,
197,
1,
12204,
788,
262,
330,
36186,
36808,
2659,
756,
197,
197,
1,
1966,
12204,
788,
330,
5204,
905,
756,
197,
532,
1903,
1669,
330,
1254,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFind(t *testing.T) {
type args struct {
code string
}
tests := []struct {
name string
args args
want Currency
}{
{"returns the currency with provided code, if found",
args{"JPY"},
JPY,
},
{"returns XXX (no currency), if not found",
args{"OMG"},
XXX,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := Find(tt.args.code); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Find() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/9003 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 224
} | [
2830,
3393,
9885,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
43343,
914,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
31215,
2827,
198,
197,
50780,
28453,
198,
197,
59403,
197,
197,
4913,
421... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestCaptiveGetLedger_ErrReadingMetaResult(t *testing.T) {
tt := assert.New(t)
metaChan := make(chan metaResult, 100)
for i := 64; i <= 65; i++ {
meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)})
metaChan <- metaResult{
LedgerCloseMeta: &meta,
}
}
metaChan <- metaResult{
err: fmt.Errorf("unmarshalling error"),
}
ctx := context.Background()
mockRunner := &stellarCoreRunnerMock{}
mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil)
mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan))
ctx, cancel := context.WithCancel(ctx)
mockRunner.On("context").Return(ctx)
mockRunner.On("close").Return(nil).Run(func(args mock.Arguments) {
cancel()
}).Once()
// even if the request to fetch the latest checkpoint succeeds, we should fail at creating the subprocess
mockArchive := &historyarchive.MockArchive{}
mockArchive.
On("GetRootHAS").
Return(historyarchive.HistoryArchiveState{
CurrentLedger: uint32(200),
}, nil)
captiveBackend := CaptiveStellarCore{
archive: mockArchive,
stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) {
return mockRunner, nil
},
checkpointManager: historyarchive.NewCheckpointManager(64),
}
err := captiveBackend.PrepareRange(ctx, BoundedRange(65, 66))
assert.NoError(t, err)
meta, err := captiveBackend.GetLedger(ctx, 65)
tt.NoError(err)
tt.Equal(xdr.Uint32(65), meta.V0.LedgerHeader.Header.LedgerSeq)
tt.False(captiveBackend.closed)
// try reading from an empty buffer
_, err = captiveBackend.GetLedger(ctx, 66)
tt.EqualError(err, "unmarshalling error")
// not closed even if there is an error getting ledger
tt.False(captiveBackend.closed)
mockArchive.AssertExpectations(t)
mockRunner.AssertExpectations(t)
} | explode_data.jsonl/7328 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 647
} | [
2830,
3393,
34,
27781,
1949,
60850,
1389,
93623,
31899,
12175,
2077,
1155,
353,
8840,
836,
8,
341,
3244,
83,
1669,
2060,
7121,
1155,
340,
84004,
46019,
1669,
1281,
35190,
8823,
2077,
11,
220,
16,
15,
15,
692,
2023,
600,
1669,
220,
21,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIsNewSessionRequest(t *testing.T) {
AssertThat(t, isNewSessionRequest("POST", "session"), Is{true})
AssertThat(t, isNewSessionRequest("POST", "session/123/timeouts"), Is{false})
} | explode_data.jsonl/65295 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 65
} | [
2830,
3393,
3872,
3564,
5283,
1900,
1155,
353,
8840,
836,
8,
341,
18017,
4792,
1155,
11,
87478,
5283,
1900,
445,
2946,
497,
330,
5920,
3975,
2160,
90,
1866,
3518,
18017,
4792,
1155,
11,
87478,
5283,
1900,
445,
2946,
497,
330,
5920,
14... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestContextRenderYAML(t *testing.T) {
w := httptest.NewRecorder()
c, _ := CreateTestContext(w)
c.YAML(http.StatusCreated, H{"foo": "bar"})
assert.Equal(t, http.StatusCreated, w.Code)
assert.Equal(t, "foo: bar\n", w.Body.String())
assert.Equal(t, "application/x-yaml; charset=utf-8", w.Header().Get("Content-Type"))
} | explode_data.jsonl/26795 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 136
} | [
2830,
3393,
1972,
6750,
56,
31102,
1155,
353,
8840,
836,
8,
341,
6692,
1669,
54320,
70334,
7121,
47023,
741,
1444,
11,
716,
1669,
4230,
2271,
1972,
3622,
692,
1444,
7507,
31102,
19886,
10538,
11694,
11,
472,
4913,
7975,
788,
330,
2257,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestClusterAdminListPartitionReassignments(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
defer seedBroker.Close()
secondBroker := NewMockBroker(t, 2)
defer secondBroker.Close()
seedBroker.SetHandlerByMap(map[string]MockResponse{
"ApiVersionsRequest": NewMockApiVersionsResponse(t),
"MetadataRequest": NewMockMetadataResponse(t).
SetController(secondBroker.BrokerID()).
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()).
SetBroker(secondBroker.Addr(), secondBroker.BrokerID()),
})
secondBroker.SetHandlerByMap(map[string]MockResponse{
"ApiVersionsRequest": NewMockApiVersionsResponse(t),
"ListPartitionReassignmentsRequest": NewMockListPartitionReassignmentsResponse(t),
})
config := NewTestConfig()
config.Version = V2_4_0_0
admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
response, err := admin.ListPartitionReassignments("my_topic", []int32{0, 1})
if err != nil {
t.Fatal(err)
}
partitionStatus, ok := response["my_topic"]
if !ok {
t.Fatalf("topic missing in response")
} else {
if len(partitionStatus) != 2 {
t.Fatalf("partition missing in response")
}
}
err = admin.Close()
if err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/40788 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 494
} | [
2830,
3393,
28678,
7210,
852,
49978,
693,
96310,
1155,
353,
8840,
836,
8,
341,
197,
22602,
65545,
1669,
1532,
11571,
65545,
1155,
11,
220,
16,
340,
16867,
10320,
65545,
10421,
2822,
197,
5569,
65545,
1669,
1532,
11571,
65545,
1155,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestECSClient(t *testing.T) {
testutil.CheckAWSEnvVarsForECS(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hc := utility.GetHTTPClient()
defer utility.PutHTTPClient(hc)
c, err := NewBasicECSClient(awsutil.ClientOptions{
Creds: credentials.NewEnvCredentials(),
Region: aws.String(testutil.AWSRegion()),
Role: aws.String(testutil.AWSRole()),
RetryOpts: &utility.RetryOptions{
MaxAttempts: 5,
},
HTTPClient: hc,
})
require.NoError(t, err)
require.NotNil(t, c)
for tName, tCase := range testcase.ECSClientTaskDefinitionTests() {
t.Run(tName, func(t *testing.T) {
tctx, tcancel := context.WithTimeout(ctx, 30*time.Second)
defer tcancel()
defer c.Close(tctx)
tCase(tctx, t, c)
})
}
registerIn := &ecs.RegisterTaskDefinitionInput{
ContainerDefinitions: []*ecs.ContainerDefinition{
{
Command: []*string{aws.String("echo"), aws.String("foo")},
Image: aws.String("busybox"),
Name: aws.String("print_foo"),
},
},
Cpu: aws.String("128"),
Memory: aws.String("4"),
Family: aws.String(testutil.NewTaskDefinitionFamily(t.Name())),
}
registerOut, err := c.RegisterTaskDefinition(ctx, registerIn)
require.NoError(t, err)
require.NotZero(t, registerOut)
require.NotZero(t, registerOut.TaskDefinition)
defer func() {
taskDefs := cleanupTaskDefinitions(ctx, t, c)
grip.InfoWhen(len(taskDefs) > 0, message.Fields{
"message": "cleaned up leftover task definitions",
"task_definitions": taskDefs,
"test": t.Name(),
})
tasks := cleanupTasks(ctx, t, c, taskDefs)
grip.InfoWhen(len(tasks) > 0, message.Fields{
"message": "cleaned up leftover running tasks",
"tasks": tasks,
"test": t.Name(),
})
require.NoError(t, c.Close(ctx))
}()
for tName, tCase := range testcase.ECSClientRegisteredTaskDefinitionTests(*registerIn, *registerOut) {
t.Run(tName, func(t *testing.T) {
tctx, tcancel := context.WithTimeout(ctx, 30*time.Second)
defer tcancel()
tCase(tctx, t, c)
})
}
} | explode_data.jsonl/68423 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 860
} | [
2830,
3393,
36,
6412,
2959,
1155,
353,
8840,
836,
8,
341,
18185,
1314,
10600,
14419,
925,
36941,
28305,
2461,
36,
6412,
1155,
692,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
16867,
9121,
2822,
9598,
66,
1669,
15549,
22... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCreateFromEmptyConfig(t *testing.T) {
var configData []byte
var policy schedulerapi.Policy
handler := utiltesting.FakeHandler{
StatusCode: 500,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
informerFactory := informers.NewSharedInformerFactory(client, 0)
factory := NewConfigFactory(
v1.DefaultSchedulerName,
client,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)
configData = []byte(`{}`)
if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
t.Errorf("Invalid configuration: %v", err)
}
factory.CreateFromConfig(policy)
} | explode_data.jsonl/13322 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 442
} | [
2830,
3393,
4021,
3830,
3522,
2648,
1155,
353,
8840,
836,
8,
341,
2405,
2193,
1043,
3056,
3782,
198,
2405,
4842,
28809,
2068,
1069,
8018,
271,
53326,
1669,
4094,
8840,
991,
726,
3050,
515,
197,
197,
15872,
25,
256,
220,
20,
15,
15,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_buildReportLines(t *testing.T) {
type args struct {
report Report
}
tests := []struct {
name string
args args
want []string
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := buildReportLines(tt.args.report); !reflect.DeepEqual(got, tt.want) {
t.Errorf("buildReportLines() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/49958 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 177
} | [
2830,
3393,
20801,
10361,
16794,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
69931,
8259,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
31215,
2827,
198,
197,
50780,
3056,
917,
198,
197,
59403,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFlow_InvalidPacket(t *testing.T) {
invalidIPpacket := []byte{0xab, 0xbc}
_, err := FindFlow(invalidIPpacket)
if err == nil {
t.Errorf("Unable to detect invalid flow from %v\n", invalidIPpacket)
}
} | explode_data.jsonl/69662 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 86
} | [
2830,
3393,
18878,
62,
7928,
16679,
1155,
353,
8840,
836,
8,
341,
197,
11808,
3298,
24829,
1669,
3056,
3782,
90,
15,
52616,
11,
220,
15,
43416,
630,
197,
6878,
1848,
1669,
7379,
18878,
5900,
1891,
3298,
24829,
340,
743,
1848,
621,
209... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestState_UpdateModeSelf(t *testing.T) {
t.Parallel()
st := setupNewState()
ev := &irc.Event{
Name: irc.MODE,
Sender: string(st.selfUser.Host),
Args: []string{st.selfUser.Nick(), "+i-o"},
NetworkInfo: testNetInfo,
}
st.selfModes.Set("o")
if got, exp := st.selfModes.IsSet("i"), false; exp != got {
t.Errorf("Expected: %v, got: %v", exp, got)
}
if got, exp := st.selfModes.IsSet("o"), true; exp != got {
t.Errorf("Expected: %v, got: %v", exp, got)
}
st.Update(ev)
if got, exp := st.selfModes.IsSet("i"), true; exp != got {
t.Errorf("Expected: %v, got: %v", exp, got)
}
if got, exp := st.selfModes.IsSet("o"), false; exp != got {
t.Errorf("Expected: %v, got: %v", exp, got)
}
} | explode_data.jsonl/32102 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 338
} | [
2830,
3393,
1397,
47393,
3636,
12092,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
18388,
1669,
6505,
3564,
1397,
2822,
74837,
1669,
609,
2437,
6904,
515,
197,
21297,
25,
286,
79923,
41252,
345,
197,
7568,
1659,
25,
414,
914,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func Test_CDN_DescribeCdnDomainDetailWithRPCrequest(t *testing.T) {
client, err := cdn.NewClientWithAccessKey(os.Getenv("REGION_ID"), os.Getenv("ACCESS_KEY_ID"), os.Getenv("ACCESS_KEY_SECRET"))
assert.Nil(t, err)
assert.NotNil(t, client)
request := cdn.CreateDescribeRefreshTasksRequest()
response, err := client.DescribeRefreshTasks(request)
assert.Nil(t, err)
assert.NotNil(t, response)
assert.Equal(t, 36, len(response.RequestId))
} | explode_data.jsonl/56874 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 174
} | [
2830,
3393,
920,
31264,
98054,
3114,
34,
17395,
13636,
10649,
2354,
29528,
2035,
1155,
353,
8840,
836,
8,
341,
25291,
11,
1848,
1669,
15307,
77,
7121,
2959,
2354,
6054,
1592,
9638,
64883,
445,
77431,
3450,
3975,
2643,
64883,
445,
55749,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMakeIDFromTeam(t *testing.T) {
privateTID := keybase1.MakeTestTeamID(1, false)
publicTID := keybase1.MakeTestTeamID(2, true)
epochIndex := idByteLen - 2
check := func(ty Type, tid keybase1.TeamID, epoch byte) {
id, err := MakeIDFromTeam(ty, tid, epoch)
require.NoError(t, err)
require.Equal(t, id.Type(), ty)
require.Equal(t, tid.ToBytes()[:epochIndex], id.Bytes()[:epochIndex])
require.Equal(t, epoch, id.Bytes()[epochIndex])
}
check(Private, privateTID, 0)
check(Public, publicTID, 0)
check(SingleTeam, privateTID, 0)
check(Private, privateTID, 15)
_, err := MakeIDFromTeam(Public, privateTID, 0)
require.NotNil(t, err)
_, err = MakeIDFromTeam(Private, publicTID, 0)
require.NotNil(t, err)
_, err = MakeIDFromTeam(SingleTeam, publicTID, 0)
require.NotNil(t, err)
_, err = MakeIDFromTeam(
Private, keybase1.TeamID("extra"+privateTID.String()), 0)
require.NotNil(t, err)
} | explode_data.jsonl/65971 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 377
} | [
2830,
3393,
8078,
915,
3830,
14597,
1155,
353,
8840,
836,
8,
341,
2455,
51,
915,
1669,
1376,
3152,
16,
50133,
2271,
14597,
915,
7,
16,
11,
895,
340,
1219,
51,
915,
1669,
1376,
3152,
16,
50133,
2271,
14597,
915,
7,
17,
11,
830,
692... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestField_SameNameWithDifferentType(t *testing.T) {
type (
FooInt struct {
Foo int
}
FooStr struct {
Foo string
}
)
//
// To()
//
srcInt := &FooInt{Foo: 1}
dstStr := &FooStr{}
assert.Nil(t, Copy(dstStr).From(srcInt))
assert.Empty(t, dstStr.Foo)
//
// From()
//
dstStr = &FooStr{}
assert.Nil(t, Copy(dstStr).From(srcInt))
assert.Empty(t, dstStr.Foo)
} | explode_data.jsonl/13667 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 193
} | [
2830,
3393,
1877,
1098,
373,
675,
2354,
69123,
929,
1155,
353,
8840,
836,
8,
341,
13158,
2399,
197,
12727,
2624,
1072,
2036,
341,
298,
12727,
2624,
526,
198,
197,
197,
630,
197,
12727,
2624,
2580,
2036,
341,
298,
12727,
2624,
914,
198... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMemberRegistration_Leadership(t *testing.T) {
serverAddress, testServer, cleanup := newTestPlacementServer(testRaftServer)
testServer.hasLeadership = true
t.Run("Connect server and disconnect it gracefully", func(t *testing.T) {
// arrange
conn, stream, err := newTestClient(serverAddress)
assert.NoError(t, err)
host := &v1pb.Host{
Name: "127.0.0.1:50102",
Entities: []string{"DogActor", "CatActor"},
Id: "testAppID",
Load: 1, // Not used yet
// Port is redundant because Name should include port number
}
// act
stream.Send(host)
// assert
select {
case memberChange := <-testServer.membershipCh:
assert.Equal(t, raft.MemberUpsert, memberChange.cmdType)
assert.Equal(t, host.Name, memberChange.host.Name)
assert.Equal(t, host.Id, memberChange.host.AppID)
assert.EqualValues(t, host.Entities, memberChange.host.Entities)
assert.Equal(t, 1, len(testServer.streamConnPool))
case <-time.After(testStreamSendLatency):
assert.True(t, false, "no membership change")
}
// act
// Runtime needs to close stream gracefully which will let placement remove runtime host from hashing ring
// in the next flush time window.
stream.CloseSend()
// assert
select {
case memberChange := <-testServer.membershipCh:
assert.Equal(t, raft.MemberRemove, memberChange.cmdType)
assert.Equal(t, host.Name, memberChange.host.Name)
case <-time.After(testStreamSendLatency):
require.True(t, false, "no membership change")
}
conn.Close()
})
t.Run("Connect server and disconnect it forcefully", func(t *testing.T) {
// arrange
conn, stream, err := newTestClient(serverAddress)
assert.NoError(t, err)
// act
host := &v1pb.Host{
Name: "127.0.0.1:50103",
Entities: []string{"DogActor", "CatActor"},
Id: "testAppID",
Load: 1, // Not used yet
// Port is redundant because Name should include port number
}
stream.Send(host)
// assert
select {
case memberChange := <-testServer.membershipCh:
assert.Equal(t, raft.MemberUpsert, memberChange.cmdType)
assert.Equal(t, host.Name, memberChange.host.Name)
assert.Equal(t, host.Id, memberChange.host.AppID)
assert.EqualValues(t, host.Entities, memberChange.host.Entities)
assert.Equal(t, 1, len(testServer.streamConnPool))
case <-time.After(testStreamSendLatency):
require.True(t, false, "no membership change")
}
// act
// Close tcp connection before closing stream, which simulates the scenario
// where dapr runtime disconnects the connection from placement service unexpectedly.
conn.Close()
// assert
select {
case <-testServer.membershipCh:
require.True(t, false, "should not have any member change message because faulty host detector time will clean up")
case <-time.After(testStreamSendLatency):
assert.Equal(t, 0, len(testServer.streamConnPool))
}
})
t.Run("non actor host", func(t *testing.T) {
// arrange
conn, stream, err := newTestClient(serverAddress)
assert.NoError(t, err)
// act
host := &v1pb.Host{
Name: "127.0.0.1:50104",
Entities: []string{},
Id: "testAppID",
Load: 1, // Not used yet
// Port is redundant because Name should include port number
}
stream.Send(host)
// assert
select {
case <-testServer.membershipCh:
require.True(t, false, "should not have any membership change")
case <-time.After(testStreamSendLatency):
require.True(t, true)
}
// act
// Close tcp connection before closing stream, which simulates the scenario
// where dapr runtime disconnects the connection from placement service unexpectedly.
conn.Close()
})
cleanup()
} | explode_data.jsonl/42594 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1335
} | [
2830,
3393,
9366,
23365,
62,
92724,
2151,
1155,
353,
8840,
836,
8,
341,
41057,
4286,
11,
1273,
5475,
11,
21290,
1669,
501,
2271,
28237,
5475,
8623,
55535,
723,
5475,
340,
18185,
5475,
6858,
92724,
2151,
284,
830,
271,
3244,
16708,
445,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestUnMarshalCustomerStatementsResponse(t *testing.T) {
s := `[
{
"id": "f45382c6fbc44d44aa7f9a55eb2ce731",
"state": "sent",
"reject_reason": null,
"email": "client@example.com",
"template": "statement_email",
"subject": "Statement from Dunder Mifflin, Inc.",
"message": "Dear Client, we have attached your latest account statement. Thank you!",
"opens": 0,
"opens_detail": [],
"clicks": 0,
"clicks_detail": [],
"created_at": 1436890047
}
]`
so := new(EmailResponses)
err := json.Unmarshal([]byte(s), so)
if err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/43693 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 258
} | [
2830,
3393,
1806,
55438,
12792,
93122,
2582,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
1565,
9640,
220,
341,
262,
330,
307,
788,
330,
69,
19,
20,
18,
23,
17,
66,
21,
69,
8904,
19,
19,
67,
19,
19,
5305,
22,
69,
24,
64,
20,
20,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestMgetc(t *testing.T) {
// Environment context parse
var ctx lib.Ctx
ctx.Init()
// Set context's Mgetc manually (don't need to repeat tests from context_test.go)
ctx.Mgetc = "y"
expected := "y"
got := lib.Mgetc(&ctx)
if got != expected {
t.Errorf("expected %v, got %v", expected, got)
}
} | explode_data.jsonl/53279 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 120
} | [
2830,
3393,
44,
71303,
1155,
353,
8840,
836,
8,
341,
197,
322,
11586,
2266,
4715,
198,
2405,
5635,
3051,
727,
3998,
198,
20985,
26849,
2822,
197,
322,
2573,
2266,
594,
386,
71303,
20083,
320,
15007,
944,
1184,
311,
13153,
7032,
504,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGetCacheExecCode(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "xvm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
compileFunc := func(code []byte, output string) error {
return ioutil.WriteFile(output, code, 0700)
}
makeExecCodeFunc := func(libpath string) (exec.Code, error) {
return new(fakeCode), nil
}
cp := &memCodeProvider{
code: []byte("binary code"),
desc: &pb.WasmCodeDesc{
Digest: []byte("digest1"),
},
}
cm, err := newCodeManager(tmpdir, compileFunc, makeExecCodeFunc)
if err != nil {
t.Fatal(err)
}
code, err := cm.GetExecCode("c1", cp)
if err != nil {
t.Fatal(err)
}
// 期待从内存中获取
codeMemory, err := cm.GetExecCode("c1", cp)
if err != nil {
t.Fatal(err)
}
if code != codeMemory {
t.Fatalf("expect same exec code:%p, %p", code, codeMemory)
}
// digest改变之后需要重新填充cache
cp.desc.Digest = []byte("digest2")
code1, _ := cm.GetExecCode("c1", cp)
if code1 == code {
t.Fatalf("expect none equal code:%p, %p", code1, code)
}
// 期待从磁盘中获取
cm1, err := newCodeManager(tmpdir, compileFunc, makeExecCodeFunc)
if err != nil {
t.Fatal(err)
}
codeDisk, err := cm1.GetExecCode("c1", cp)
if err != nil {
t.Fatal(err)
}
if code1 == codeDisk {
t.Fatalf("expect none same exec code address:%p, %p", code1, codeMemory)
}
} | explode_data.jsonl/34455 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 611
} | [
2830,
3393,
1949,
8233,
10216,
2078,
1155,
353,
8840,
836,
8,
341,
20082,
3741,
11,
1848,
1669,
43144,
65009,
6184,
19814,
330,
87,
7338,
16839,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
532,
16867,
2643,
84427,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPlayer_ViewDirectionY(t *testing.T) {
pl := playerWithProperty("m_angEyeAngles[0]", st.PropertyValue{FloatVal: 15})
assert.Equal(t, float32(15), pl.ViewDirectionY())
} | explode_data.jsonl/12194 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 67
} | [
2830,
3393,
4476,
50711,
9268,
56,
1155,
353,
8840,
836,
8,
341,
72213,
1669,
2781,
2354,
3052,
445,
76,
58060,
50058,
46454,
58,
15,
19076,
357,
15727,
1130,
90,
5442,
2208,
25,
220,
16,
20,
8824,
6948,
12808,
1155,
11,
2224,
18,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestTxOut_String(t *testing.T) {
script := script.NewScriptRaw([]byte{opcodes.OP_RETURN, 0x01, 0x01})
txout := NewTxOut(9, script)
s := txout.String()
assert.Equal(t, "Value :9 Script:6a0101", s)
} | explode_data.jsonl/38886 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 89
} | [
2830,
3393,
31584,
2662,
31777,
1155,
353,
8840,
836,
8,
341,
86956,
1669,
5316,
7121,
5910,
20015,
10556,
3782,
90,
453,
25814,
81563,
21909,
11,
220,
15,
87,
15,
16,
11,
220,
15,
87,
15,
16,
3518,
46237,
411,
1669,
1532,
31584,
26... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAccAWSWafSizeConstraintSet_disappears(t *testing.T) {
var v waf.SizeConstraintSet
sizeConstraintSet := fmt.Sprintf("sizeConstraintSet-%s", acctest.RandString(5))
resourceName := "aws_waf_size_constraint_set.size_constraint_set"
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSWaf(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSWafSizeConstraintSetDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSWafSizeConstraintSetConfig(sizeConstraintSet),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSWafSizeConstraintSetExists(resourceName, &v),
testAccCheckAWSWafSizeConstraintSetDisappears(&v),
),
ExpectNonEmptyPlan: true,
},
},
})
} | explode_data.jsonl/45417 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 312
} | [
2830,
3393,
14603,
36136,
54,
2577,
1695,
17890,
1649,
9932,
33431,
1561,
1155,
353,
8840,
836,
8,
341,
2405,
348,
289,
2577,
2465,
17890,
1649,
198,
13832,
17890,
1649,
1669,
8879,
17305,
445,
2141,
17890,
1649,
11069,
82,
497,
1613,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIsPlanBindable(t *testing.T) {
serviceClass := func(bindable bool) *v1beta1.ClusterServiceClass {
serviceClass := getTestClusterServiceClass()
serviceClass.Spec.Bindable = bindable
return serviceClass
}
servicePlan := func(bindable *bool) *v1beta1.ClusterServicePlan {
return &v1beta1.ClusterServicePlan{
Spec: v1beta1.ClusterServicePlanSpec{
CommonServicePlanSpec: v1beta1.CommonServicePlanSpec{
Bindable: bindable,
},
},
}
}
cases := []struct {
name string
serviceClass bool
servicePlan *bool
bindable bool
}{
{
name: "service true, plan not set",
serviceClass: true,
bindable: true,
},
{
name: "service true, plan false",
serviceClass: true,
servicePlan: falsePtr(),
bindable: false,
},
{
name: "service true, plan true",
serviceClass: true,
servicePlan: truePtr(),
bindable: true,
},
{
name: "service false, plan not set",
serviceClass: false,
bindable: false,
},
{
name: "service false, plan false",
serviceClass: false,
servicePlan: falsePtr(),
bindable: false,
},
{
name: "service false, plan true",
serviceClass: false,
servicePlan: truePtr(),
bindable: true,
},
}
for _, tc := range cases {
sc := serviceClass(tc.serviceClass)
plan := servicePlan(tc.servicePlan)
if e, a := tc.bindable, isClusterServicePlanBindable(sc, plan); e != a {
t.Errorf("%v: unexpected result; expected %v, got %v", tc.name, e, a)
}
}
} | explode_data.jsonl/40493 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 683
} | [
2830,
3393,
3872,
20485,
70397,
1155,
353,
8840,
836,
8,
341,
52934,
1957,
1669,
2915,
71544,
480,
1807,
8,
353,
85,
16,
19127,
16,
72883,
1860,
1957,
341,
197,
52934,
1957,
1669,
633,
2271,
28678,
1860,
1957,
741,
197,
52934,
1957,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFixBisectionsDisabled(t *testing.T) {
c := NewCtx(t)
defer c.Close()
// Upload a crash report.
build := testBuild(1)
build.Manager = noFixBisectionManager
c.client2.UploadBuild(build)
crash := testCrashWithRepro(build, 20)
c.client2.ReportCrash(crash)
c.client2.pollEmailBug()
// Receive the JobBisectCause.
resp := c.client2.pollJobs(build.Manager)
c.client2.expectNE(resp.ID, "")
c.client2.expectEQ(resp.Type, dashapi.JobBisectCause)
done := &dashapi.JobDoneReq{
ID: resp.ID,
Error: []byte("testBisectFixRetry:JobBisectCause"),
}
c.client2.expectOK(c.client2.JobDone(done))
// Advance time by 30 days and read out any notification emails.
{
c.advanceTime(30 * 24 * time.Hour)
msg := c.client2.pollEmailBug()
c.expectEQ(msg.Subject, "title20")
c.expectTrue(strings.Contains(msg.Body, "Sending this report upstream."))
msg = c.client2.pollEmailBug()
c.expectEQ(msg.Subject, "[syzbot] title20")
c.expectTrue(strings.Contains(msg.Body, "syzbot found the following issue"))
}
// Ensure that we do not get a JobBisectFix.
resp = c.client2.pollJobs(build.Manager)
c.client2.expectEQ(resp.ID, "")
} | explode_data.jsonl/81428 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 469
} | [
2830,
3393,
25958,
33,
1064,
5136,
25907,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
1532,
23684,
1155,
340,
16867,
272,
10421,
2822,
197,
322,
24996,
264,
9920,
1895,
624,
69371,
1669,
1273,
11066,
7,
16,
340,
69371,
58298,
284,
902,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_reenableExtensionFails(t *testing.T) {
ctx := log.NewSyncLogger(log.NewLogfmtLogger(os.Stdout))
mm := createMockVMExtensionEnvironmentManager()
ii, _ := GetInitializationInfo("yaba", "5.0", true, testEnableCallback)
ii.SupportsDisable = true
ext, _ := getVMExtensionInternal(ctx, ii, mm)
createDirsForVMExtension(ext)
defer cleanupDirsForVMExtension(ext)
err := setDisabled(ctx, ext, true)
defer setDisabled(ctx, ext, false)
require.NoError(t, err, "setDisabled failed")
disableDependency = evilDisableDependencies{}
defer resetDependencies()
msg, err := enable(ctx, ext)
require.NoError(t, err) // We let the extension continue if we fail to reenable it
require.Equal(t, "blah", msg)
} | explode_data.jsonl/18588 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 255
} | [
2830,
3393,
1288,
12552,
12049,
37,
6209,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
1487,
7121,
12154,
7395,
12531,
7121,
2201,
12501,
7395,
9638,
83225,
1171,
2109,
76,
1669,
1855,
11571,
11187,
12049,
12723,
2043,
741,
197,
3808,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEncodeMD5(t *testing.T) {
assert.Equal(t,
"3858f62230ac3c915f300c664312c63f",
EncodeMD5("foobar"),
)
} | explode_data.jsonl/14307 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 63
} | [
2830,
3393,
32535,
6076,
20,
1155,
353,
8840,
836,
8,
341,
6948,
12808,
1155,
345,
197,
197,
1,
18,
23,
20,
23,
69,
21,
17,
17,
18,
15,
580,
18,
66,
24,
16,
20,
69,
18,
15,
15,
66,
21,
21,
19,
18,
16,
17,
66,
21,
18,
69,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestSort(t *testing.T) {
svcs := []ServiceInstance{{RegistrationTimeUTC: 1}, {RegistrationTimeUTC: 0}}
sort.Sort(serviceInstanceList(svcs))
assert.Equal(t, svcs[0].RegistrationTimeUTC, int64(0))
assert.Equal(t, svcs[1].RegistrationTimeUTC, int64(1))
} | explode_data.jsonl/46868 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 94
} | [
2830,
3393,
10231,
1155,
353,
8840,
836,
8,
341,
1903,
85,
4837,
1669,
3056,
1860,
2523,
2979,
23365,
1462,
21183,
25,
220,
16,
2137,
314,
23365,
1462,
21183,
25,
220,
15,
11248,
39487,
35976,
21656,
2523,
852,
96138,
4837,
1171,
6948,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestServiceWithHealthCheck(t *testing.T) {
service := kobject.ServiceConfig{
ContainerName: "name",
Image: "image",
ServiceType: "Headless",
HealthChecks: kobject.HealthChecks{
Readiness: kobject.HealthCheck{
Test: []string{"arg1", "arg2"},
Timeout: 10,
Interval: 5,
Retries: 3,
StartPeriod: 60,
},
Liveness: kobject.HealthCheck{
Test: []string{"arg1", "arg2"},
Timeout: 11,
Interval: 6,
Retries: 4,
StartPeriod: 61,
},
},
}
komposeObject := kobject.KomposeObject{
ServiceConfigs: map[string]kobject.ServiceConfig{"app": service},
}
k := Kubernetes{}
objects, err := k.Transform(komposeObject, kobject.ConvertOptions{CreateD: true, Replicas: 1})
if err != nil {
t.Error(errors.Wrap(err, "k.Transform failed"))
}
if err := testutils.CheckForHealthCheckLivenessAndReadiness(objects); err != nil {
t.Error(err)
}
} | explode_data.jsonl/58960 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 417
} | [
2830,
3393,
1860,
2354,
14542,
3973,
1155,
353,
8840,
836,
8,
341,
52934,
1669,
595,
1700,
13860,
2648,
515,
197,
197,
4502,
675,
25,
330,
606,
756,
197,
53397,
25,
260,
330,
1805,
756,
197,
91619,
929,
25,
256,
330,
12346,
1717,
75... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestKey_JWK(t *testing.T) {
k, err := NewKeys()
require.NoError(t, err)
require.NotNil(t, k)
err = k.Generate()
require.NoError(t, err)
assert.NotNil(t, k.publicKey)
assert.NotNil(t, k.privateKey)
jwk, err := k.JWK()
t.Log(jwk)
assert.NoError(t, err)
assert.NotEmpty(t, jwk)
assert.Equal(t, k.KeyID, jwk.Kid)
} | explode_data.jsonl/11411 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 164
} | [
2830,
3393,
1592,
10598,
68316,
1155,
353,
8840,
836,
8,
1476,
16463,
11,
1848,
1669,
1532,
8850,
741,
17957,
35699,
1155,
11,
1848,
340,
17957,
93882,
1155,
11,
595,
692,
9859,
284,
595,
57582,
741,
17957,
35699,
1155,
11,
1848,
340,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestResolveConstIntWithBadExpr(t *testing.T) {
expr := ast.BadExpr{}
_, err := resolveConstInt(&expr)
if err == nil {
t.Error("should have received error because expr is ast.BadExpr")
}
} | explode_data.jsonl/33715 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 73
} | [
2830,
3393,
56808,
19167,
1072,
2354,
17082,
16041,
1155,
353,
8840,
836,
8,
341,
8122,
649,
1669,
11763,
45946,
16041,
16094,
197,
6878,
1848,
1669,
8830,
19167,
1072,
2099,
9413,
340,
743,
1848,
621,
2092,
341,
197,
3244,
6141,
445,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestSetStmt(t *testing.T) {
fmt.Println("===== SET =====")
var s1 setStmts
s1 = append(s1, newSet("c", 100))
s1 = append(s1, newSet("b", 200))
s1 = append(s1, newSet("c", SQL("b+?", 200)))
fmt.Println(s1.ToSQL())
} | explode_data.jsonl/1262 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 107
} | [
2830,
3393,
1649,
31063,
1155,
353,
8840,
836,
8,
341,
11009,
12419,
445,
46725,
9019,
30742,
1138,
2405,
274,
16,
738,
31063,
82,
271,
1903,
16,
284,
8737,
1141,
16,
11,
501,
1649,
445,
66,
497,
220,
16,
15,
15,
1171,
1903,
16,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIngestFlushQueuedMemTable(t *testing.T) {
// Verify that ingestion forces a flush of a queued memtable.
mem := vfs.NewMem()
d, err := Open("", &Options{
FS: mem,
})
require.NoError(t, err)
// Add the key "a" to the memtable, then fill up the memtable with the key
// "b". The ingested sstable will only overlap with the queued memtable.
require.NoError(t, d.Set([]byte("a"), nil, nil))
for {
require.NoError(t, d.Set([]byte("b"), nil, nil))
d.mu.Lock()
done := len(d.mu.mem.queue) == 2
d.mu.Unlock()
if done {
break
}
}
ingest := func(keys ...string) {
t.Helper()
f, err := mem.Create("ext")
require.NoError(t, err)
w := sstable.NewWriter(f, sstable.WriterOptions{})
for _, k := range keys {
require.NoError(t, w.Set([]byte(k), nil))
}
require.NoError(t, w.Close())
require.NoError(t, d.Ingest([]string{"ext"}))
}
ingest("a")
require.NoError(t, d.Close())
} | explode_data.jsonl/40265 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 384
} | [
2830,
3393,
641,
6969,
46874,
25776,
3260,
18816,
2556,
1155,
353,
8840,
836,
8,
341,
197,
322,
25429,
429,
87347,
8437,
264,
18198,
315,
264,
57163,
1833,
2005,
382,
14145,
1669,
92941,
7121,
18816,
741,
2698,
11,
1848,
1669,
5264,
198... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestVersion1(t *testing.T) {
assert.NoError(t, PrepareEngine())
err := testEngine.DropTables(new(VersionS))
assert.NoError(t, err)
err = testEngine.CreateTables(new(VersionS))
assert.NoError(t, err)
ver := &VersionS{Name: "sfsfdsfds"}
_, err = testEngine.Insert(ver)
assert.NoError(t, err)
assert.EqualValues(t, ver.Ver, 1)
newVer := new(VersionS)
has, err := testEngine.ID(ver.Id).Get(newVer)
assert.NoError(t, err)
assert.True(t, has)
assert.EqualValues(t, newVer.Ver, 1)
newVer.Name = "-------"
_, err = testEngine.ID(ver.Id).Update(newVer)
assert.NoError(t, err)
assert.EqualValues(t, newVer.Ver, 2)
newVer = new(VersionS)
has, err = testEngine.ID(ver.Id).Get(newVer)
assert.NoError(t, err)
assert.EqualValues(t, newVer.Ver, 2)
} | explode_data.jsonl/19220 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 322
} | [
2830,
3393,
5637,
16,
1155,
353,
8840,
836,
8,
341,
6948,
35699,
1155,
11,
31166,
4571,
12367,
9859,
1669,
1273,
4571,
21688,
21670,
1755,
7,
5637,
50,
1171,
6948,
35699,
1155,
11,
1848,
692,
9859,
284,
1273,
4571,
7251,
21670,
1755,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRHELParserMultipleCVE(t *testing.T) {
_, filename, _, _ := runtime.Caller(0)
path := filepath.Join(filepath.Dir(filename))
// Test parsing testdata/fetcher_rhel_test.2.xml
testFile, _ := os.Open(filepath.Join(path, "/testdata/fetcher_rhel_test.2.xml"))
vulnerabilities, err := parseRHSA(testFile)
// Expected
expectedCve := []string{"CVE-2015-2722", "CVE-2015-2724", "CVE-2015-2725", "CVE-2015-2727", "CVE-2015-2728",
"CVE-2015-2729", "CVE-2015-2731", "CVE-2015-2733", "CVE-2015-2734", "CVE-2015-2735", "CVE-2015-2736",
"CVE-2015-2737", "CVE-2015-2738", "CVE-2015-2739", "CVE-2015-2740", "CVE-2015-2741", "CVE-2015-2743",
}
expectedSeverity := []database.Severity{database.CriticalSeverity, database.HighSeverity, database.HighSeverity,
database.MediumSeverity, database.MediumSeverity, database.MediumSeverity, database.CriticalSeverity,
database.CriticalSeverity, database.CriticalSeverity, database.CriticalSeverity, database.CriticalSeverity,
database.CriticalSeverity, database.CriticalSeverity, database.CriticalSeverity, database.CriticalSeverity,
database.MediumSeverity, database.MediumSeverity}
expectedFeatures := []database.AffectedFeature{
{
FeatureType: affectedType,
Namespace: database.Namespace{
Name: "centos:6",
VersionFormat: rpm.ParserName,
},
FeatureName: "firefox",
FixedInVersion: "0:38.1.0-1.el6_6",
AffectedVersion: "0:38.1.0-1.el6_6",
},
{
FeatureType: affectedType,
Namespace: database.Namespace{
Name: "centos:7",
VersionFormat: rpm.ParserName,
},
FeatureName: "firefox",
FixedInVersion: "0:38.1.0-1.el7_1",
AffectedVersion: "0:38.1.0-1.el7_1",
},
}
if assert.Nil(t, err) && assert.Len(t, vulnerabilities, len(expectedCve)) {
for i, vulnerability := range vulnerabilities {
assert.Equal(t, expectedCve[i], vulnerability.Name)
assert.Equal(t, fmt.Sprintf("https://access.redhat.com/security/cve/%s", expectedCve[i]), vulnerability.Link)
assert.Equal(t, expectedSeverity[i], vulnerability.Severity)
assert.Equal(t, `Mozilla Firefox is an open source web browser. XULRunner provides the XUL Runtime environment for Mozilla Firefox. Several flaws were found in the processing of malformed web content. A web page containing malicious content could cause Firefox to crash or, potentially, execute arbitrary code with the privileges of the user running Firefox.`, vulnerability.Description)
for _, expectedFeature := range expectedFeatures {
assert.Contains(t, vulnerability.Affected, expectedFeature)
}
}
}
} | explode_data.jsonl/51215 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 962
} | [
2830,
3393,
49,
50712,
6570,
32089,
67802,
1155,
353,
8840,
836,
8,
341,
197,
6878,
3899,
11,
8358,
716,
1669,
15592,
727,
13956,
7,
15,
340,
26781,
1669,
26054,
22363,
34793,
83757,
10961,
4390,
197,
322,
3393,
22314,
1273,
691,
6663,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestInvalidAnnotations(t *testing.T) {
ing := buildIngress()
fakeSecret := &mockSecret{}
data := map[string]string{}
// No annotation
_, err := NewParser(fakeSecret).Parse(ing)
if err == nil {
t.Errorf("Expected error with ingress but got nil")
}
// Invalid NameSpace
data[parser.GetAnnotationWithPrefix("auth-tls-secret")] = "demo-secret"
ing.SetAnnotations(data)
_, err = NewParser(fakeSecret).Parse(ing)
if err == nil {
t.Errorf("Expected error with ingress but got nil")
}
// Invalid Auth Certificate
data[parser.GetAnnotationWithPrefix("auth-tls-secret")] = "default/invalid-demo-secret"
ing.SetAnnotations(data)
_, err = NewParser(fakeSecret).Parse(ing)
if err == nil {
t.Errorf("Expected error with ingress but got nil")
}
// Invalid optional Annotations
data[parser.GetAnnotationWithPrefix("auth-tls-secret")] = "default/demo-secret"
data[parser.GetAnnotationWithPrefix("auth-tls-verify-client")] = "w00t"
data[parser.GetAnnotationWithPrefix("auth-tls-verify-depth")] = "abcd"
data[parser.GetAnnotationWithPrefix("auth-tls-pass-certificate-to-upstream")] = "nahh"
ing.SetAnnotations(data)
i, err := NewParser(fakeSecret).Parse(ing)
if err != nil {
t.Errorf("Unexpected error with ingress: %v", err)
}
u, ok := i.(*Config)
if !ok {
t.Errorf("expected *Config but got %v", u)
}
if u.VerifyClient != "on" {
t.Errorf("expected %v but got %v", "on", u.VerifyClient)
}
if u.ValidationDepth != 1 {
t.Errorf("expected %v but got %v", 1, u.ValidationDepth)
}
if u.PassCertToUpstream != false {
t.Errorf("expected %v but got %v", false, u.PassCertToUpstream)
}
} | explode_data.jsonl/13965 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 624
} | [
2830,
3393,
7928,
21418,
1155,
353,
8840,
836,
8,
341,
197,
287,
1669,
1936,
641,
2483,
741,
1166,
726,
19773,
1669,
609,
16712,
19773,
16094,
8924,
1669,
2415,
14032,
30953,
31483,
197,
322,
2308,
21223,
198,
197,
6878,
1848,
1669,
153... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func TestRecordServer_Export_ReturnTopPulseWhenNoRecords(t *testing.T) {
t.Parallel()
ctx := inslogger.TestContext(t)
// Pulses
firstPN := insolar.PulseNumber(pulse.MinTimePulse + 100)
secondPN := insolar.PulseNumber(firstPN + 10)
// JetKeeper
jetKeeper := executor.NewJetKeeperMock(t)
jetKeeper.TopSyncPulseMock.Return(secondPN)
// TempDB
tmpdir, err := ioutil.TempDir("", "bdb-test-")
defer os.RemoveAll(tmpdir)
require.NoError(t, err)
ops := BadgerDefaultOptions(tmpdir)
db, err := store.NewBadgerDB(ops)
require.NoError(t, err)
defer db.Stop(context.Background())
pulseStorage := insolarPulse.NewDB(db)
recordStorage := object.NewRecordDB(db)
recordPosition := object.NewRecordDB(db)
// Pulses
// Trash pulses without data
err = pulseStorage.Append(ctx, insolar.Pulse{PulseNumber: pulse.MinTimePulse})
require.NoError(t, err)
err = pulseStorage.Append(ctx, insolar.Pulse{PulseNumber: pulse.MinTimePulse + 10})
require.NoError(t, err)
err = pulseStorage.Append(ctx, insolar.Pulse{PulseNumber: pulse.MinTimePulse + 20})
require.NoError(t, err)
// LegalInfo
err = pulseStorage.Append(ctx, insolar.Pulse{PulseNumber: firstPN})
require.NoError(t, err)
err = pulseStorage.Append(ctx, insolar.Pulse{PulseNumber: secondPN})
require.NoError(t, err)
recordServer := NewRecordServer(pulseStorage, recordPosition, recordStorage, jetKeeper)
t.Run("calling for pulse with empty pulses after returns the last pulse", func(t *testing.T) {
var recs []*Record
streamMock := &streamMock{checker: func(i *Record) error {
recs = append(recs, i)
return nil
}}
err := recordServer.Export(&GetRecords{
PulseNumber: pulse.MinTimePulse,
RecordNumber: 1,
Count: 1,
}, streamMock)
require.NoError(t, err)
require.Equal(t, 1, len(recs))
resRecord := recs[0]
require.NotNil(t, resRecord.ShouldIterateFrom)
require.NotNil(t, secondPN, *resRecord.ShouldIterateFrom)
})
} | explode_data.jsonl/17013 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 742
} | [
2830,
3393,
6471,
5475,
62,
16894,
53316,
5366,
47,
17217,
4498,
2753,
25876,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
20985,
1669,
1640,
9786,
8787,
1972,
1155,
692,
197,
322,
34862,
9275,
198,
42190,
17896,
1669,
1640,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNewStream(t *testing.T) {
t.Run("test", func(t *testing.T) {
assert := base.NewAssert(t)
stream := NewStream()
assert(len(stream.frames)).Equals(1)
assert(cap(stream.frames)).Equals(streamFrameArrayInitSize)
assert(stream.readSeg).Equals(0)
assert(stream.readIndex).Equals(streamPosBody)
assert(stream.readFrame).Equals(*stream.frames[0])
assert(stream.writeSeg).Equals(0)
assert(stream.writeIndex).Equals(streamPosBody)
assert(stream.writeFrame).Equals(*stream.frames[0])
assert(*stream.frames[0]).Equals(initStreamFrame0)
stream.Release()
})
} | explode_data.jsonl/21151 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 219
} | [
2830,
3393,
3564,
3027,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
1944,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
6948,
1669,
2331,
7121,
8534,
1155,
340,
197,
44440,
1669,
1532,
3027,
741,
197,
6948,
6901,
20574,
61327,
457... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWorkflowTemplateRefInvalidWF(t *testing.T) {
wf := unmarshalWF(invalidWF)
t.Run("ProcessWFWithStoredWFT", func(t *testing.T) {
cancel, controller := newController(wf)
defer cancel()
woc := newWorkflowOperationCtx(wf, controller)
_, _, err := woc.loadExecutionSpec()
assert.Error(t, err)
woc.operate()
assert.Equal(t, wfv1.NodeError, woc.wf.Status.Phase)
})
} | explode_data.jsonl/30605 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 162
} | [
2830,
3393,
62768,
7275,
3945,
7928,
32131,
1155,
353,
8840,
836,
8,
341,
6692,
69,
1669,
650,
27121,
32131,
5900,
1891,
32131,
340,
3244,
16708,
445,
7423,
32131,
2354,
93243,
54,
3994,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRegistrar_JoinChannel(t *testing.T) {
// system channel
confSys := genesisconfig.Load(genesisconfig.SampleInsecureSoloProfile, configtest.GetDevConfigDir())
genesisBlockSys := encoder.New(confSys).GenesisBlockForChannel("sys-channel")
confApp := genesisconfig.Load(genesisconfig.SampleInsecureSoloProfile, configtest.GetDevConfigDir())
confApp.Consortiums = nil
confApp.Consortium = ""
genesisBlockApp := encoder.New(confApp).GenesisBlockForChannel("my-channel")
cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
assert.NoError(t, err)
t.Run("Reject join when system channel exists", func(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "registrar_test-")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
ledgerFactory, _ := newLedgerAndFactory(tmpdir, "sys-channel", genesisBlockSys)
mockConsenters := map[string]consensus.Consenter{confSys.Orderer.OrdererType: &mockConsenter{}}
registrar := NewRegistrar(localconfig.TopLevel{}, ledgerFactory, mockCrypto(), &disabled.Provider{}, cryptoProvider)
registrar.Initialize(mockConsenters)
info, err := registrar.JoinChannel("some-app-channel", &cb.Block{}, false)
assert.EqualError(t, err, "system channel exists")
assert.Equal(t, types.ChannelInfo{}, info)
})
t.Run("Reject join when channel exists", func(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "registrar_test-")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
ledgerFactory, _ := newLedgerAndFactory(tmpdir, "", nil)
mockConsenters := map[string]consensus.Consenter{confSys.Orderer.OrdererType: &mockConsenter{}, "etcdraft": &mockConsenter{}}
config := localconfig.TopLevel{}
config.General.BootstrapMethod = "none"
config.General.GenesisFile = ""
registrar := NewRegistrar(config, ledgerFactory, mockCrypto(), &disabled.Provider{}, cryptoProvider)
registrar.Initialize(mockConsenters)
ledger, err := ledgerFactory.GetOrCreate("my-channel")
assert.NoError(t, err)
ledger.Append(genesisBlockApp)
// Before creating the chain, it doesn't exist
assert.Nil(t, registrar.GetChain("my-channel"))
// After creating the chain, it exists
registrar.CreateChain("my-channel")
assert.NotNil(t, registrar.GetChain("my-channel"))
info, err := registrar.JoinChannel("my-channel", &cb.Block{}, false)
assert.EqualError(t, err, "channel already exists")
assert.Equal(t, types.ChannelInfo{}, info)
})
t.Run("no etcdraft consenter without system channel", func(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "registrar_test-")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
ledgerFactory, _ := newLedgerAndFactory(tmpdir, "", nil)
mockConsenters := map[string]consensus.Consenter{"not-raft": &mockConsenter{}}
config := localconfig.TopLevel{}
config.General.BootstrapMethod = "none"
config.General.GenesisFile = ""
registrar := NewRegistrar(config, ledgerFactory, mockCrypto(), &disabled.Provider{}, cryptoProvider)
assert.Panics(t, func() { registrar.Initialize(mockConsenters) })
})
} | explode_data.jsonl/37988 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1074
} | [
2830,
3393,
70252,
10598,
1961,
9629,
1155,
353,
8840,
836,
8,
341,
197,
322,
1849,
5496,
198,
67850,
32792,
1669,
59366,
1676,
13969,
36884,
13774,
1676,
76266,
641,
25132,
89299,
8526,
11,
2193,
1944,
2234,
14592,
2648,
6184,
2398,
8228... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTasks_RemoveTask(t *testing.T) {
err := tm.RemoveTask(3)
if err != nil {
t.Error("Failed to remove task")
}
if tm.TotalTask() != 2 {
t.Error("Task did not remove properly!")
}
} | explode_data.jsonl/66651 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 81
} | [
2830,
3393,
25449,
66843,
6262,
1155,
353,
8840,
836,
8,
341,
9859,
1669,
17333,
13270,
6262,
7,
18,
340,
743,
1848,
961,
2092,
341,
197,
3244,
6141,
445,
9408,
311,
4057,
3383,
1138,
197,
532,
743,
17333,
35997,
6262,
368,
961,
220,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 3 |
func TestManagementConfigurationByNameDoesNotExist(t *testing.T) {
conf, cleanupConfig := testutil.InitConfig(t)
defer cleanupConfig(t)
_, err := conf.GetManagementConfiguration(fmt.Sprintf("%s-test", config.AirshipDefaultContext))
assert.Error(t, err)
} | explode_data.jsonl/57914 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 84
} | [
2830,
3393,
22237,
7688,
16898,
21468,
45535,
1155,
353,
8840,
836,
8,
341,
67850,
11,
21290,
2648,
1669,
1273,
1314,
26849,
2648,
1155,
340,
16867,
21290,
2648,
1155,
692,
197,
6878,
1848,
1669,
2335,
2234,
22237,
7688,
28197,
17305,
443... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestRequestErrorWhenAdapterReturn(t *testing.T) {
a := assert.New(t)
ctx := context.Background()
actionMock := &actionServiceClientMock{}
actionMock.On("ReturnToLaunch", mock.Anything, mock.Anything).Return(nil, ErrRequest)
ret := AdapterReturnInternal(ctx, actionMock)
a.Equal("rtl command error: request error", ret.Error())
} | explode_data.jsonl/76179 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 109
} | [
2830,
3393,
1900,
1454,
4498,
5940,
5598,
1155,
353,
8840,
836,
8,
341,
11323,
1669,
2060,
7121,
1155,
692,
20985,
1669,
2266,
19047,
2822,
38933,
11571,
1669,
609,
1311,
1860,
2959,
11571,
16094,
38933,
11571,
8071,
445,
5598,
1249,
3206... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestChannelIsSubscribed(t *testing.T) {
t.Parallel()
cs := &cryptoService{}
cs.On("VerifyBlock", mock.Anything).Return(nil)
adapter := new(gossipAdapterMock)
configureAdapter(adapter)
gc := NewGossipChannel(pkiIDInOrg1, orgInChannelA, cs, channelA, adapter, &joinChanMsg{}, disabledMetrics)
adapter.On("Gossip", mock.Anything)
adapter.On("Forward", mock.Anything)
adapter.On("Send", mock.Anything, mock.Anything)
adapter.On("DeMultiplex", mock.Anything)
gc.HandleMessage(&receivedMsg{msg: createStateInfoMsg(10, pkiIDInOrg1, channelA), PKIID: pkiIDInOrg1})
assert.True(t, gc.EligibleForChannel(discovery.NetworkMember{PKIid: pkiIDInOrg1}))
} | explode_data.jsonl/66320 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 245
} | [
2830,
3393,
9629,
3872,
3136,
49785,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
71899,
1669,
609,
35772,
1860,
16094,
71899,
8071,
445,
32627,
4713,
497,
7860,
13311,
1596,
568,
5598,
27907,
340,
197,
19731,
1669,
501,
3268,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRepo_Update(t *testing.T) {
// setup test server
s := httptest.NewServer(server.FakeHandler())
// setup flags
authSet := flag.NewFlagSet("test", 0)
authSet.String("api.addr", s.URL, "doc")
authSet.String("api.token.access", test.TestTokenGood, "doc")
authSet.String("api.token.refresh", "superSecretRefreshToken", "doc")
fullSet := flag.NewFlagSet("test", 0)
fullSet.String("api.addr", s.URL, "doc")
fullSet.String("api.token.access", test.TestTokenGood, "doc")
fullSet.String("api.token.refresh", "superSecretRefreshToken", "doc")
fullSet.String("org", "github", "doc")
fullSet.String("repo", "octocat", "doc")
fullSet.String("output", "json", "doc")
// setup tests
tests := []struct {
failure bool
set *flag.FlagSet
}{
{
failure: false,
set: fullSet,
},
{
failure: true,
set: authSet,
},
{
failure: true,
set: flag.NewFlagSet("test", 0),
},
}
// run tests
for _, test := range tests {
err := update(cli.NewContext(&cli.App{Name: "vela", Version: "v0.0.0"}, test.set, nil))
if test.failure {
if err == nil {
t.Errorf("update should have returned err")
}
continue
}
if err != nil {
t.Errorf("update returned err: %v", err)
}
}
} | explode_data.jsonl/29909 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 517
} | [
2830,
3393,
25243,
47393,
1155,
353,
8840,
836,
8,
341,
197,
322,
6505,
1273,
3538,
198,
1903,
1669,
54320,
70334,
7121,
5475,
21421,
991,
726,
3050,
12367,
197,
322,
6505,
8042,
198,
78011,
1649,
1669,
5181,
7121,
12135,
1649,
445,
194... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestChanAvailableBandwidth(t *testing.T) {
t.Parallel()
// Create a test channel which will be used for the duration of this
// unittest. The channel will be funded evenly with Alice having 5 BTC,
// and Bob having 5 BTC.
aliceChannel, bobChannel, cleanUp, err := CreateTestChannels()
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
defer cleanUp()
assertBandwidthEstimateCorrect := func(aliceInitiate bool) {
// With the HTLC's added, we'll now query the AvailableBalance
// method for the current available channel bandwidth from
// Alice's PoV.
aliceAvailableBalance := aliceChannel.AvailableBalance()
// With this balance obtained, we'll now trigger a state update
// to actually determine what the current up to date balance
// is.
if aliceInitiate {
err := forceStateTransition(aliceChannel, bobChannel)
if err != nil {
t.Fatalf("unable to complete alice's state "+
"transition: %v", err)
}
} else {
err := forceStateTransition(bobChannel, aliceChannel)
if err != nil {
t.Fatalf("unable to complete alice's state "+
"transition: %v", err)
}
}
// Now, we'll obtain the current available bandwidth in Alice's
// latest commitment and compare that to the prior estimate.
aliceBalance := aliceChannel.channelState.LocalCommitment.LocalBalance
if aliceBalance != aliceAvailableBalance {
_, _, line, _ := runtime.Caller(1)
t.Fatalf("line: %v, incorrect balance: expected %v, "+
"got %v", line, aliceBalance,
aliceAvailableBalance)
}
}
// First, we'll add 3 outgoing HTLC's from Alice to Bob.
const numHtlcs = 3
var htlcAmt lnwire.MilliSatoshi = 100000
alicePreimages := make([][32]byte, numHtlcs)
for i := 0; i < numHtlcs; i++ {
htlc, preImage := createHTLC(i, htlcAmt)
if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil {
t.Fatalf("unable to add htlc: %v", err)
}
if _, err := bobChannel.ReceiveHTLC(htlc); err != nil {
t.Fatalf("unable to recv htlc: %v", err)
}
alicePreimages[i] = preImage
}
assertBandwidthEstimateCorrect(true)
// We'll repeat the same exercise, but with non-dust HTLCs. So we'll
// crank up the value of the HTLC's we're adding to the commitment
// transaction.
htlcAmt = lnwire.NewMSatFromSatoshis(30000)
for i := 0; i < numHtlcs; i++ {
htlc, preImage := createHTLC(numHtlcs+i, htlcAmt)
if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil {
t.Fatalf("unable to add htlc: %v", err)
}
if _, err := bobChannel.ReceiveHTLC(htlc); err != nil {
t.Fatalf("unable to recv htlc: %v", err)
}
alicePreimages = append(alicePreimages, preImage)
}
assertBandwidthEstimateCorrect(true)
// Next, we'll have Bob 5 of Alice's HTLC's, and cancel one of them (in
// the update log).
for i := 0; i < (numHtlcs*2)-1; i++ {
preImage := alicePreimages[i]
err := bobChannel.SettleHTLC(preImage, uint64(i), nil, nil, nil)
if err != nil {
t.Fatalf("unable to settle htlc: %v", err)
}
err = aliceChannel.ReceiveHTLCSettle(preImage, uint64(i))
if err != nil {
t.Fatalf("unable to settle htlc: %v", err)
}
}
htlcIndex := uint64((numHtlcs * 2) - 1)
err = bobChannel.FailHTLC(htlcIndex, []byte("f"), nil, nil, nil)
if err != nil {
t.Fatalf("unable to cancel HTLC: %v", err)
}
err = aliceChannel.ReceiveFailHTLC(htlcIndex, []byte("bad"))
if err != nil {
t.Fatalf("unable to recv htlc cancel: %v", err)
}
// We must do a state transition before the balance is available
// for Alice.
if err := forceStateTransition(aliceChannel, bobChannel); err != nil {
t.Fatalf("unable to complete alice's state "+
"transition: %v", err)
}
// With the HTLC's settled in the log, we'll now assert that if we
// initiate a state transition, then our guess was correct.
assertBandwidthEstimateCorrect(false)
// TODO(roasbeef): additional tests from diff starting conditions
} | explode_data.jsonl/46442 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1451
} | [
2830,
3393,
46019,
16485,
33744,
3098,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
197,
322,
4230,
264,
1273,
5496,
892,
686,
387,
1483,
369,
279,
8090,
315,
419,
198,
197,
322,
19905,
13,
576,
5496,
686,
387,
23906,
41047,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestRegistryPodsDeleteProcessor_Process(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cons := bus.NewTestingConsumer(ctx)
endpoint := api.NewRegistryPodsDelete(logx.GetLog("test"), cons)
router := api_server.NewRouter(logx.GetLog("test"), endpoint)
srv := httptest.NewServer(router)
defer srv.Close()
t.Run(`empty query`, func(t *testing.T) {
req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("%s/v1/registry", srv.URL), strings.NewReader("[]"))
assert.NoError(t, err)
resp, err := http.DefaultClient.Do(req)
assert.NoError(t, err)
require.NotNil(t, req)
assert.Equal(t, resp.StatusCode, 400)
})
t.Run(`with two pods`, func(t *testing.T) {
v := []string{"1", "2"}
buf := &bytes.Buffer{}
require.NoError(t, json.NewEncoder(buf).Encode(v))
req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("%s/v1/registry", srv.URL), bytes.NewReader(buf.Bytes()))
require.NoError(t, err)
_, err = http.DefaultClient.Do(req)
assert.NoError(t, err)
fixture.WaitNoErrorT10(t, cons.ExpectMessagesFn(
bus.NewMessage("1", nil),
bus.NewMessage("2", nil),
))
})
} | explode_data.jsonl/67733 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 470
} | [
2830,
3393,
15603,
23527,
82,
6435,
22946,
70241,
1155,
353,
8840,
836,
8,
341,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
16867,
9121,
2822,
197,
6254,
1669,
5828,
7121,
16451,
29968,
7502,
340,
6246,
2768,
1669,
6330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDLOInsert(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithDLO(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
CheckHash: true,
ContentType: "image/jpeg",
}
out, err := c.DynamicLargeObjectCreateFile(ctx, &opts)
if err != nil {
t.Fatal(err)
}
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
_, err = fmt.Fprintf(multi, "%d%s\n", 0, CONTENTS)
if err != nil {
t.Fatal(err)
}
_, _ = fmt.Fprintf(buf, "\n%d %s\n", 1, CONTENTS)
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
} | explode_data.jsonl/12721 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 352
} | [
2830,
3393,
35,
1593,
13780,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
19047,
741,
1444,
11,
60414,
1669,
1281,
4526,
2354,
35,
1593,
1155,
340,
16867,
60414,
741,
64734,
1669,
29362,
92762,
1190,
43451,
515,
197,
197,
4502,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestNoGOPATH_Issue37984(t *testing.T) {
const files = `
-- main.go --
package main
func _() {
fmt.Println("Hello World")
}
`
editorConfig := EditorConfig{Env: map[string]string{"GOPATH": ""}}
withOptions(editorConfig).run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.Await(env.DiagnosticAtRegexp("main.go", "fmt"))
env.SaveBuffer("main.go")
env.Await(EmptyDiagnostics("main.go"))
})
} | explode_data.jsonl/38914 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 176
} | [
2830,
3393,
2753,
98733,
4827,
7959,
83890,
18,
22,
24,
23,
19,
1155,
353,
8840,
836,
8,
341,
4777,
3542,
284,
22074,
313,
1887,
18002,
39514,
1722,
1887,
271,
2830,
716,
368,
341,
11009,
12419,
445,
9707,
4337,
1138,
532,
3989,
78077... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReaderConsumerGroup(t *testing.T) {
t.Parallel()
tests := []struct {
scenario string
partitions int
commitInterval time.Duration
function func(*testing.T, context.Context, *Reader)
}{
{
scenario: "basic handshake",
partitions: 1,
function: testReaderConsumerGroupHandshake,
},
{
scenario: "verify offset committed",
partitions: 1,
function: testReaderConsumerGroupVerifyOffsetCommitted,
},
{
scenario: "verify offset committed when using interval committer",
partitions: 1,
commitInterval: 400 * time.Millisecond,
function: testReaderConsumerGroupVerifyPeriodicOffsetCommitter,
},
{
scenario: "rebalance across many partitions and consumers",
partitions: 8,
function: testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers,
},
{
scenario: "consumer group commits on close",
partitions: 3,
function: testReaderConsumerGroupVerifyCommitsOnClose,
},
{
scenario: "consumer group rebalance",
partitions: 3,
function: testReaderConsumerGroupRebalance,
},
{
scenario: "consumer group rebalance across topics",
partitions: 3,
function: testReaderConsumerGroupRebalanceAcrossTopics,
},
{
scenario: "consumer group reads content across partitions",
partitions: 3,
function: testReaderConsumerGroupReadContentAcrossPartitions,
},
{
scenario: "Close immediately after NewReader",
partitions: 1,
function: testConsumerGroupImmediateClose,
},
{
scenario: "Close immediately after NewReader",
partitions: 1,
function: testConsumerGroupSimple,
},
}
for _, test := range tests {
t.Run(test.scenario, func(t *testing.T) {
t.Parallel()
topic := makeTopic()
createTopic(t, topic, test.partitions)
groupID := makeGroupID()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
GroupID: groupID,
HeartbeatInterval: 2 * time.Second,
CommitInterval: test.commitInterval,
RebalanceTimeout: 2 * time.Second,
RetentionTime: time.Hour,
MinBytes: 1,
MaxBytes: 1e6,
})
defer r.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
test.function(t, ctx, r)
})
}
} | explode_data.jsonl/80374 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 961
} | [
2830,
3393,
5062,
29968,
2808,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
78216,
1669,
3056,
1235,
341,
197,
29928,
20413,
981,
914,
198,
197,
72872,
5930,
257,
526,
198,
197,
197,
17413,
10256,
882,
33795,
198,
197,
7527,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestImgTest_Init_LoadKeysFromDisk_WritesProperResultState(t *testing.T) {
unittest.MediumTest(t)
workDir := t.TempDir()
setupAuthWithGSUtil(t, workDir)
keysFile := filepath.Join(workDir, "keys.json")
require.NoError(t, ioutil.WriteFile(keysFile, []byte(`{"os": "Android"}`), 0644))
mh := mockRPCResponses("https://my-instance-gold.skia.org").Positive("pixel-tests", blankDigest).
Negative("other-test", blankDigest).
Known("11111111111111111111111111111111").Build()
// Call imgtest init with the following flags. We expect it to load the baseline expectations
// and the known hashes (both empty).
ctx, output, exit := testContext(nil, mh, nil, nil)
env := imgTest{
gitHash: "1234567890123456789012345678901234567890",
corpus: "my_corpus",
instanceID: "my-instance",
keysFile: keysFile,
passFailStep: true,
workDir: workDir,
}
runUntilExit(t, func() {
env.Init(ctx)
})
exit.AssertWasCalledWithCode(t, 0, output.String())
b, err := ioutil.ReadFile(filepath.Join(workDir, "result-state.json"))
require.NoError(t, err)
resultState := string(b)
assert.Contains(t, resultState, `"key":{"os":"Android","source_type":"my_corpus"}`)
assert.Contains(t, resultState, `"KnownHashes":{"00000000000000000000000000000000":true,"11111111111111111111111111111111":true}`)
assert.Contains(t, resultState, `"Expectations":{"other-test":{"00000000000000000000000000000000":"negative"},"pixel-tests":{"00000000000000000000000000000000":"positive"}}`)
assert.Contains(t, resultState, `"gitHash":"1234567890123456789012345678901234567890"`)
} | explode_data.jsonl/69521 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 581
} | [
2830,
3393,
13033,
2271,
15644,
19553,
8850,
3830,
47583,
2763,
23262,
1336,
712,
2077,
1397,
1155,
353,
8840,
836,
8,
341,
20479,
14267,
1321,
23090,
2271,
1155,
692,
97038,
6184,
1669,
259,
65009,
6184,
741,
84571,
5087,
2354,
16522,
27... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStackVal_MarshalJSON_GotString(t *testing.T) {
var sampleInp struct {
Val BoolString `json:"val"`
}
sampleInp.Val.Value = "A"
var sampleOut = []byte(`{"val":"A"}`)
data, _ := json.Marshal(sampleInp)
if bytes.Compare(data, sampleOut) != 0 {
t.Errorf("should be %s but got %s", sampleOut, data)
}
} | explode_data.jsonl/31039 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 130
} | [
2830,
3393,
4336,
2208,
1245,
28423,
5370,
2646,
354,
703,
1155,
353,
8840,
836,
8,
341,
2405,
6077,
641,
79,
2036,
341,
197,
197,
2208,
12608,
703,
1565,
2236,
2974,
831,
8805,
197,
532,
1903,
1516,
641,
79,
77819,
6167,
284,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestWriteParseText(t *testing.T) {
inner := bytes.NewBufferString("")
w := NewAnsiColorWriter(inner)
inputTail := "\x1b[0mtail text"
expectedTail := "tail text"
fmt.Fprintf(w, inputTail)
actualTail := inner.String()
inner.Reset()
if actualTail != expectedTail {
t.Errorf("Get %q, want %q", actualTail, expectedTail)
}
inputHead := "head text\x1b[0m"
expectedHead := "head text"
fmt.Fprintf(w, inputHead)
actualHead := inner.String()
inner.Reset()
if actualHead != expectedHead {
t.Errorf("Get %q, want %q", actualHead, expectedHead)
}
inputBothEnds := "both ends \x1b[0m text"
expectedBothEnds := "both ends text"
fmt.Fprintf(w, inputBothEnds)
actualBothEnds := inner.String()
inner.Reset()
if actualBothEnds != expectedBothEnds {
t.Errorf("Get %q, want %q", actualBothEnds, expectedBothEnds)
}
inputManyEsc := "\x1b\x1b\x1b\x1b[0m many esc"
expectedManyEsc := "\x1b\x1b\x1b many esc"
fmt.Fprintf(w, inputManyEsc)
actualManyEsc := inner.String()
inner.Reset()
if actualManyEsc != expectedManyEsc {
t.Errorf("Get %q, want %q", actualManyEsc, expectedManyEsc)
}
expectedSplit := "split text"
for _, ch := range "split \x1b[0m text" {
fmt.Fprintf(w, string(ch))
}
actualSplit := inner.String()
inner.Reset()
if actualSplit != expectedSplit {
t.Errorf("Get %q, want %q", actualSplit, expectedSplit)
}
} | explode_data.jsonl/3541 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 599
} | [
2830,
3393,
7985,
14463,
1178,
1155,
353,
8840,
836,
8,
972,
197,
4382,
1669,
5820,
7121,
4095,
703,
445,
6060,
6692,
1669,
1532,
2082,
6321,
1636,
6492,
68603,
7229,
22427,
44795,
1669,
2917,
87,
16,
65,
58,
15,
2501,
604,
1467,
5031... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
// Test_Mock_On_WithMixedVariadicFunc checks that On() records an expectation
// for a method mixing a fixed argument with a variadic tail, and that the
// variadic part is matched as a single []int argument: the exact values
// (1, 2, 3, 4) satisfy the expectation, while (1, 2, 3, 5) does not.
func Test_Mock_On_WithMixedVariadicFunc(t *testing.T) {
	// make a test impl object
	var mockedService = new(TestExampleImplementation)
	c := mockedService.
		On("TheExampleMethodMixedVariadic", 1, []int{2, 3, 4}).
		Return(nil)
	// The expectation is registered and stores exactly two arguments:
	// the fixed int and the variadic values collapsed into one slice.
	assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
	assert.Equal(t, 2, len(c.Arguments))
	assert.Equal(t, 1, c.Arguments[0])
	assert.Equal(t, []int{2, 3, 4}, c.Arguments[1])
	// Matching call succeeds; a call differing in one variadic value panics
	// (the mock has no expectation covering it).
	assert.NotPanics(t, func() {
		mockedService.TheExampleMethodMixedVariadic(1, 2, 3, 4)
	})
	assert.Panics(t, func() {
		mockedService.TheExampleMethodMixedVariadic(1, 2, 3, 5)
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 272
} | [
2830,
3393,
1245,
1176,
35482,
62,
2354,
86433,
56135,
36214,
9626,
1155,
353,
8840,
836,
8,
8022,
197,
322,
1281,
264,
1273,
11605,
1633,
319,
2405,
46149,
1860,
284,
501,
31159,
13314,
36850,
7229,
1444,
1669,
46149,
1860,
3224,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// Test_podTracker_getTrackedPod covers how getTrackedPod resolves an informer
// event object to the tracked pod. It must return the pod only when the
// object's namespace/name matches TrackedPod, unwrap cache tombstones
// (cache.DeletedFinalStateUnknown), and return nil for mismatches, nils,
// and objects of unexpected type.
func Test_podTracker_getTrackedPod(t *testing.T) {
	// setup types
	logger := logrus.NewEntry(logrus.StandardLogger())
	tests := []struct {
		name       string
		trackedPod string // namespace/podName
		obj        interface{}
		want       *v1.Pod
	}{
		{
			name:       "got-tracked-pod",
			trackedPod: "test/github-octocat-1",
			obj:        _pod,
			want:       _pod,
		},
		{
			// Same kind of object but a different pod name — not ours.
			name:       "wrong-pod",
			trackedPod: "test/github-octocat-2",
			obj:        _pod,
			want:       nil,
		},
		{
			// A non-Pod API object must be rejected, not coerced.
			name:       "invalid-type",
			trackedPod: "test/github-octocat-1",
			obj:        new(v1.PodTemplate),
			want:       nil,
		},
		{
			name:       "nil",
			trackedPod: "test/nil",
			obj:        nil,
			want:       nil,
		},
		{
			// Deleted objects may arrive wrapped in a tombstone; the inner
			// Obj must be unwrapped and matched as usual.
			name:       "tombstone-pod",
			trackedPod: "test/github-octocat-1",
			obj: cache.DeletedFinalStateUnknown{
				Key: "test/github-octocat-1",
				Obj: _pod,
			},
			want: _pod,
		},
		{
			name:       "tombstone-nil",
			trackedPod: "test/github-octocat-1",
			obj: cache.DeletedFinalStateUnknown{
				Key: "test/github-octocat-1",
				Obj: nil,
			},
			want: nil,
		},
		{
			name:       "tombstone-invalid-type",
			trackedPod: "test/github-octocat-1",
			obj: cache.DeletedFinalStateUnknown{
				Key: "test/github-octocat-1",
				Obj: new(v1.PodTemplate),
			},
			want: nil,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			p := podTracker{
				Logger:     logger,
				TrackedPod: test.trackedPod,
				// other fields not used by getTrackedPod
				// if they're needed, use newPodTracker
			}
			if got := p.getTrackedPod(test.obj); !reflect.DeepEqual(got, test.want) {
				t.Errorf("getTrackedPod() = %v, want %v", got, test.want)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 892
} | [
2830,
3393,
85337,
31133,
3062,
1282,
11191,
23527,
1155,
353,
8840,
836,
8,
341,
197,
322,
6505,
4494,
198,
17060,
1669,
1487,
20341,
7121,
5874,
12531,
20341,
53615,
7395,
12367,
78216,
1669,
3056,
1235,
341,
197,
11609,
981,
914,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// Test_padData checks padData's two outputs: the length of the padded
// payload (first return) and the payload with its CRC byte appended
// (second return). The expected CRC values are golden values for this
// implementation; see the per-case comments for the intent of each input.
func Test_padData(t *testing.T) {
	type args struct {
		input []byte
	}
	tests := []struct {
		name  string
		args  args
		want  int    // expected length
		want1 []byte // expected input + trailing CRC byte
	}{
		// because of the way our CRC works, input of 0 will have a CRC of 0
		{"a", args{[]byte{0x00, 0x00}}, 2, []byte{0x00, 0x00, 0x00}},
		// but change one bit and the crc changes
		{"b", args{[]byte{0x80, 0x00}}, 2, []byte{0x80, 0x00, 36}},
		// here's a more normal short one
		{"c", args{[]byte{0x82, 0x41}}, 2, []byte{0x82, 0x41, 124}},
		// and this is the basic 16-byte test; it should generate a 12-word result
		{"d", args{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}, 12,
			[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, got1 := padData(tt.args.input)
			if got != tt.want {
				t.Errorf("padData() got = %v, want %v", got, tt.want)
			}
			if !reflect.DeepEqual(got1, tt.want1) {
				t.Errorf("padData() got1 = %v, want %v", got1, tt.want1)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 494
} | [
2830,
3393,
30290,
1043,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
22427,
3056,
3782,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
220,
914,
198,
197,
31215,
220,
2827,
198,
197,
50780,
220,
526,
198,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// Test_PrivateLinkScopedResource_Status_WhenSerializedToJson_DeserializesAsEqual
// is a property-based round-trip test: arbitrary generated
// PrivateLinkScopedResource_Status values must survive JSON
// marshal/unmarshal unchanged.
func Test_PrivateLinkScopedResource_Status_WhenSerializedToJson_DeserializesAsEqual(t *testing.T) {
	t.Parallel()
	parameters := gopter.DefaultTestParameters()
	// Cap generated collection sizes to keep each property run fast.
	parameters.MaxSize = 10
	properties := gopter.NewProperties(parameters)
	properties.Property(
		"Round trip of PrivateLinkScopedResource_Status via JSON returns original",
		prop.ForAll(RunJSONSerializationTestForPrivateLinkScopedResourceStatus, PrivateLinkScopedResourceStatusGenerator()))
	// 240-column reporter keeps generated counterexamples on one line.
	properties.TestingRun(t, gopter.NewFormatedReporter(true, 240, os.Stdout))
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 160
} | [
2830,
3393,
43830,
3939,
39437,
4783,
36449,
62,
4498,
77521,
78967,
98054,
2848,
4756,
2121,
2993,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
67543,
1669,
728,
73137,
13275,
2271,
9706,
741,
67543,
14535,
1695,
284,
220,
16,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestTaskSortByTaskID loads the shared sort fixture, scrambles a 5-task
// window by sorting it by priority first, then verifies that SortTaskIDAsc
// and SortTaskIDDesc restore ascending/descending task-ID order.
// NOTE(review): this test mutates the package-level testTasklist and
// testExpectedList, so it depends on fixture state shared across tests.
func TestTaskSortByTaskID(t *testing.T) {
	testTasklist.LoadFromPath(testInputSort)
	// Work on a 5-task slice of the fixture starting at index 21.
	taskID := 21
	testTasklist = testTasklist[taskID : taskID+5]
	// Pre-sort by priority so the subsequent ID sort actually reorders.
	if err := testTasklist.Sort(SortPriorityAsc); err != nil {
		t.Fatal(err)
	}
	if err := testTasklist.Sort(SortTaskIDAsc); err != nil {
		t.Fatal(err)
	}
	testExpectedList = []string{
		"(B) Task 1",
		"(A) Task 2",
		"Task 3 due:2020-11-11",
		"(C) Task 4 due:2020-12-12",
		"x Task 5",
	}
	checkTaskListOrder(t, testTasklist, testExpectedList)
	if err := testTasklist.Sort(SortTaskIDDesc); err != nil {
		t.Fatal(err)
	}
	testExpectedList = []string{
		"x Task 5",
		"(C) Task 4 due:2020-12-12",
		"Task 3 due:2020-11-11",
		"(A) Task 2",
		"(B) Task 1",
	}
	checkTaskListOrder(t, testTasklist, testExpectedList)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 329
} | [
2830,
3393,
6262,
10231,
1359,
6262,
915,
1155,
353,
8840,
836,
8,
341,
18185,
6262,
1607,
13969,
3830,
1820,
8623,
2505,
10231,
340,
49115,
915,
1669,
220,
17,
16,
271,
18185,
6262,
1607,
284,
1273,
6262,
1607,
58,
8202,
915,
549,
33... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestStoreWatchExpireRefresh drives key expiry with a fake clock and checks
// the interaction between Refresh-mode TTL updates and watch notifications:
// expirations emit "expire" events, while a TTL refresh (Refresh: true)
// consumes an index without emitting a watch event.
func TestStoreWatchExpireRefresh(t *testing.T) {
	s := newStore()
	fc := newFakeClock()
	s.clock = fc
	// Event index after the two Creates below (indices 1 and 2).
	var eidx uint64 = 2
	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true})
	s.Create("/foofoo", false, "barbarbar", false, TTLOptionSet{ExpireTime: fc.Now().Add(1200 * time.Millisecond), Refresh: true})
	// Make sure we set watch updates when Refresh is true for newly created keys
	w, _ := s.Watch("/", true, false, 0)
	testutil.AssertEqual(t, w.StartIndex(), eidx)
	c := w.EventChan()
	// No event yet: nothing has expired.
	e := nbselect(c)
	testutil.AssertNil(t, e)
	// Advance past /foo's 500ms TTL only.
	fc.Advance(600 * time.Millisecond)
	s.DeleteExpiredKeys(fc.Now())
	eidx = 3
	e = nbselect(c)
	testutil.AssertEqual(t, e.EtcdIndex, eidx)
	testutil.AssertEqual(t, e.Action, "expire")
	testutil.AssertEqual(t, e.Node.Key, "/foo")
	// Refresh /foofoo's TTL (index 4, silently) then let it expire (index 5).
	s.Update("/foofoo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true})
	w, _ = s.Watch("/", true, false, 4)
	fc.Advance(700 * time.Millisecond)
	s.DeleteExpiredKeys(fc.Now())
	eidx = 5 // We should skip 4 because a TTL update should occur with no watch notification if set `TTLOptionSet.Refresh` to true
	testutil.AssertEqual(t, w.StartIndex(), eidx-1)
	e = nbselect(w.EventChan())
	testutil.AssertEqual(t, e.EtcdIndex, eidx)
	testutil.AssertEqual(t, e.Action, "expire")
	testutil.AssertEqual(t, e.Node.Key, "/foofoo")
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 562
} | [
2830,
3393,
6093,
14247,
8033,
554,
14567,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
501,
6093,
741,
1166,
66,
1669,
501,
52317,
26104,
741,
1903,
50546,
284,
25563,
271,
2405,
384,
6361,
2622,
21,
19,
284,
220,
17,
198,
1903,
7251,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestNullIDResilience asserts that GraphQL IDs marshalled from the zero
// value (0) are handled defensively: node lookups resolve to an empty ID
// rather than a phantom object, and mutations taking such IDs fail with
// ErrIDIsZero instead of acting on record 0.
func TestNullIDResilience(t *testing.T) {
	sr := &Resolver{store: ee.NewStore(dbconn.Global)}
	s, err := graphqlbackend.NewSchema(sr, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Bypass authz so failures below come from ID validation, not auth.
	ctx := backend.WithAuthzBypass(context.Background())
	// One zero-valued ID per node type the resolver serves.
	ids := []graphql.ID{
		marshalPatchSetID(0),
		marshalPatchID(0),
		campaigns.MarshalCampaignID(0),
		marshalExternalChangesetID(0),
	}
	for _, id := range ids {
		var response struct{ Node struct{ ID string } }
		query := fmt.Sprintf(`query { node(id: %q) { id } }`, id)
		apitest.MustExec(ctx, t, s, nil, &response, query)
		if have, want := response.Node.ID, ""; have != want {
			t.Fatalf("node has wrong ID. have=%q, want=%q", have, want)
		}
	}
	// Every mutation accepting one of these IDs must reject the zero value.
	mutations := []string{
		fmt.Sprintf(`mutation { retryCampaignChangesets(campaign: %q) { id } }`, campaigns.MarshalCampaignID(0)),
		fmt.Sprintf(`mutation { closeCampaign(campaign: %q) { id } }`, campaigns.MarshalCampaignID(0)),
		fmt.Sprintf(`mutation { deleteCampaign(campaign: %q) { alwaysNil } }`, campaigns.MarshalCampaignID(0)),
		fmt.Sprintf(`mutation { publishChangeset(patch: %q) { alwaysNil } }`, marshalPatchID(0)),
		fmt.Sprintf(`mutation { syncChangeset(changeset: %q) { alwaysNil } }`, marshalExternalChangesetID(0)),
	}
	for _, m := range mutations {
		var response struct{}
		errs := apitest.Exec(ctx, t, s, nil, &response, m)
		if len(errs) == 0 {
			t.Fatalf("expected errors but none returned (mutation: %q)", m)
		}
		if have, want := errs[0].Error(), fmt.Sprintf("graphql: %s", ErrIDIsZero.Error()); have != want {
			t.Fatalf("wrong errors. have=%s, want=%s (mutation: %q)", have, want, m)
		}
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 643
} | [
2830,
3393,
3280,
915,
1061,
321,
1835,
1155,
353,
8840,
836,
8,
341,
1903,
81,
1669,
609,
18190,
90,
4314,
25,
36343,
7121,
6093,
9791,
5148,
27381,
73822,
1903,
11,
1848,
1669,
48865,
20942,
7121,
8632,
67650,
11,
2092,
11,
2092,
34... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestMaxPerSkip(t *testing.T) {
test(t, []testSequence{
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1000,
SYN: true,
BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3}},
},
want: []Reassembly{
Reassembly{
Start: true,
Bytes: []byte{1, 2, 3},
},
},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1007,
BaseLayer: layers.BaseLayer{Payload: []byte{3, 2, 3}},
},
want: []Reassembly{},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1010,
BaseLayer: layers.BaseLayer{Payload: []byte{4, 2, 3}},
},
want: []Reassembly{},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1013,
BaseLayer: layers.BaseLayer{Payload: []byte{5, 2, 3}},
},
want: []Reassembly{},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1016,
BaseLayer: layers.BaseLayer{Payload: []byte{6, 2, 3}},
},
want: []Reassembly{
Reassembly{
Skip: 3,
Bytes: []byte{3, 2, 3},
},
Reassembly{
Bytes: []byte{4, 2, 3},
},
Reassembly{
Bytes: []byte{5, 2, 3},
},
Reassembly{
Bytes: []byte{6, 2, 3},
},
},
},
})
} | explode_data.jsonl/9651 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 816
} | [
2830,
3393,
5974,
3889,
35134,
1155,
353,
8840,
836,
8,
972,
18185,
1155,
11,
3056,
1944,
14076,
1666,
197,
197,
1666,
298,
17430,
25,
13617,
836,
7123,
1666,
571,
7568,
1287,
7084,
25,
256,
220,
16,
1871,
571,
10957,
267,
7084,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// Test_TableBorder_DeleteBorder is an integration test against the Words
// cloud API: it uploads a fixture document to remote storage, then deletes
// the left border of the first table's first cell and fails on any API error.
// NOTE(review): requires live credentials/storage from ReadConfiguration.
func Test_TableBorder_DeleteBorder(t *testing.T) {
	config := ReadConfiguration(t)
	client, ctx := PrepareTest(t, config)
	remoteDataFolder := remoteBaseTestDataFolder + "/DocumentElements/Tables"
	localFile := "DocumentElements/Tables/TablesGet.docx"
	remoteFileName := "TestDeleteBorder.docx"
	UploadNextFileToStorage(t, ctx, client, GetLocalFile(localFile), remoteDataFolder + "/" + remoteFileName)
	options := map[string]interface{}{
		// Path to the target cell: first row/cell of the second table (index 1).
		"nodePath": "tables/1/rows/0/cells/0",
		"folder": remoteDataFolder,
	}
	request := &models.DeleteBorderRequest{
		Name: ToStringPointer(remoteFileName),
		BorderType: ToStringPointer("left"),
		Optionals: options,
	}
	_, _, err := client.WordsApi.DeleteBorder(ctx, request)
	if err != nil {
		t.Error(err)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 320
} | [
2830,
3393,
42544,
10691,
57418,
10691,
1155,
353,
8840,
836,
8,
341,
262,
2193,
1669,
4457,
7688,
1155,
340,
262,
2943,
11,
5635,
1669,
31166,
2271,
1155,
11,
2193,
340,
262,
8699,
1043,
13682,
1669,
8699,
3978,
83920,
13682,
488,
3521... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestTabletServerExecNonExistentConnection verifies that Execute fails when
// given a reserved-connection ID (123456) that was never created, instead of
// silently running the query without the reservation.
func TestTabletServerExecNonExistentConnection(t *testing.T) {
	db, tsv := setupTabletServerTest(t, "")
	defer tsv.StopService()
	defer db.Close()
	// Answer any SQL with an empty result so only the connection lookup
	// can cause a failure.
	db.AddQueryPattern(".*", &sqltypes.Result{})
	target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}
	options := &querypb.ExecuteOptions{}
	// run a query with a non-existent reserved id
	_, err := tsv.Execute(ctx, &target, "select 42", nil, 0, 123456, options)
	require.Error(t, err)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 166
} | [
2830,
3393,
2556,
83,
5475,
10216,
8121,
840,
18128,
4526,
1155,
353,
8840,
836,
8,
341,
20939,
11,
259,
3492,
1669,
6505,
2556,
83,
5475,
2271,
1155,
11,
14676,
16867,
259,
3492,
30213,
1860,
741,
16867,
2927,
10421,
2822,
20939,
1904,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMysender(t *testing.T) {
c := conf.MapConf{
"name": "ohmysender",
"prefix": "test",
}
s, err := NewMySender(c)
datas := []sender.Data{
{
"abc": 1,
"cde": "testmessage",
},
{
"abc": 2,
"cde": "testmessage2",
},
}
assert.Nil(t, err)
assert.Equal(t, s.Name(), "ohmysender")
assert.Nil(t, s.Send(datas))
assert.Nil(t, s.Close())
} | explode_data.jsonl/67111 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 193
} | [
2830,
3393,
44,
1047,
1659,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
2335,
10104,
15578,
515,
197,
197,
31486,
788,
256,
330,
2267,
8209,
1659,
756,
197,
197,
1,
11849,
788,
330,
1944,
756,
197,
532,
1903,
11,
1848,
1669,
1532,
505... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestIsEdgeNode seeds a LocationCache with one known edge node and checks
// IsEdgeNode both for that node (true) and for an unregistered name (false).
func TestIsEdgeNode(t *testing.T) {
	// nodes is a package-level fixture; register its first entry as an edge node.
	nodeName := nodes[0]
	locationCache := LocationCache{}
	locationCache.EdgeNodes.Store(nodeName, commonconst.MessageSuccessfulContent)
	tests := []struct {
		name     string
		lc       *LocationCache
		nodeName string
		want     bool
	}{
		{
			name:     "TestIsEdgeNode() Case: Node is edgenode",
			lc:       &locationCache,
			nodeName: nodeName,
			want:     true,
		},
		{
			name:     "TestIsEdgeNode() Case: Node is not edgenode",
			lc:       &locationCache,
			nodeName: "notExistNode",
			want:     false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if got := test.lc.IsEdgeNode(test.nodeName); !reflect.DeepEqual(got, test.want) {
				t.Errorf("Manager.TestIsEdgeNode() case failed: got = %v, want = %v", got, test.want)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 361
} | [
2830,
3393,
3872,
11656,
1955,
1155,
353,
8840,
836,
8,
341,
20831,
675,
1669,
7798,
58,
15,
921,
53761,
8233,
1669,
9866,
8233,
16094,
53761,
8233,
13,
11656,
12288,
38047,
6958,
675,
11,
4185,
1024,
8472,
36374,
2762,
692,
78216,
1669... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestDaogetSubtime is a smoke test for getSubtime: it only asserts that the
// function returns a non-nil value, using the goconvey BDD harness.
func TestDaogetSubtime(t *testing.T) {
	convey.Convey("getSubtime", t, func(ctx convey.C) {
		subtime := getSubtime()
		ctx.Convey("Then subtime should not be nil.", func(ctx convey.C) {
			ctx.So(subtime, convey.ShouldNotBeNil)
		})
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
12197,
455,
3136,
1678,
1155,
353,
8840,
836,
8,
341,
37203,
5617,
4801,
5617,
445,
455,
3136,
1678,
497,
259,
11,
2915,
7502,
20001,
727,
8,
341,
197,
28624,
1678,
1669,
633,
3136,
1678,
741,
197,
20985,
4801,
5617,
445,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestChangefeedUpdatePrimaryKey checks changefeed output when a primary-key
// column is rewritten: an UPDATE of the key must surface as a delete of the
// old key ([0] -> null) plus an insert at the new key, and a DELETE must emit
// a null payload. Run against both sinkless and enterprise feed factories.
func TestChangefeedUpdatePrimaryKey(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
		sqlDB := sqlutils.MakeSQLRunner(db)
		// This NOT NULL column checks a regression when used with UPDATE-ing a
		// primary key column or with DELETE.
		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`)
		sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'bar')`)
		foo := feed(t, f, `CREATE CHANGEFEED FOR foo`)
		defer closeFeed(t, foo)
		// Initial scan emits the existing row.
		assertPayloads(t, foo, []string{
			`foo: [0]->{"after": {"a": 0, "b": "bar"}}`,
		})
		// Changing the PK is delete-at-old-key + insert-at-new-key.
		sqlDB.Exec(t, `UPDATE foo SET a = 1`)
		assertPayloads(t, foo, []string{
			`foo: [0]->{"after": null}`,
			`foo: [1]->{"after": {"a": 1, "b": "bar"}}`,
		})
		sqlDB.Exec(t, `DELETE FROM foo`)
		assertPayloads(t, foo, []string{
			`foo: [1]->{"after": null}`,
		})
	}
	t.Run(`sinkless`, sinklessTest(testFn))
	t.Run(`enterprise`, enterpriseTest(testFn))
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 416
} | [
2830,
3393,
1143,
524,
823,
12051,
4289,
25981,
1155,
353,
8840,
836,
8,
341,
16867,
23352,
1944,
36892,
2271,
1155,
8,
2822,
18185,
24911,
1669,
2915,
1155,
353,
8840,
836,
11,
2927,
353,
34073,
1470,
22537,
11,
282,
15307,
67880,
8787... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestPurgeByHeight commits blocks 0..3000 through the coordinator and
// verifies the transient store is purged exactly twice — at block 2000
// (retain height 1000) and block 3000 (retain height 2000) — and never in
// between. The mock store's Once() expectations enforce the retain heights.
func TestPurgeByHeight(t *testing.T) {
	// Scenario: commit 3000 blocks and ensure that PurgeByHeight is called
	// at commit of blocks 2000 and 3000 with values of max block to retain of 1000 and 2000
	peerSelfSignedData := common.SignedData{}
	cs := createcollectionStore(peerSelfSignedData).thatAcceptsAll()
	var purgeHappened bool
	// Consume-and-reset helper so each expected purge is observed once.
	assertPurgeHappened := func() {
		assert.True(t, purgeHappened)
		purgeHappened = false
	}
	committer := &committerMock{}
	committer.On("CommitWithPvtData", mock.Anything).Return(nil)
	store := &mockTransientStore{t: t}
	store.On("PurgeByHeight", uint64(1000)).Return(nil).Once().Run(func(_ mock.Arguments) {
		purgeHappened = true
	})
	store.On("PurgeByHeight", uint64(2000)).Return(nil).Once().Run(func(_ mock.Arguments) {
		purgeHappened = true
	})
	store.On("PurgeByTxids", mock.Anything).Return(nil)
	fetcher := &fetcherMock{t: t}
	bf := &blockFactory{
		channelID: "test",
	}
	coordinator := NewCoordinator(Support{
		CollectionStore: cs,
		Committer:       committer,
		Fetcher:         fetcher,
		TransientStore:  store,
		Validator:       &validatorMock{},
	}, peerSelfSignedData)
	for i := 0; i <= 3000; i++ {
		block := bf.create()
		block.Header.Number = uint64(i)
		err := coordinator.StoreBlock(block, nil)
		assert.NoError(t, err)
		// Purge must fire only at the two expected commit heights.
		if i != 2000 && i != 3000 {
			assert.False(t, purgeHappened)
		} else {
			assertPurgeHappened()
		}
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 536
} | [
2830,
3393,
47,
39823,
1359,
3640,
1155,
353,
8840,
836,
8,
341,
197,
322,
58663,
25,
5266,
220,
18,
15,
15,
15,
10010,
323,
5978,
429,
14532,
709,
1359,
3640,
374,
2598,
198,
197,
322,
518,
5266,
315,
10010,
220,
17,
15,
15,
15,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestScanAVP(t *testing.T) {
if avp, err := Default.ScanAVP("Session-Id"); err != nil {
t.Error(err)
} else if avp.Code != 263 {
t.Fatalf("Unexpected code %d for Session-Id AVP", avp.Code)
}
} | explode_data.jsonl/9073 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 86
} | [
2830,
3393,
26570,
8093,
47,
1155,
353,
8840,
836,
8,
341,
743,
1822,
79,
11,
1848,
1669,
7899,
54874,
8093,
47,
445,
5283,
12,
764,
5038,
1848,
961,
2092,
341,
197,
3244,
6141,
3964,
340,
197,
92,
770,
421,
1822,
79,
20274,
961,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestUserEntityGetAllWhileSUCCESS(t *testing.T) {
resp := sendGet("http://localhost:8080/Search/Users&p=email&v=testing@company.com")
response := responseToString(resp)
compareResults(t, response, ResponseUserEntityAllEnabled)
} | explode_data.jsonl/59348 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 76
} | [
2830,
3393,
1474,
3030,
1949,
2403,
7983,
39308,
1155,
353,
8840,
836,
8,
341,
34653,
1669,
3624,
1949,
445,
1254,
1110,
8301,
25,
23,
15,
23,
15,
78893,
97821,
96774,
76371,
5,
85,
28,
8840,
31,
10139,
905,
1138,
21735,
1669,
2033,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestHaGroupDeleteParams_WithID(t *testing.T) {
p := NewHaGroupDeleteParams()
p = p.WithID("test-id")
require.NotNil(t, p.ID)
assert.Equal(t, "test-id", p.ID)
} | explode_data.jsonl/7755 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 76
} | [
2830,
3393,
32942,
2808,
6435,
4870,
62,
2354,
915,
1155,
353,
8840,
836,
8,
341,
3223,
1669,
1532,
32942,
2808,
6435,
4870,
741,
3223,
284,
281,
26124,
915,
445,
1944,
12897,
1138,
17957,
93882,
1155,
11,
281,
9910,
340,
6948,
12808,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestScale runs the CLI "scale -a app1" command against a mocked SDK and
// asserts exit code 0, empty stderr, and the rendered service table on
// stdout (one line per mocked service).
// NOTE(review): the expected stdout strings are whitespace-sensitive column
// layouts — confirm exact padding against RequireStdout's comparison.
func TestScale(t *testing.T) {
	testClient(t, func(e *cli.Engine, i *mocksdk.Interface) {
		// Mocked API surface the scale command reads from.
		i.On("SystemGet").Return(fxSystem(), nil)
		i.On("ServiceList", "app1").Return(structs.Services{*fxService(), *fxService()}, nil)
		i.On("ProcessList", "app1", structs.ProcessListOptions{}).Return(structs.Processes{*fxProcess(), *fxProcess()}, nil)
		res, err := testExecute(e, "scale -a app1", nil)
		require.NoError(t, err)
		require.Equal(t, 0, res.Code)
		res.RequireStderr(t, []string{""})
		res.RequireStdout(t, []string{
			"SERVICE DESIRED RUNNING CPU MEMORY",
			"service1 1 0 2 3 ",
			"service1 1 0 2 3 ",
		})
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 307
} | [
2830,
3393,
6947,
1155,
353,
8840,
836,
8,
341,
18185,
2959,
1155,
11,
2915,
2026,
353,
19521,
54424,
11,
600,
353,
16712,
51295,
41065,
8,
341,
197,
8230,
8071,
445,
2320,
1949,
1827,
5598,
955,
87,
2320,
1507,
2092,
340,
197,
8230,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestParseVolumeWithVolumeOptions(t *testing.T) {
volume, err := parseVolume("name:/target:nocopy")
expected := types.ServiceVolumeConfig{
Type: "volume",
Source: "name",
Target: "/target",
Volume: &types.ServiceVolumeVolume{NoCopy: true},
}
assert.NoError(t, err)
assert.Equal(t, expected, volume)
} | explode_data.jsonl/70102 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 115
} | [
2830,
3393,
14463,
18902,
2354,
18902,
3798,
1155,
353,
8840,
836,
8,
341,
5195,
4661,
11,
1848,
1669,
4715,
18902,
445,
606,
14375,
5657,
19266,
509,
1266,
1138,
42400,
1669,
4494,
13860,
18902,
2648,
515,
197,
27725,
25,
256,
330,
250... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestVerifyACOSettings(t *testing.T) {
settings := ACOSettings{}
err := settings.Verify()
if err == nil {
t.Error("verification should fail for invalid evaporation")
}
settings.Evaporation = 0.9
err = settings.Verify()
if err != nil {
t.Error("verification should pass for valid evaporation")
}
} | explode_data.jsonl/60132 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 109
} | [
2830,
3393,
32627,
1706,
46,
6086,
1155,
353,
8840,
836,
8,
341,
62930,
1669,
362,
8281,
6086,
16094,
9859,
1669,
5003,
54853,
741,
743,
1848,
621,
2092,
341,
197,
3244,
6141,
445,
50632,
1265,
3690,
369,
8318,
3637,
95549,
1138,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestBadStaticConfigsJSON asserts that unmarshalling a known-bad static
// config fixture into a targetgroup.Group fails.
// NOTE(review): ioutil.ReadFile is deprecated in favor of os.ReadFile since
// Go 1.16 — worth migrating alongside the file's import block.
func TestBadStaticConfigsJSON(t *testing.T) {
	content, err := ioutil.ReadFile("testdata/static_config.bad.json")
	// Reading the fixture itself must succeed; only the decode should fail.
	testutil.Ok(t, err)
	var tg targetgroup.Group
	err = json.Unmarshal(content, &tg)
	testutil.NotOk(t, err, "")
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 91
} | [
2830,
3393,
17082,
11690,
84905,
5370,
1155,
353,
8840,
836,
8,
341,
27751,
11,
1848,
1669,
43144,
78976,
445,
92425,
36643,
5332,
31563,
4323,
1138,
18185,
1314,
54282,
1155,
11,
1848,
340,
2405,
53188,
2169,
4074,
5407,
198,
9859,
284,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestViewSubApply(t *testing.T) {
var v View
v.AppendOwned([]byte("0123"))
v.AppendOwned([]byte("45678"))
v.AppendOwned([]byte("9abcd"))
data := []byte("0123456789abcd")
for i := 0; i <= len(data); i++ {
for j := i; j <= len(data); j++ {
t.Run(fmt.Sprintf("SubApply(%d,%d)", i, j), func(t *testing.T) {
var got []byte
v.SubApply(i, j-i, func(b []byte) {
got = append(got, b...)
})
if want := data[i:j]; !bytes.Equal(got, want) {
t.Errorf("got = %q; want %q", got, want)
}
})
}
}
} | explode_data.jsonl/52512 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 269
} | [
2830,
3393,
851,
3136,
28497,
1155,
353,
8840,
836,
8,
341,
2405,
348,
2738,
198,
5195,
8982,
57641,
10556,
3782,
445,
15,
16,
17,
18,
5455,
5195,
8982,
57641,
10556,
3782,
445,
19,
20,
21,
22,
23,
5455,
5195,
8982,
57641,
10556,
37... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestIsValidVolumeCapabilities checks the driver's capability validation:
// Mount-type capabilities are accepted under any access mode, while
// Block-type capabilities are rejected. A request is valid only if every
// capability in it is supported.
func TestIsValidVolumeCapabilities(t *testing.T) {
	tests := map[string]struct {
		caps      []*csi.VolumeCapability
		wantValid bool
	}{
		"single supported capability": {
			caps: []*csi.VolumeCapability{
				{
					AccessMode: &csi.VolumeCapability_AccessMode{ // all access modes are supported
						Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
					},
					AccessType: &csi.VolumeCapability_Mount{ // mount is supported
						Mount: &csi.VolumeCapability_MountVolume{},
					},
				},
			},
			wantValid: true,
		},
		"multiple supported capabilities": {
			caps: []*csi.VolumeCapability{
				{
					AccessMode: &csi.VolumeCapability_AccessMode{ // all access modes are supported
						Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
					},
					AccessType: &csi.VolumeCapability_Mount{ // mount is supported
						Mount: &csi.VolumeCapability_MountVolume{},
					},
				},
				{
					AccessMode: &csi.VolumeCapability_AccessMode{ // all access modes are supported
						Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
					},
					AccessType: &csi.VolumeCapability_Mount{ // mount is supported
						Mount: &csi.VolumeCapability_MountVolume{},
					},
				},
			},
			wantValid: true,
		},
		"unsupported capability": {
			caps: []*csi.VolumeCapability{
				{
					AccessMode: &csi.VolumeCapability_AccessMode{ // all access modes are supported
						Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
					},
					AccessType: &csi.VolumeCapability_Block{ // block is not supported
						Block: &csi.VolumeCapability_BlockVolume{},
					},
				},
			},
			wantValid: false,
		},
	}
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			// reason is only meaningful when gotValid is false; it is
			// surfaced in the failure message for debugging.
			gotValid, reason := isValidVolumeCapabilities(tc.caps)
			if tc.wantValid != gotValid {
				t.Fatalf("expected: %t, got: %t, reason: %s", tc.wantValid, gotValid, reason)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 808
} | [
2830,
3393,
55470,
18902,
55315,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
2415,
14032,
60,
1235,
341,
197,
1444,
2625,
414,
29838,
63229,
79106,
63746,
198,
197,
50780,
4088,
1807,
198,
197,
59403,
197,
197,
1,
15338,
7248,
22302,
788... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGetMany3(t *testing.T) {
var r []string
data := `{"MarketName":null,"Nounce":6115}`
r = GetMany(data, "Nounce", "Buys", "Sells", "Fills")
if strings.Replace(fmt.Sprintf("%v", r), " ", "", -1) != "[6115]" {
t.Fatalf("expected '%v', got '%v'", "[6115]",
strings.Replace(fmt.Sprintf("%v", r), " ", "", -1))
}
r = GetMany(data, "Nounce", "Buys", "Sells")
if strings.Replace(fmt.Sprintf("%v", r), " ", "", -1) != "[6115]" {
t.Fatalf("expected '%v', got '%v'", "[6115]",
strings.Replace(fmt.Sprintf("%v", r), " ", "", -1))
}
r = GetMany(data, "Nounce")
if strings.Replace(fmt.Sprintf("%v", r), " ", "", -1) != "[6115]" {
t.Fatalf("expected '%v', got '%v'", "[6115]",
strings.Replace(fmt.Sprintf("%v", r), " ", "", -1))
}
} | explode_data.jsonl/62357 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 417
} | [
2830,
3393,
1949,
8441,
18,
1155,
353,
8840,
836,
8,
341,
262,
762,
435,
3056,
917,
198,
262,
821,
1669,
1565,
4913,
38822,
675,
788,
2921,
1335,
45,
9734,
788,
21,
16,
16,
20,
31257,
262,
435,
284,
2126,
8441,
2592,
11,
330,
45,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestFastRecovery(t *testing.T) {
maxPayload := 32
c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
defer c.Cleanup()
c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
const iterations = 3
data := buffer.NewView(2 * maxPayload * (tcp.InitialCwnd << (iterations + 1)))
for i := range data {
data[i] = byte(i)
}
// Write all the data in one shot. Packets will only be written at the
// MTU size though.
if _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil {
t.Fatalf("Write failed: %s", err)
}
// Do slow start for a few iterations.
expected := tcp.InitialCwnd
bytesRead := 0
for i := 0; i < iterations; i++ {
expected = tcp.InitialCwnd << uint(i)
if i > 0 {
// Acknowledge all the data received so far if not on
// first iteration.
c.SendAck(790, bytesRead)
}
// Read all packets expected on this iteration. Don't
// acknowledge any of them just yet, so that we can measure the
// congestion window.
for j := 0; j < expected; j++ {
c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
bytesRead += maxPayload
}
// Check we don't receive any more packets on this iteration.
// The timeout can't be too high or we'll trigger a timeout.
c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
}
// Send 3 duplicate acks. This should force an immediate retransmit of
// the pending packet and put the sender into fast recovery.
rtxOffset := bytesRead - maxPayload*expected
for i := 0; i < 3; i++ {
c.SendAck(790, rtxOffset)
}
// Receive the retransmitted packet.
c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
// Wait before checking metrics.
metricPollFn := func() error {
if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(1); got != want {
return fmt.Errorf("got stats.TCP.FastRetransmit.Value = %d, want = %d", got, want)
}
if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(1); got != want {
return fmt.Errorf("got stats.TCP.Retransmit.Value = %d, want = %d", got, want)
}
if got, want := c.Stack().Stats().TCP.FastRecovery.Value(), uint64(1); got != want {
return fmt.Errorf("got stats.TCP.FastRecovery.Value = %d, want = %d", got, want)
}
return nil
}
if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
t.Error(err)
}
// Now send 7 mode duplicate acks. Each of these should cause a window
// inflation by 1 and cause the sender to send an extra packet.
for i := 0; i < 7; i++ {
c.SendAck(790, rtxOffset)
}
recover := bytesRead
// Ensure no new packets arrive.
c.CheckNoPacketTimeout("More packets received than expected during recovery after dupacks for this cwnd.",
50*time.Millisecond)
// Acknowledge half of the pending data.
rtxOffset = bytesRead - expected*maxPayload/2
c.SendAck(790, rtxOffset)
// Receive the retransmit due to partial ack.
c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
// Wait before checking metrics.
metricPollFn = func() error {
if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(2); got != want {
return fmt.Errorf("got stats.TCP.FastRetransmit.Value = %d, want = %d", got, want)
}
if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(2); got != want {
return fmt.Errorf("got stats.TCP.Retransmit.Value = %d, want = %d", got, want)
}
return nil
}
if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
t.Error(err)
}
// Receive the 10 extra packets that should have been released due to
// the congestion window inflation in recovery.
for i := 0; i < 10; i++ {
c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
bytesRead += maxPayload
}
// A partial ACK during recovery should reduce congestion window by the
// number acked. Since we had "expected" packets outstanding before sending
// partial ack and we acked expected/2 , the cwnd and outstanding should
// be expected/2 + 10 (7 dupAcks + 3 for the original 3 dupacks that triggered
// fast recovery). Which means the sender should not send any more packets
// till we ack this one.
c.CheckNoPacketTimeout("More packets received than expected during recovery after partial ack for this cwnd.",
50*time.Millisecond)
// Acknowledge all pending data to recover point.
c.SendAck(790, recover)
// At this point, the cwnd should reset to expected/2 and there are 10
// packets outstanding.
//
// NOTE: Technically netstack is incorrect in that we adjust the cwnd on
// the same segment that takes us out of recovery. But because of that
// the actual cwnd at exit of recovery will be expected/2 + 1 as we
// acked a cwnd worth of packets which will increase the cwnd further by
// 1 in congestion avoidance.
//
// Now in the first iteration since there are 10 packets outstanding.
// We would expect to get expected/2 +1 - 10 packets. But subsequent
// iterations will send us expected/2 + 1 + 1 (per iteration).
expected = expected/2 + 1 - 10
for i := 0; i < iterations; i++ {
// Read all packets expected on this iteration. Don't
// acknowledge any of them just yet, so that we can measure the
// congestion window.
for j := 0; j < expected; j++ {
c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
bytesRead += maxPayload
}
// Check we don't receive any more packets on this iteration.
// The timeout can't be too high or we'll trigger a timeout.
c.CheckNoPacketTimeout(fmt.Sprintf("More packets received(after deflation) than expected %d for this cwnd.", expected), 50*time.Millisecond)
// Acknowledge all the data received so far.
c.SendAck(790, bytesRead)
// In cogestion avoidance, the packets trains increase by 1 in
// each iteration.
if i == 0 {
// After the first iteration we expect to get the full
// congestion window worth of packets in every
// iteration.
expected += 10
}
expected++
}
} | explode_data.jsonl/24361 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2004
} | [
2830,
3393,
32174,
693,
7449,
1155,
353,
8840,
836,
8,
341,
22543,
29683,
1669,
220,
18,
17,
198,
1444,
1669,
2266,
7121,
1155,
11,
2622,
18,
17,
25534,
836,
7123,
28695,
1695,
10,
2708,
46917,
85,
19,
28695,
1695,
10,
2810,
29683,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestFindTablesInJson(t *testing.T) {
idx := 9
for _, j := range exp[idx : idx+1] {
pretty.Println(j)
findTablesInJSON(j, 0)
}
pretty.Println(len(explainJSONTables), explainJSONTables)
} | explode_data.jsonl/55046 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 89
} | [
2830,
3393,
9885,
21670,
641,
5014,
1155,
353,
8840,
836,
8,
341,
62077,
1669,
220,
24,
198,
2023,
8358,
502,
1669,
2088,
1343,
11597,
549,
7187,
10,
16,
60,
341,
197,
197,
32955,
12419,
3325,
340,
197,
80603,
21670,
641,
5370,
3325,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestValidateRequestMethod(t *testing.T) {
validateTransaction(t, func(tx *apm.Transaction) {
req, _ := http.NewRequest(strings.Repeat("x", 1025), "/", nil)
tx.Context.SetHTTPRequest(req)
})
} | explode_data.jsonl/792 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 75
} | [
2830,
3393,
17926,
69790,
1155,
353,
8840,
836,
8,
341,
197,
7067,
8070,
1155,
11,
2915,
27301,
353,
391,
76,
29284,
8,
341,
197,
24395,
11,
716,
1669,
1758,
75274,
51442,
2817,
10979,
445,
87,
497,
220,
16,
15,
17,
20,
701,
64657,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func Test_StoreRepo_Delete(t *testing.T) {
id := uuid.NewV4().String()
testCases := []struct {
name string
arg string
expectedErr bool
prepare func(mock sqlmock.Sqlmock)
}{
{
name: "failure_exec_query_returns_error",
arg: id,
expectedErr: true,
prepare: func(mock sqlmock.Sqlmock) {
query := `DELETE FROM stores WHERE id = $1`
mock.ExpectExec(regexp.QuoteMeta(query)).WithArgs(id).WillReturnError(errors.New("unexpected error"))
},
},
{
name: "failure_get_affected_row_returns_an error",
arg: id,
expectedErr: true,
prepare: func(mock sqlmock.Sqlmock) {
query := `DELETE FROM stores WHERE id = $1`
mock.ExpectExec(regexp.QuoteMeta(query)).WithArgs(id).WillReturnResult(sqlmock.NewErrorResult(errors.New("unexpected error")))
},
},
{
name: "failure_returns_invalid_number_of_affected_rows",
arg: id,
expectedErr: true,
prepare: func(mock sqlmock.Sqlmock) {
query := `DELETE FROM stores WHERE id = $1`
mock.ExpectExec(regexp.QuoteMeta(query)).WithArgs(id).WillReturnResult(sqlmock.NewResult(1, 2))
},
},
{
name: "should succeed",
arg: id,
prepare: func(mock sqlmock.Sqlmock) {
query := `DELETE FROM stores WHERE id = $1`
mock.ExpectExec(regexp.QuoteMeta(query)).WithArgs(id).WillReturnResult(sqlmock.NewResult(1, 1))
},
},
}
for i := range testCases {
tc := testCases[i]
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
db, mock, err := sqlmock.New()
assert.NoError(t, err)
repo := pg.NewStoreRepository(db)
tc.prepare(mock)
err = repo.Delete(context.TODO(), tc.arg)
if tc.expectedErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
} | explode_data.jsonl/20621 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 846
} | [
2830,
3393,
92684,
25243,
57418,
1155,
353,
8840,
836,
8,
341,
15710,
1669,
16040,
7121,
53,
19,
1005,
703,
2822,
18185,
37302,
1669,
3056,
1235,
341,
197,
11609,
286,
914,
198,
197,
47903,
260,
914,
198,
197,
42400,
7747,
1807,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestOuterLinkV2WithMetadataEncode(t *testing.T) {
var o OuterLinkV2WithMetadata
_, err := MsgpackEncode(o)
requireErrorHasSuffix(t, errCodecEncodeSelf, err)
_, err = MsgpackEncode(&o)
requireErrorHasSuffix(t, errCodecEncodeSelf, err)
} | explode_data.jsonl/72238 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 101
} | [
2830,
3393,
51322,
3939,
53,
17,
2354,
14610,
32535,
1155,
353,
8840,
836,
8,
341,
2405,
297,
55197,
3939,
53,
17,
2354,
14610,
198,
197,
6878,
1848,
1669,
24205,
4748,
32535,
10108,
340,
17957,
1454,
10281,
40177,
1155,
11,
1848,
36913... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_IfdByteEncoder_encodeTagToBytes_bytes_embedded1(t *testing.T) {
defer func() {
if state := recover(); state != nil {
err := log.Wrap(state.(error))
log.PrintErrorf(err, "Test failure.")
panic(err)
}
}()
ibe := NewIfdByteEncoder()
im := NewIfdMapping()
err := LoadStandardIfds(im)
log.PanicIf(err)
ti := NewTagIndex()
ib := NewIfdBuilder(im, ti, exifcommon.IfdPathStandardGps, exifcommon.TestDefaultByteOrder)
it, err := ti.Get(ib.ifdPath, uint16(0x0000))
log.PanicIf(err)
bt := NewStandardBuilderTag(exifcommon.IfdPathStandardGps, it, exifcommon.TestDefaultByteOrder, []uint8{uint8(0x12)})
b := new(bytes.Buffer)
bw := NewByteWriter(b, exifcommon.TestDefaultByteOrder)
addressableOffset := uint32(0x1234)
ida := newIfdDataAllocator(addressableOffset)
childIfdBlock, err := ibe.encodeTagToBytes(ib, bt, bw, ida, uint32(0))
log.PanicIf(err)
if childIfdBlock != nil {
t.Fatalf("no child-IFDs were expected to be allocated")
} else if bytes.Compare(b.Bytes(), []byte{0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x12, 0x00, 0x00, 0x00}) != 0 {
t.Fatalf("encoded tag-entry bytes not correct")
} else if ida.NextOffset() != addressableOffset {
t.Fatalf("allocation was done but not expected")
}
} | explode_data.jsonl/52746 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 507
} | [
2830,
3393,
62,
2679,
67,
7153,
19921,
11224,
5668,
1249,
7078,
12524,
24007,
9789,
16,
1155,
353,
8840,
836,
8,
341,
16867,
2915,
368,
341,
197,
743,
1584,
1669,
11731,
2129,
1584,
961,
2092,
341,
298,
9859,
1669,
1487,
38968,
8390,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestBlocksInclusive(t *testing.T) {
chk := NewMemChunk(EncNone, testBlockSize, testTargetSize)
err := chk.Append(logprotoEntry(1, "1"))
require.Nil(t, err)
err = chk.cut()
require.Nil(t, err)
blocks := chk.Blocks(time.Unix(0, 1), time.Unix(0, 1))
require.Equal(t, 1, len(blocks))
require.Equal(t, 1, blocks[0].Entries())
} | explode_data.jsonl/15695 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 145
} | [
2830,
3393,
29804,
641,
8336,
1155,
353,
8840,
836,
8,
341,
23049,
74,
1669,
1532,
18816,
28304,
7,
7408,
4064,
11,
1273,
89932,
11,
1273,
6397,
1695,
340,
9859,
1669,
39242,
8982,
12531,
15110,
5874,
7,
16,
11,
330,
16,
5455,
17957,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestControllerVolumeType(t *testing.T) {
validConfigs := []struct {
conf string
volumeType string
iops int
}{
{
conf: ``,
volumeType: "gp2",
iops: 0,
},
{
conf: `
controller:
rootVolume:
type: gp2
`,
volumeType: "gp2",
iops: 0,
},
{
conf: `
controller:
rootVolume:
type: standard
`,
volumeType: "standard",
iops: 0,
},
{
conf: `
controller:
rootVolume:
type: io1
iops: 100
`,
volumeType: "io1",
iops: 100,
},
{
conf: `
controller:
rootVolume:
type: io1
iops: 20000
`,
volumeType: "io1",
iops: 20000,
},
}
invalidConfigs := []string{
`
# There's no volume type 'default'
controller:
rootVolume:
type: default
`,
`
# IOPS must be zero for volume types != 'io1'
controller:
rootVolume:
type: standard
iops: 100
`,
`
# IOPS must be zero for volume types != 'io1'
controller:
rootVolume:
type: gp2
iops: 20000
`,
`
# IOPS smaller than the minimum (100)
controller:
rootVolume:
type: io1
iops: 99
`,
`
# IOPS greater than the maximum (20000)
controller:
rootVolume:
type: io1
iops: 20001
`,
}
for _, conf := range validConfigs {
confBody := singleAzConfigYaml + conf.conf
c, err := ClusterFromBytes([]byte(confBody))
if err != nil {
t.Errorf("failed to parse config %s: %v", confBody, err)
continue
}
if c.Controller.RootVolume.Type != conf.volumeType {
t.Errorf(
"parsed root volume type %s does not match root volume %s in config: %s",
c.Controller.RootVolume.Type,
conf.volumeType,
confBody,
)
}
}
for _, conf := range invalidConfigs {
confBody := singleAzConfigYaml + conf
_, err := ClusterFromBytes([]byte(confBody))
if err == nil {
t.Errorf("expected error parsing invalid config: %s", confBody)
}
}
} | explode_data.jsonl/4370 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 838
} | [
2830,
3393,
2051,
18902,
929,
1155,
353,
8840,
836,
8,
1476,
56322,
84905,
1669,
3056,
1235,
341,
197,
67850,
981,
914,
198,
197,
5195,
4661,
929,
914,
198,
197,
8230,
3721,
981,
526,
198,
197,
59403,
197,
197,
515,
298,
67850,
25,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestNewOrganizations(t *testing.T) {
a := NewOrganizations(nil)
assert.NotNil(t, a)
a = NewOrganizations(orgs)
assert.NotNil(t, a)
assert.Equal(t, len(a.items), len(orgs))
a = Organization()
assert.NotNil(t, a)
} | explode_data.jsonl/36088 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 98
} | [
2830,
3393,
3564,
23227,
8040,
1155,
353,
8840,
836,
8,
341,
11323,
1669,
1532,
23227,
8040,
27907,
340,
6948,
93882,
1155,
11,
264,
692,
11323,
284,
1532,
23227,
8040,
36246,
82,
340,
6948,
93882,
1155,
11,
264,
340,
6948,
12808,
1155,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestAddExtraVarsFile(t *testing.T) {
tests := []struct {
desc string
file string
options *AnsibleAdhocOptions
res []string
err error
}{
{
desc: "Testing add an extra-vars file when ExtraVarsFile is nil",
file: "@test.yml",
options: &AnsibleAdhocOptions{},
res: []string{"@test.yml"},
err: &errors.Error{},
},
{
desc: "Testing add an extra-vars file",
file: "@test2.yml",
options: &AnsibleAdhocOptions{
ExtraVarsFile: []string{"@test1.yml"},
},
res: []string{"@test1.yml", "@test2.yml"},
err: &errors.Error{},
},
{
desc: "Testing add an extra-vars file without file mark prefix @",
file: "test.yml",
options: &AnsibleAdhocOptions{
ExtraVarsFile: []string{},
},
res: []string{"@test.yml"},
err: &errors.Error{},
},
}
for _, test := range tests {
t.Run(test.desc, func(t *testing.T) {
t.Log(test.desc)
err := test.options.AddExtraVarsFile(test.file)
if err != nil && assert.Error(t, err) {
assert.Equal(t, test.err, err)
} else {
assert.Equal(t, test.res, test.options.ExtraVarsFile, "Unexpected options value")
}
})
}
} | explode_data.jsonl/69692 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 539
} | [
2830,
3393,
2212,
11612,
28305,
1703,
1155,
353,
8840,
836,
8,
1476,
78216,
1669,
3056,
1235,
341,
197,
41653,
262,
914,
198,
197,
17661,
262,
914,
198,
197,
35500,
353,
69599,
1238,
2589,
88473,
3798,
198,
197,
10202,
257,
3056,
917,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestRedis_Exists(t *testing.T) {
runOnRedis(t, func(client *Redis) {
_, err := NewRedis(client.Addr, "").Exists("a")
assert.NotNil(t, err)
ok, err := client.Exists("a")
assert.Nil(t, err)
assert.False(t, ok)
assert.Nil(t, client.Set("a", "b"))
ok, err = client.Exists("a")
assert.Nil(t, err)
assert.True(t, ok)
})
} | explode_data.jsonl/39153 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 166
} | [
2830,
3393,
48137,
62,
15575,
1155,
353,
8840,
836,
8,
341,
56742,
1925,
48137,
1155,
11,
2915,
12805,
353,
48137,
8,
341,
197,
197,
6878,
1848,
1669,
1532,
48137,
12805,
93626,
11,
35229,
15575,
445,
64,
1138,
197,
6948,
93882,
1155,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAndOr(t *testing.T) {
t.Parallel()
resource.Require(t, resource.UnitTest)
expect(t, c.Or(c.Literal(true), c.Literal(false)), "(? OR ?)", []interface{}{true, false}, nil)
wiTbl := workitem.WorkItemStorage{}.TableName()
expect(t, c.And(c.Not(c.Field("foo.bar"), c.Literal("abcd")), c.Not(c.Literal(true), c.Literal(false))), `(NOT (`+workitem.Column(wiTbl, "fields")+` @> '{"foo.bar" : "abcd"}') AND (? != ?))`, []interface{}{true, false}, nil)
expect(t, c.And(c.Equals(c.Field("foo.bar"), c.Literal("abcd")), c.Equals(c.Literal(true), c.Literal(false))), `((`+workitem.Column(wiTbl, "fields")+` @> '{"foo.bar" : "abcd"}') AND (? = ?))`, []interface{}{true, false}, nil)
expect(t, c.Or(c.Equals(c.Field("foo.bar"), c.Literal("abcd")), c.Equals(c.Literal(true), c.Literal(false))), `((`+workitem.Column(wiTbl, "fields")+` @> '{"foo.bar" : "abcd"}') OR (? = ?))`, []interface{}{true, false}, nil)
} | explode_data.jsonl/36707 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 409
} | [
2830,
3393,
3036,
2195,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
50346,
81288,
1155,
11,
5101,
25159,
2271,
340,
24952,
1155,
11,
272,
90449,
1337,
1214,
9953,
3715,
701,
272,
1214,
9953,
3576,
5731,
11993,
30,
2726,
937,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNewMappingRuleSnapshotFromFields(t *testing.T) {
res, err := newMappingRuleSnapshotFromFields(
testMappingRuleSnapshot3.name,
testMappingRuleSnapshot3.cutoverNanos,
testMappingRuleSnapshot3.filter,
testMappingRuleSnapshot3.rawFilter,
testMappingRuleSnapshot3.aggregationID,
testMappingRuleSnapshot3.storagePolicies,
testMappingRuleSnapshot3.dropPolicy,
testMappingRuleSnapshot3.tags,
testMappingRuleSnapshot3.lastUpdatedAtNanos,
testMappingRuleSnapshot3.lastUpdatedBy,
)
require.NoError(t, err)
require.True(t, cmp.Equal(testMappingRuleSnapshot3, res, testMappingRuleSnapshotCmpOpts...))
} | explode_data.jsonl/64569 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 233
} | [
2830,
3393,
3564,
6807,
11337,
15009,
3830,
8941,
1155,
353,
8840,
836,
8,
341,
10202,
11,
1848,
1669,
501,
6807,
11337,
15009,
3830,
8941,
1006,
197,
18185,
6807,
11337,
15009,
18,
2644,
345,
197,
18185,
6807,
11337,
15009,
18,
520,
15... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStoreGateway_SeriesQueryingShouldRemoveExternalLabels(t *testing.T) {
ctx := context.Background()
logger := log.NewNopLogger()
userID := "user-1"
storageDir, err := ioutil.TempDir(os.TempDir(), "")
require.NoError(t, err)
defer os.RemoveAll(storageDir) //nolint:errcheck
// Generate 2 TSDB blocks with the same exact series (and data points).
numSeries := 2
now := time.Now()
minT := now.Add(-1*time.Hour).Unix() * 1000
maxT := now.Unix() * 1000
step := (maxT - minT) / int64(numSeries)
mockTSDB(t, path.Join(storageDir, userID), numSeries, 0, minT, maxT)
mockTSDB(t, path.Join(storageDir, userID), numSeries, 0, minT, maxT)
bucketClient, err := filesystem.NewBucketClient(filesystem.Config{Directory: storageDir})
require.NoError(t, err)
createBucketIndex(t, bucketClient, userID)
// Find the created blocks (we expect 2).
var blockIDs []string
require.NoError(t, bucketClient.Iter(ctx, "user-1/", func(key string) error {
if _, ok := block.IsBlockDir(key); ok {
blockIDs = append(blockIDs, strings.TrimSuffix(strings.TrimPrefix(key, userID+"/"), "/"))
}
return nil
}))
require.Len(t, blockIDs, 2)
// Inject different external labels for each block.
for idx, blockID := range blockIDs {
meta := metadata.Thanos{
Labels: map[string]string{
cortex_tsdb.TenantIDExternalLabel: userID,
cortex_tsdb.IngesterIDExternalLabel: fmt.Sprintf("ingester-%d", idx),
cortex_tsdb.ShardIDExternalLabel: fmt.Sprintf("shard-%d", idx),
},
Source: metadata.TestSource,
}
_, err := metadata.InjectThanos(logger, filepath.Join(storageDir, userID, blockID), meta, nil)
require.NoError(t, err)
}
for _, bucketIndexEnabled := range []bool{true, false} {
t.Run(fmt.Sprintf("bucket index enabled = %v", bucketIndexEnabled), func(t *testing.T) {
// Create a store-gateway used to query back the series from the blocks.
gatewayCfg := mockGatewayConfig()
gatewayCfg.ShardingEnabled = false
storageCfg := mockStorageConfig(t)
storageCfg.BucketStore.BucketIndex.Enabled = bucketIndexEnabled
g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, nil, defaultLimitsOverrides(t), mockLoggingLevel(), logger, nil)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(ctx, g))
defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck
// Query back all series.
req := &storepb.SeriesRequest{
MinTime: minT,
MaxTime: maxT,
Matchers: []storepb.LabelMatcher{
{Type: storepb.LabelMatcher_RE, Name: "__name__", Value: ".*"},
},
}
srv := newBucketStoreSeriesServer(setUserIDToGRPCContext(ctx, userID))
err = g.Series(req, srv)
require.NoError(t, err)
assert.Empty(t, srv.Warnings)
assert.Len(t, srv.SeriesSet, numSeries)
for seriesID := 0; seriesID < numSeries; seriesID++ {
actual := srv.SeriesSet[seriesID]
// Ensure Cortex external labels have been removed.
assert.Equal(t, []labelpb.ZLabel{{Name: "series_id", Value: strconv.Itoa(seriesID)}}, actual.Labels)
// Ensure samples have been correctly queried. The Thanos store also deduplicate samples
// in most cases, but it's not strictly required guaranteeing deduplication at this stage.
samples, err := readSamplesFromChunks(actual.Chunks)
require.NoError(t, err)
assert.Equal(t, []sample{
{ts: minT + (step * int64(seriesID)), value: float64(seriesID)},
}, samples)
}
})
}
} | explode_data.jsonl/57964 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1313
} | [
2830,
3393,
6093,
40709,
1098,
4699,
2859,
287,
14996,
13021,
25913,
23674,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
19047,
741,
17060,
1669,
1487,
7121,
45,
453,
7395,
741,
19060,
915,
1669,
330,
872,
12,
16,
1837,
197,
16172,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.