text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func Test_stateFromName(t *testing.T) {
st := stateFromName(stateNameNoop)
require.Equal(t, &noOp{}, st)
st = stateFromName(stateNameStart)
require.Equal(t, &start{}, st)
st = stateFromName(stateNameDone)
require.Equal(t, &done{}, st)
st = stateFromName(stateNameArranging)
require.Equal(t, &arranging{}, st)
st = stateFromName(stateNameDelivering)
require.Equal(t, &delivering{}, st)
st = stateFromName(stateNameConfirming)
require.Equal(t, &confirming{}, st)
st = stateFromName(stateNameAbandoning)
require.Equal(t, &abandoning{}, st)
st = stateFromName(stateNameDeciding)
require.Equal(t, &deciding{}, st)
st = stateFromName(stateNameWaiting)
require.Equal(t, &waiting{}, st)
st = stateFromName(stateNameRequesting)
require.Equal(t, &requesting{}, st)
st = stateFromName("unknown")
require.Equal(t, &noOp{}, st)
} | explode_data.jsonl/66259 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 330
} | [
2830,
3393,
4387,
3830,
675,
1155,
353,
8840,
836,
8,
341,
18388,
1669,
1584,
3830,
675,
8390,
675,
2753,
453,
340,
17957,
12808,
1155,
11,
609,
2152,
7125,
22655,
357,
692,
18388,
284,
1584,
3830,
675,
8390,
675,
3479,
340,
17957,
12... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSyncPod(t *testing.T) {
tests := []struct {
name string
key string
initialObjs []runtime.Object
expectedError string
prepare func(t *testing.T, c *Controller)
validate func(t *testing.T, c *Controller)
}{
{
name: "invalid key",
key: "foo/bar/baz",
expectedError: `unexpected key format: "foo/bar/baz"`,
},
{
name: "deleted pod enqueues all pvcs in pod namespace",
key: "default/foo",
initialObjs: []runtime.Object{
newPVC("foo", "default"),
newPVC("bar", "default"),
newPVC("bar", "kube-system"),
},
validate: func(t *testing.T, c *Controller) {
assert.Equal(t, 2, c.pvcQueue.Len())
},
},
{
name: "pod update enqueue mounted pvcs",
key: "default/foo",
initialObjs: []runtime.Object{
newPodWithVolumes("foo", "default", []corev1.Volume{
newVolumeWithClaim("some-vol", "foo"),
}),
newPVC("foo", "default"),
newPVC("bar", "default"),
newPVC("bar", "kube-system"),
},
validate: func(t *testing.T, c *Controller) {
assert.Equal(t, 1, c.pvcQueue.Len())
},
},
{
name: "pod with deletion timestamp does not enqueue pvcs",
key: "default/foo",
initialObjs: []runtime.Object{
func() *corev1.Pod {
pod := newPodWithVolumes("foo", "default", []corev1.Volume{
newVolumeWithClaim("some-vol", "foo"),
})
pod.DeletionTimestamp = new(metav1.Time)
return pod
}(),
newPVC("foo", "default"),
newPVC("bar", "default"),
newPVC("bar", "kube-system"),
},
validate: func(t *testing.T, c *Controller) {
assert.Equal(t, 0, c.pvcQueue.Len())
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
c, err := newFakeController(test.initialObjs...)
require.NoError(t, err)
if test.prepare != nil {
test.prepare(t, c)
}
fakeIndexerAdd(t, c, test.initialObjs...)
err = c.syncPod(test.key)
if test.expectedError != "" {
require.Error(t, err)
assert.Equal(t, test.expectedError, err.Error())
} else {
require.NoError(t, err)
}
if test.validate != nil {
test.validate(t, c)
}
})
}
} | explode_data.jsonl/46211 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1026
} | [
2830,
3393,
12154,
23527,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
688,
914,
198,
197,
23634,
1843,
914,
198,
197,
85270,
4121,
2519,
256,
3056,
22255,
8348,
198,
197,
42400,
1454,
914,
198,
197,
197,
13... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_getResponseToMessage(t *testing.T) {
cfg := fakeCfg{}
discord := &FakeDiscordClientSpy{}
prc := &fakeProcessor{}
b := &bot{
cfg: cfg,
discord: discord,
prc: prc,
}
got := b.getResponseToMessage("hello", "user1")
want := "user1 told me : hello"
if got != want {
t.Errorf("want message %q, got %q", want, got)
}
} | explode_data.jsonl/2144 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 154
} | [
2830,
3393,
3062,
2582,
1249,
2052,
1155,
353,
8840,
836,
8,
341,
50286,
1669,
12418,
42467,
16094,
2698,
3427,
539,
1669,
609,
52317,
23477,
539,
2959,
44027,
16094,
25653,
66,
1669,
609,
30570,
22946,
31483,
2233,
1669,
609,
6331,
515,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestMaterialPool_FetchData(t *testing.T) {
keyword := domain.NewKeyword("sample")
resultPoolRepo := repository.NewResultPool([]service.Collector{&CollectorMock{}})
resultPool, _ := resultPoolRepo.FetchData(keyword)
if len(*resultPool) != 1 {
t.Fatal("Fail on test fetch data from search engine")
}
} | explode_data.jsonl/53725 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 108
} | [
2830,
3393,
13415,
10551,
1400,
2995,
1043,
1155,
353,
8840,
836,
8,
341,
197,
19863,
1669,
7947,
7121,
34481,
445,
13611,
5130,
9559,
10551,
25243,
1669,
12542,
7121,
2077,
10551,
10556,
7936,
727,
24605,
269,
90,
5,
53694,
11571,
90,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestValidateEndpointHandler(t *testing.T) {
os.Clearenv()
svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2"))
svc.Handlers.Clear()
svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
err := req.Build()
assert.NoError(t, err)
} | explode_data.jsonl/57761 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 129
} | [
2830,
3393,
17926,
27380,
3050,
1155,
353,
8840,
836,
8,
341,
25078,
727,
273,
9151,
85,
2822,
1903,
7362,
1669,
1360,
267,
59855,
7121,
2959,
7,
8635,
7121,
2648,
1005,
2354,
14091,
445,
355,
37602,
12,
17,
5455,
1903,
7362,
35308,
9... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUnknownTag(t *testing.T) {
var args struct {
Foo string `arg:"this_is_not_valid"`
}
err := parse("--foo xyz", &args)
assert.Error(t, err)
} | explode_data.jsonl/13030 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 65
} | [
2830,
3393,
13790,
5668,
1155,
353,
8840,
836,
8,
341,
2405,
2827,
2036,
341,
197,
12727,
2624,
914,
1565,
858,
2974,
574,
6892,
7913,
8337,
8805,
197,
532,
9859,
1669,
4715,
21549,
7975,
40511,
497,
609,
2116,
340,
6948,
6141,
1155,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestTypeFlag_IsOn_GivenType_ReturnsExpectedValue(t *testing.T) {
armString := ArmFlag.ApplyTo(StringType)
cases := []struct {
name string
subject Type
flag TypeFlag
expected bool
}{
{"String does not have ArmFlag", StringType, ArmFlag, false},
{"String does not have StorageFlag", StringType, StorageFlag, false},
{"ArmString does have ArmFlag", armString, ArmFlag, true},
{"ArmString does not have StorageFlag", armString, StorageFlag, false},
}
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
t.Parallel()
g := NewGomegaWithT(t)
g.Expect(c.flag.IsOn(c.subject)).To(Equal(c.expected))
})
}
} | explode_data.jsonl/71882 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 255
} | [
2830,
3393,
929,
12135,
31879,
1925,
2646,
2071,
929,
53316,
82,
18896,
1130,
1155,
353,
8840,
836,
8,
1476,
197,
2178,
703,
1669,
12990,
12135,
36051,
1249,
2242,
929,
692,
1444,
2264,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIsSupported(t *testing.T) {
cases := []struct {
rrType uint16
supported bool
}{
// supported
{dns.TypeA, true},
{dns.TypeSRV, true},
// some others
{dns.TypeCNAME, false},
{dns.TypeNS, false},
{dns.TypeMX, false},
}
for _, c := range cases {
out := IsSupported(c.rrType)
if out != c.supported {
t.Fatal("wrong value for %s", dns.TypeToString[c.rrType])
}
}
} | explode_data.jsonl/27543 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 182
} | [
2830,
3393,
3872,
34636,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
197,
634,
929,
262,
2622,
16,
21,
198,
197,
1903,
12513,
1807,
198,
197,
59403,
197,
197,
322,
7248,
198,
197,
197,
90,
45226,
10184,
32,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestTruncateString(t *testing.T) {
const (
maxLength = 5
expected1 = "testa"
expected2 = "testab"
expected3 = "testabc"
expected4 = "testabcd"
expected5 = "testa..."
expected6 = ""
expected7 = "t"
)
var result1 = truncateString(maxLength, "testa")
if result1 != expected1 {
t.Errorf("Got: %s, expected: %s", result1, expected1)
}
var result2 = truncateString(maxLength, "testab")
if result2 != expected2 {
t.Errorf("Got: %s, expected: %s", result2, expected2)
}
var result3 = truncateString(maxLength, "testabc")
if result3 != expected3 {
t.Errorf("Got: %s, expected: %s", result3, expected3)
}
var result4 = truncateString(maxLength, "testabcd")
if result4 != expected4 {
t.Errorf("Got: %s, expected: %s", result4, expected4)
}
var result5 = truncateString(maxLength, "testabcde")
if result5 != expected5 {
t.Errorf("Got: %s, expected: %s", result5, expected5)
}
var result6 = truncateString(maxLength, "")
if result6 != expected6 {
t.Errorf("Got: %s, expected: %s", result6, expected6)
}
var result7 = truncateString(maxLength, "t")
if result7 != expected7 {
t.Errorf("Got: %s, expected: %s", result7, expected7)
}
} | explode_data.jsonl/3655 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 467
} | [
2830,
3393,
1282,
26900,
703,
1155,
353,
8840,
836,
8,
341,
4777,
2399,
197,
22543,
4373,
284,
220,
20,
198,
197,
42400,
16,
284,
330,
1944,
64,
698,
197,
42400,
17,
284,
330,
1944,
370,
698,
197,
42400,
18,
284,
330,
1944,
13683,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestTenantStatus(t *testing.T) {
// set the correct Tenant ID
tID := os.Getenv("TENANT_ID")
url := os.Getenv("URL")
token := os.Getenv("TOKEN")
c := NewClient(Config{
ClusterType: ClusterTypeSingleNode,
Token: token,
Environment: EnvironmentDev,
SamlTenant: "ycloud.accounts.ondemand.com",
URL: url,
}, logrus.StandardLogger())
s, err := c.GetTenantStatus(tID)
t.Logf("%+v\n%s", s, err)
} | explode_data.jsonl/500 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 187
} | [
2830,
3393,
71252,
2522,
1155,
353,
8840,
836,
8,
341,
197,
322,
738,
279,
4396,
73724,
3034,
198,
3244,
915,
1669,
2643,
64883,
445,
51,
953,
2821,
3450,
5130,
19320,
1669,
2643,
64883,
445,
3144,
1138,
43947,
1669,
2643,
64883,
445,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGETRoot(t *testing.T) {
request, _ := http.NewRequest(http.MethodGet, "/", nil)
response := httptest.NewRecorder()
conf := config.Config{}
conf.Name = "Gopher"
conf.Port = 3000
page := NewPage(&conf, tpl)
page.FrontpageHandler(response, request)
t.Run("Page returns expected status code", func(t *testing.T) {
got := response.Result().StatusCode
want := 200
if got != want {
t.Errorf("Got %q but want %q", got, want)
}
})
} | explode_data.jsonl/64678 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 170
} | [
2830,
3393,
3806,
8439,
1155,
353,
8840,
836,
8,
341,
23555,
11,
716,
1669,
1758,
75274,
19886,
20798,
1949,
11,
64657,
2092,
340,
21735,
1669,
54320,
70334,
7121,
47023,
2822,
67850,
1669,
2193,
10753,
16094,
67850,
2967,
284,
330,
38,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestReaderWriter(t *testing.T) {
assert := With(t)
pReader, pWriter := pipe.New(pipe.WithSizeLimit(1024))
dest := net.TCPDestination(net.DomainAddress("v2ray.com"), 80)
writer := NewWriter(1, dest, pWriter, protocol.TransferTypeStream)
dest2 := net.TCPDestination(net.LocalHostIP, 443)
writer2 := NewWriter(2, dest2, pWriter, protocol.TransferTypeStream)
dest3 := net.TCPDestination(net.LocalHostIPv6, 18374)
writer3 := NewWriter(3, dest3, pWriter, protocol.TransferTypeStream)
writePayload := func(writer *Writer, payload ...byte) error {
b := buf.New()
b.Write(payload)
return writer.WriteMultiBuffer(buf.NewMultiBufferValue(b))
}
assert(writePayload(writer, 'a', 'b', 'c', 'd'), IsNil)
assert(writePayload(writer2), IsNil)
assert(writePayload(writer, 'e', 'f', 'g', 'h'), IsNil)
assert(writePayload(writer3, 'x'), IsNil)
writer.Close()
writer3.Close()
assert(writePayload(writer2, 'y'), IsNil)
writer2.Close()
bytesReader := &buf.BufferedReader{Reader: pReader}
var meta FrameMetadata
err := meta.ReadFrom(bytesReader)
assert(err, IsNil)
assert(meta.SessionID, Equals, uint16(1))
assert(byte(meta.SessionStatus), Equals, byte(SessionStatusNew))
assert(meta.Target, Equals, dest)
assert(byte(meta.Option), Equals, byte(OptionData))
data, err := readAll(NewStreamReader(bytesReader))
assert(err, IsNil)
assert(len(data), Equals, 1)
assert(data[0].String(), Equals, "abcd")
err = meta.ReadFrom(bytesReader)
assert(err, IsNil)
assert(byte(meta.SessionStatus), Equals, byte(SessionStatusNew))
assert(meta.SessionID, Equals, uint16(2))
assert(byte(meta.Option), Equals, byte(0))
assert(meta.Target, Equals, dest2)
err = meta.ReadFrom(bytesReader)
assert(err, IsNil)
assert(byte(meta.SessionStatus), Equals, byte(SessionStatusKeep))
assert(meta.SessionID, Equals, uint16(1))
assert(byte(meta.Option), Equals, byte(1))
data, err = readAll(NewStreamReader(bytesReader))
assert(err, IsNil)
assert(len(data), Equals, 1)
assert(data[0].String(), Equals, "efgh")
err = meta.ReadFrom(bytesReader)
assert(err, IsNil)
assert(byte(meta.SessionStatus), Equals, byte(SessionStatusNew))
assert(meta.SessionID, Equals, uint16(3))
assert(byte(meta.Option), Equals, byte(1))
assert(meta.Target, Equals, dest3)
data, err = readAll(NewStreamReader(bytesReader))
assert(err, IsNil)
assert(len(data), Equals, 1)
assert(data[0].String(), Equals, "x")
err = meta.ReadFrom(bytesReader)
assert(err, IsNil)
assert(byte(meta.SessionStatus), Equals, byte(SessionStatusEnd))
assert(meta.SessionID, Equals, uint16(1))
assert(byte(meta.Option), Equals, byte(0))
err = meta.ReadFrom(bytesReader)
assert(err, IsNil)
assert(byte(meta.SessionStatus), Equals, byte(SessionStatusEnd))
assert(meta.SessionID, Equals, uint16(3))
assert(byte(meta.Option), Equals, byte(0))
err = meta.ReadFrom(bytesReader)
assert(err, IsNil)
assert(byte(meta.SessionStatus), Equals, byte(SessionStatusKeep))
assert(meta.SessionID, Equals, uint16(2))
assert(byte(meta.Option), Equals, byte(1))
data, err = readAll(NewStreamReader(bytesReader))
assert(err, IsNil)
assert(len(data), Equals, 1)
assert(data[0].String(), Equals, "y")
err = meta.ReadFrom(bytesReader)
assert(err, IsNil)
assert(byte(meta.SessionStatus), Equals, byte(SessionStatusEnd))
assert(meta.SessionID, Equals, uint16(2))
assert(byte(meta.Option), Equals, byte(0))
pWriter.Close()
err = meta.ReadFrom(bytesReader)
assert(err, IsNotNil)
} | explode_data.jsonl/1942 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1281
} | [
2830,
3393,
5062,
6492,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
3085,
1155,
692,
3223,
5062,
11,
281,
6492,
1669,
13647,
7121,
70053,
26124,
1695,
16527,
7,
16,
15,
17,
19,
4390,
49616,
1669,
4179,
836,
7123,
33605,
30723,
20442,
42... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStateDeltaSizeSetting(t *testing.T) {
_, state := createFreshDBAndConstructState(t)
if state.historyStateDeltaSize != 500 {
t.Fatalf("Error reading historyStateDeltaSize. Expected 500, but got %d", state.historyStateDeltaSize)
}
} | explode_data.jsonl/69020 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 77
} | [
2830,
3393,
1397,
20277,
1695,
15400,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1584,
1669,
1855,
55653,
3506,
3036,
28468,
1397,
1155,
340,
743,
1584,
23430,
1397,
20277,
1695,
961,
220,
20,
15,
15,
341,
197,
3244,
30762,
445,
1454,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestNewWriter(t *testing.T) {
samples := []struct {
channel string
user string
token string
err bool
}{
{"", "", "", true},
{"", "user", "", true},
{"", "", "token", true},
{"", "user", "token", true},
{"channel", "", "", true},
{"channel", "user", "", true},
{"channel", "", "token", true},
{"channel", "user", "token", false},
}
for _, s := range samples {
_, err := NewWriter(s.channel, s.user, s.token)
if (err != nil) != s.err {
t.Errorf("NewWriter(channel=%s, user=%s, token=%s): got err!=nil == %t",
s.channel, s.user, s.token, err != nil)
}
}
} | explode_data.jsonl/71432 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 267
} | [
2830,
3393,
3564,
6492,
1155,
353,
8840,
836,
8,
341,
1903,
4023,
1669,
3056,
1235,
341,
197,
71550,
914,
198,
197,
19060,
262,
914,
198,
197,
43947,
256,
914,
198,
197,
9859,
257,
1807,
198,
197,
59403,
197,
197,
4913,
497,
7342,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestProvider(t *testing.T) {
cfg := rpctest.GetConfig()
defer os.RemoveAll(cfg.RootDir)
rpcAddr := cfg.RPC.ListenAddress
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
if err != nil {
panic(err)
}
chainID := genDoc.ChainID
t.Log("chainID:", chainID)
c, err := rpchttp.New(rpcAddr, "/websocket")
require.Nil(t, err)
p := lighthttp.NewWithClient(chainID, c)
require.NoError(t, err)
require.NotNil(t, p)
// let it produce some blocks
err = rpcclient.WaitForHeight(c, 10, nil)
require.NoError(t, err)
// let's get the highest block
sh, err := p.LightBlock(context.Background(), 0)
require.NoError(t, err)
assert.True(t, sh.Height < 1000)
// let's check this is valid somehow
assert.Nil(t, sh.ValidateBasic(chainID))
// historical queries now work :)
lower := sh.Height - 3
sh, err = p.LightBlock(context.Background(), lower)
require.NoError(t, err)
assert.Equal(t, lower, sh.Height)
// fetching missing heights (both future and pruned) should return appropriate errors
lb, err := p.LightBlock(context.Background(), 1000)
require.Error(t, err)
require.Nil(t, lb)
assert.Equal(t, provider.ErrHeightTooHigh, err)
_, err = p.LightBlock(context.Background(), 1)
require.Error(t, err)
assert.Equal(t, provider.ErrLightBlockNotFound, err)
} | explode_data.jsonl/47997 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 489
} | [
2830,
3393,
5179,
1155,
353,
8840,
836,
8,
341,
50286,
1669,
33109,
67880,
2234,
2648,
741,
16867,
2643,
84427,
28272,
45345,
6184,
340,
7000,
3992,
13986,
1669,
13286,
2013,
4872,
68334,
4286,
198,
82281,
9550,
11,
1848,
1669,
4494,
6538... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFindPackageByName(t *testing.T) {
distro := distrodetector.New()
if distro.Name() == "Arch Linux" {
packages := FindPackageByName(distro, "pacman")
if !has(packages, "pacman") {
t.Fatal("pacman should be a package in Arch Linux")
}
}
} | explode_data.jsonl/37426 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
9885,
13100,
16898,
1155,
353,
8840,
836,
8,
341,
2698,
15561,
1669,
1582,
299,
91544,
7121,
741,
743,
1582,
299,
2967,
368,
621,
330,
18727,
14340,
1,
341,
197,
3223,
22211,
1669,
7379,
13100,
16898,
1500,
15561,
11,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestFile_IsDirectory(t *testing.T) {
fs := testFs()
fs.Mkdir("build", 0777)
err := CheckFile(fs, "build", false)
assert.Equal(t, linterrors.NewFileError("build", "is not a file"), err)
} | explode_data.jsonl/71218 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 77
} | [
2830,
3393,
1703,
31879,
9310,
1155,
353,
8840,
836,
8,
341,
53584,
1669,
1273,
48300,
741,
53584,
1321,
12438,
445,
5834,
497,
220,
15,
22,
22,
22,
692,
9859,
1669,
4248,
1703,
31856,
11,
330,
5834,
497,
895,
340,
6948,
12808,
1155,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestDegenerateCertificate(t *testing.T) {
cert, err := createTestCertificate(x509.SHA1WithRSA)
if err != nil {
t.Fatal(err)
}
deg, err := DegenerateCertificate(cert.Certificate.Raw)
if err != nil {
t.Fatal(err)
}
testOpenSSLParse(t, deg)
pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: deg})
} | explode_data.jsonl/25099 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 141
} | [
2830,
3393,
45657,
13220,
33202,
1155,
353,
8840,
836,
8,
341,
1444,
529,
11,
1848,
1669,
1855,
2271,
33202,
2075,
20,
15,
24,
808,
17020,
16,
2354,
73564,
340,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
532,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestIntegrationRoundTrip(t *testing.T) {
queueName, queuesClient, cleanup := newTestQueue(t, "receive")
defer cleanup()
tests := []struct {
label string
sessions int
data []string
}{
{
label: "1 roundtrip, small payload",
sessions: 1,
data: []string{"1Hello there!"},
},
{
label: "3 roundtrip, small payload",
sessions: 1,
data: []string{
"2Hey there!",
"2Hi there!",
"2Ho there!",
},
},
{
label: "1000 roundtrip, small payload",
sessions: 1,
data: repeatStrings(1000,
"3Hey there!",
"3Hi there!",
"3Ho there!",
),
},
{
label: "1 roundtrip, small payload, 10 sessions",
sessions: 10,
data: []string{"1Hello there!"},
},
}
for _, tt := range tests {
t.Run(tt.label, func(t *testing.T) {
checkLeaks := leaktest.CheckTimeout(t, 60*time.Second)
// Create client
client := newClient(t, tt.label,
amqp.ConnMaxSessions(tt.sessions),
)
defer client.Close()
for i := 0; i < tt.sessions; i++ {
// Open a session
session, err := client.NewSession()
if err != nil {
t.Fatal(err)
}
// Create a sender
sender, err := session.NewSender(
amqp.LinkTargetAddress(queueName),
)
if err != nil {
t.Fatal(err)
}
// Perform test concurrently for speed and to catch races
var wg sync.WaitGroup
wg.Add(2)
var sendErr error
go func() {
defer wg.Done()
defer sender.Close()
for i, data := range tt.data {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
err = sender.Send(ctx, amqp.NewMessage([]byte(data)))
cancel()
if err != nil {
sendErr = fmt.Errorf("Error after %d sends: %+v", i, err)
return
}
}
}()
var receiveErr error
go func() {
defer wg.Done()
// Create a receiver
receiver, err := session.NewReceiver(
amqp.LinkSourceAddress(queueName),
amqp.LinkCredit(10),
amqp.LinkBatching(false),
)
if err != nil {
receiveErr = err
return
}
defer receiver.Close()
for i, data := range tt.data {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
msg, err := receiver.Receive(ctx)
cancel()
if err != nil {
receiveErr = fmt.Errorf("Error after %d receives: %+v", i, err)
return
}
// Accept message
msg.Accept()
if !bytes.Equal([]byte(data), msg.GetData()) {
receiveErr = fmt.Errorf("Expected received message %d to be %v, but it was %v", i+1, string(data), string(msg.GetData()))
}
}
}()
wg.Wait()
if sendErr != nil || receiveErr != nil {
t.Error("Send error:", sendErr)
t.Fatal("Receive error:", receiveErr)
}
}
client.Close() // close before leak check
checkLeaks() // this is done here because queuesClient starts additional goroutines
// Wait for Azure to update stats
time.Sleep(1 * time.Second)
q, err := queuesClient.Get(context.Background(), resourceGroup, namespace, queueName)
if err != nil {
t.Fatal(err)
}
if amc := *q.CountDetails.ActiveMessageCount; amc != 0 {
t.Fatalf("Expected ActiveMessageCount to be 0, but it was %d", amc)
}
if dead := *q.CountDetails.DeadLetterMessageCount; dead > 0 {
t.Fatalf("Expected DeadLetterMessageCount to be 0, but it was %d", dead)
}
})
}
} | explode_data.jsonl/60647 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1564
} | [
2830,
3393,
52464,
27497,
56352,
1155,
353,
8840,
836,
8,
341,
46993,
675,
11,
48094,
2959,
11,
21290,
1669,
501,
2271,
7554,
1155,
11,
330,
41893,
1138,
16867,
21290,
2822,
78216,
1669,
3056,
1235,
341,
197,
29277,
262,
914,
198,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestFingerprintsForLabels(t *testing.T) {
storage, closer := NewTestStorage(t, 1)
defer closer.Close()
samples := make([]*model.Sample, 100)
fingerprints := make(model.Fingerprints, 100)
for i := range samples {
metric := model.Metric{
model.MetricNameLabel: model.LabelValue(fmt.Sprintf("test_metric_%d", i)),
"label1": model.LabelValue(fmt.Sprintf("test_%d", i/10)),
"label2": model.LabelValue(fmt.Sprintf("test_%d", (i+5)/10)),
}
samples[i] = &model.Sample{
Metric: metric,
Timestamp: model.Time(i),
Value: model.SampleValue(i),
}
fingerprints[i] = metric.FastFingerprint()
}
for _, s := range samples {
storage.Append(s)
}
storage.WaitForIndexing()
var matcherTests = []struct {
pairs []model.LabelPair
expected model.Fingerprints
}{
{
pairs: []model.LabelPair{{"label1", "x"}},
expected: fingerprints[:0],
},
{
pairs: []model.LabelPair{{"label1", "test_0"}},
expected: fingerprints[:10],
},
{
pairs: []model.LabelPair{
{"label1", "test_0"},
{"label1", "test_1"},
},
expected: fingerprints[:0],
},
{
pairs: []model.LabelPair{
{"label1", "test_0"},
{"label2", "test_1"},
},
expected: fingerprints[5:10],
},
{
pairs: []model.LabelPair{
{"label1", "test_1"},
{"label2", "test_2"},
},
expected: fingerprints[15:20],
},
}
for _, mt := range matcherTests {
resfps := storage.fingerprintsForLabelPairs(mt.pairs...)
if len(mt.expected) != len(resfps) {
t.Fatalf("expected %d matches for %q, found %d", len(mt.expected), mt.pairs, len(resfps))
}
for fp1 := range resfps {
found := false
for _, fp2 := range mt.expected {
if fp1 == fp2 {
found = true
break
}
}
if !found {
t.Errorf("expected fingerprint %s for %q not in result", fp1, mt.pairs)
}
}
}
} | explode_data.jsonl/34432 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 868
} | [
2830,
3393,
37,
5137,
25738,
2461,
23674,
1155,
353,
8840,
836,
8,
341,
197,
16172,
11,
12128,
1669,
1532,
2271,
5793,
1155,
11,
220,
16,
340,
16867,
12128,
10421,
2822,
1903,
4023,
1669,
1281,
85288,
2528,
76266,
11,
220,
16,
15,
15,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func TestContextClientIP(t *testing.T) {
c, _ := CreateTestContext(httptest.NewRecorder())
c.Request, _ = http.NewRequest("POST", "/", nil)
c.Request.Header.Set("X-Real-IP", " 10.10.10.10 ")
c.Request.Header.Set("X-Forwarded-For", " 20.20.20.20, 30.30.30.30")
c.Request.Header.Set("X-Appengine-Remote-Addr", "50.50.50.50")
c.Request.RemoteAddr = " 40.40.40.40:42123 "
assert.Equal(t, "20.20.20.20", c.ClientIP())
c.Request.Header.Del("X-Forwarded-For")
assert.Equal(t, "10.10.10.10", c.ClientIP())
c.Request.Header.Set("X-Forwarded-For", "30.30.30.30 ")
assert.Equal(t, "30.30.30.30", c.ClientIP())
c.Request.Header.Del("X-Forwarded-For")
c.Request.Header.Del("X-Real-IP")
c.engine.AppEngine = true
assert.Equal(t, "50.50.50.50", c.ClientIP())
c.Request.Header.Del("X-Appengine-Remote-Addr")
assert.Equal(t, "40.40.40.40", c.ClientIP())
// no port
c.Request.RemoteAddr = "50.50.50.50"
assert.Empty(t, c.ClientIP())
} | explode_data.jsonl/26816 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 426
} | [
2830,
3393,
1972,
2959,
3298,
1155,
353,
8840,
836,
8,
341,
1444,
11,
716,
1669,
4230,
2271,
1972,
73392,
83,
70334,
7121,
47023,
2398,
1444,
9659,
11,
716,
284,
1758,
75274,
445,
2946,
497,
64657,
2092,
692,
1444,
9659,
15753,
4202,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRemoveAllPushNotificationsBuildBody(t *testing.T) {
assert := assert.New(t)
opts := &removeAllPushChannelsForDeviceOpts{
DeviceIDForPush: "deviceId",
PushType: PNPushTypeAPNS,
pubnub: pubnub,
}
_, err := opts.buildBody()
assert.Nil(err)
} | explode_data.jsonl/51099 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 122
} | [
2830,
3393,
13021,
2403,
16644,
34736,
11066,
5444,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
64734,
1669,
609,
5399,
2403,
16644,
35925,
2461,
6985,
43451,
515,
197,
197,
6985,
915,
2461,
16644,
25,
330,
94202,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestISORInterval_String(t *testing.T) {
check := func(exp string, r timeutil.ISORInterval) {
t.Helper()
assert.Equal(t, exp, r.String())
}
check("R0/2022-01-31T00:00:00Z/P1Y", timeutil.ISORInterval{
Repeat: 0,
Start: time.Date(2022, 1, 31, 0, 0, 0, 0, time.UTC),
Period: timeutil.ISODuration{Years: 1},
})
check("R1/2022-01-31T00:00:00Z/P1Y", timeutil.ISORInterval{
Repeat: 1,
Start: time.Date(2022, 1, 31, 0, 0, 0, 0, time.UTC),
Period: timeutil.ISODuration{Years: 1},
})
check("R0/2022-01-31T00:00:00Z/2022-01-31T01:00:00Z", timeutil.ISORInterval{
Repeat: 0,
Start: time.Date(2022, 1, 31, 0, 0, 0, 0, time.UTC),
Period: timeutil.ISODuration{TimePart: time.Hour},
})
check("R1/2022-01-31T00:00:00Z/2022-01-31T02:00:00Z", timeutil.ISORInterval{
Repeat: 1,
Start: time.Date(2022, 1, 31, 0, 0, 0, 0, time.UTC),
Period: timeutil.ISODuration{TimePart: time.Hour},
})
} | explode_data.jsonl/49482 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 445
} | [
2830,
3393,
1637,
868,
10256,
31777,
1155,
353,
8840,
836,
8,
341,
25157,
1669,
2915,
25865,
914,
11,
435,
882,
1314,
11230,
868,
10256,
8,
341,
197,
3244,
69282,
2822,
197,
6948,
12808,
1155,
11,
1343,
11,
435,
6431,
2398,
197,
630,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInvalidRef(t *testing.T) {
tmpDir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Failed to create directory: %v", err)
}
if os.Getenv("PRESERVE") == "" {
defer os.RemoveAll(tmpDir)
}
log := base.StderrLog()
ts := httptest.NewServer(GitHandler(
tmpDir,
NewGitProtocol(authorize, nil, false, OverallWallTimeHardLimit, fakeInteractiveSettingsCompiler, log),
&base.NoOpMetrics{},
log,
))
defer ts.Close()
problemAlias := "sumas"
{
repo, err := InitRepository(path.Join(tmpDir, problemAlias))
if err != nil {
t.Fatalf("Failed to initialize git repository: %v", err)
}
repo.Free()
}
newOid, packContents := createCommit(
t,
tmpDir,
problemAlias,
&git.Oid{},
map[string]io.Reader{
"settings.json": strings.NewReader(gitservertest.DefaultSettingsJSON),
"cases/0.in": strings.NewReader("1 2"),
"cases/0.out": strings.NewReader("3"),
"statements/es.markdown": strings.NewReader("Sumas"),
},
"Initial commit",
log,
)
push(
t,
tmpDir,
userAuthorization,
problemAlias,
"refs/heads/private",
&git.Oid{}, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/heads/private read-only\n", Err: nil},
},
ts,
)
push(
t,
tmpDir,
userAuthorization,
problemAlias,
"refs/heads/arbitrarybranchname",
&git.Oid{}, newOid,
packContents,
[]githttp.PktLineResponse{
{Line: "unpack ok\n", Err: nil},
{Line: "ng refs/heads/arbitrarybranchname invalid-ref\n", Err: nil},
},
ts,
)
} | explode_data.jsonl/63163 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 712
} | [
2830,
3393,
7928,
3945,
1155,
353,
8840,
836,
8,
341,
20082,
6184,
11,
1848,
1669,
43144,
65009,
6184,
19814,
259,
2967,
2398,
743,
1848,
961,
2092,
341,
197,
3244,
30762,
445,
9408,
311,
1855,
6220,
25,
1018,
85,
497,
1848,
340,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestParseCaseStatementSingle(t *testing.T) {
p := createParser(`case 5:`)
bvmUtils.Assert(t, isCaseStatement(p), "should detect case statement")
parseCaseStatement(p)
} | explode_data.jsonl/49723 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 63
} | [
2830,
3393,
14463,
4207,
8636,
10888,
1155,
353,
8840,
836,
8,
341,
3223,
1669,
1855,
6570,
5809,
5638,
220,
20,
18736,
340,
2233,
7338,
4209,
11711,
1155,
11,
374,
4207,
8636,
1295,
701,
330,
5445,
11140,
1142,
5114,
1138,
75115,
4207,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestDisableCgroup(t *testing.T) {
containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
c.config.DisableCgroup = true
spec, err := c.containerSpec("test-id", "sandbox-id", 1234, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
t.Log("resource limit should not be set")
assert.Nil(t, spec.Linux.Resources.Memory)
assert.Nil(t, spec.Linux.Resources.CPU)
t.Log("cgroup path should be empty")
assert.Empty(t, spec.Linux.CgroupsPath)
} | explode_data.jsonl/6419 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 194
} | [
2830,
3393,
25479,
34,
4074,
1155,
353,
8840,
836,
8,
341,
53290,
2648,
11,
42754,
2648,
11,
2168,
2648,
11,
716,
1669,
633,
4021,
4502,
83920,
741,
197,
2119,
15123,
1669,
2193,
16706,
16094,
1444,
1669,
501,
2271,
8973,
95125,
741,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestClhSetConfig(t *testing.T) {
assert := assert.New(t)
config, err := newClhConfig()
assert.NoError(err)
clh := &cloudHypervisor{}
assert.Equal(clh.config, HypervisorConfig{})
err = clh.setConfig(&config)
assert.NoError(err)
assert.Equal(clh.config, config)
} | explode_data.jsonl/68507 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 109
} | [
2830,
3393,
5066,
71,
1649,
2648,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
25873,
11,
1848,
1669,
501,
5066,
71,
2648,
741,
6948,
35699,
3964,
692,
39407,
71,
1669,
609,
12361,
39,
1082,
31396,
16094,
6948,
128... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRequireParentDirES6(t *testing.T) {
default_suite.expectBundled(t, bundled{
files: map[string]string{
"/Users/user/project/src/dir/entry.js": `
import value from '..'
console.log(value)
`,
"/Users/user/project/src/index.js": `
export default 123
`,
},
entryPaths: []string{"/Users/user/project/src/dir/entry.js"},
options: config.Options{
Mode: config.ModeBundle,
AbsOutputFile: "/out.js",
},
})
} | explode_data.jsonl/38461 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 205
} | [
2830,
3393,
17959,
8387,
6184,
1570,
21,
1155,
353,
8840,
836,
8,
341,
11940,
57239,
25952,
33,
1241,
832,
1155,
11,
51450,
515,
197,
74075,
25,
2415,
14032,
30953,
515,
298,
197,
3115,
7137,
11739,
40118,
13437,
88226,
14,
4085,
2857,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMultipleInitiators(t *testing.T) {
t.Parallel()
// Scenario: inst1, inst2 and inst3 both start protocol with inst4 at the same time.
// Expected outcome: inst4 successfully transfers state to all of them
peers := make(map[string]*pullTestInstance)
inst1 := newPushPullTestInstance("p1", peers)
inst2 := newPushPullTestInstance("p2", peers)
inst3 := newPushPullTestInstance("p3", peers)
inst4 := newPushPullTestInstance("p4", peers)
defer inst1.stop()
defer inst2.stop()
defer inst3.stop()
defer inst4.stop()
inst4.Add("1", "2", "3", "4")
inst1.setNextPeerSelection([]string{"p4"})
inst2.setNextPeerSelection([]string{"p4"})
inst3.setNextPeerSelection([]string{"p4"})
time.Sleep(time.Duration(2000) * time.Millisecond)
for _, inst := range []*pullTestInstance{inst1, inst2, inst3} {
assert.True(t, util.IndexInSlice(inst.state.ToArray(), "1", Strcmp) != -1)
assert.True(t, util.IndexInSlice(inst.state.ToArray(), "2", Strcmp) != -1)
assert.True(t, util.IndexInSlice(inst.state.ToArray(), "3", Strcmp) != -1)
assert.True(t, util.IndexInSlice(inst.state.ToArray(), "4", Strcmp) != -1)
}
} | explode_data.jsonl/56505 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 434
} | [
2830,
3393,
32089,
3803,
80290,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
197,
322,
58663,
25,
1761,
16,
11,
1761,
17,
323,
1761,
18,
2176,
1191,
11507,
448,
1761,
19,
518,
279,
1852,
882,
624,
197,
322,
31021,
15274,
25... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestWriteConditions(t *testing.T) {
for _, p := range permutations {
permuted := make([]apis.Condition, len(someConditions))
for i, j := range p {
permuted[i] = someConditions[j]
}
buf := &bytes.Buffer{}
dw := printers.NewBarePrefixWriter(buf)
WriteConditions(dw, permuted, false)
assert.Equal(t, normalizeSpace(buf.String()), normalizeSpace(`Conditions:
OK TYPE AGE REASON
++ Ready
++ Aaa
!! Zzz
W Bbb Bad
I Ccc Eh.`))
}
} | explode_data.jsonl/36447 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 181
} | [
2830,
3393,
7985,
35435,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
281,
1669,
2088,
71949,
341,
197,
197,
19913,
2774,
1669,
1281,
10556,
13725,
75134,
11,
2422,
1141,
635,
35435,
1171,
197,
2023,
600,
11,
502,
1669,
2088,
281,
341,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestIsAExitError(t *testing.T) {
var err error
err = &dockerExitError{nil}
_, ok := err.(uexec.ExitError)
if !ok {
t.Error("couldn't cast dockerExitError to exec.ExitError")
}
} | explode_data.jsonl/31160 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 76
} | [
2830,
3393,
3872,
32,
15339,
1454,
1155,
353,
8840,
836,
8,
341,
2405,
1848,
1465,
198,
9859,
284,
609,
28648,
15339,
1454,
90,
8385,
532,
197,
6878,
5394,
1669,
1848,
12832,
84,
11748,
34358,
1454,
340,
743,
753,
562,
341,
197,
3244,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestAnnotatingExecuteKeyRanges(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteKeyRanges")
_, err := rpcVTGate.ExecuteKeyRanges(
context.Background(),
"UPDATE table SET col1=1 WHERE col2>3;",
nil,
keyspace,
[]*topodatapb.KeyRange{{Start: []byte{0x10}, End: []byte{0x40}}},
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
// Keyrange spans both shards.
verifyQueryAnnotatedAsUnfriendly(t, shards[0])
verifyQueryAnnotatedAsUnfriendly(t, shards[1])
} | explode_data.jsonl/7846 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 236
} | [
2830,
3393,
2082,
1921,
1095,
17174,
1592,
74902,
1155,
353,
8840,
836,
8,
341,
23634,
8746,
11,
74110,
1669,
18620,
50,
31536,
2354,
11613,
2016,
2347,
445,
2271,
2082,
1921,
1095,
17174,
1592,
74902,
5130,
197,
6878,
1848,
1669,
35596,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestLocalLoaderReferencingGitBase(t *testing.T) {
topDir := "/whatever"
cloneRoot := topDir + "/someClone"
fSys := filesys.MakeFsInMemory()
fSys.MkdirAll(topDir)
fSys.MkdirAll(cloneRoot + "/foo/base")
root, err := demandDirectoryRoot(fSys, topDir)
if err != nil {
t.Fatalf("unexpected err: %v\n", err)
}
l1 := newLoaderAtConfirmedDir(
RestrictionRootOnly, root, fSys, nil,
git.DoNothingCloner(filesys.ConfirmedDir(cloneRoot)))
if l1.Root() != topDir {
t.Fatalf("unexpected root %s", l1.Root())
}
l2, err := l1.New("github.com/someOrg/someRepo/foo/base")
if err != nil {
t.Fatalf("unexpected err: %v\n", err)
}
if l2.Root() != cloneRoot+"/foo/base" {
t.Fatalf("unexpected root %s", l2.Root())
}
} | explode_data.jsonl/65709 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 310
} | [
2830,
3393,
7319,
9181,
47447,
11373,
46562,
3978,
1155,
353,
8840,
836,
8,
341,
42118,
6184,
1669,
3521,
68286,
698,
197,
19982,
8439,
1669,
1909,
6184,
488,
3521,
14689,
37677,
698,
1166,
32792,
1669,
3542,
1047,
50133,
48300,
641,
1064... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestGetMessage_Get(t *testing.T) {
// Arrange
req, err := http.NewRequest("GET", "https://api.secrethub.io/repos/jdoe/catpictures", nil)
assert.OK(t, err)
req.Header.Set("Date", "Fri, 10 Mar 2017 16:25:54 CET")
expected := "GET\n" +
"\n" +
"Fri, 10 Mar 2017 16:25:54 CET\n" +
"/repos/jdoe/catpictures;"
// Act
result, err := getMessage(req)
assert.OK(t, err)
// Assert
assertMessage(t, expected, string(result))
} | explode_data.jsonl/15486 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 187
} | [
2830,
3393,
1949,
2052,
13614,
1155,
353,
8840,
836,
8,
1476,
197,
322,
40580,
198,
24395,
11,
1848,
1669,
1758,
75274,
445,
3806,
497,
330,
2428,
1110,
2068,
4523,
837,
339,
392,
4245,
49505,
4437,
2982,
68,
92807,
75403,
497,
2092,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFullyQualifiedId(t *testing.T) {
// the URI is invalid. It should cause an error
invalidUriAcquirer, err := newGitAcquirer(
structs.Source{
Uri: "git@github.com:helm:thing/charts.git//stable/wordpress#master",
})
assert.Nil(t, invalidUriAcquirer)
assert.NotNil(t, err)
// no branch. It should cause an error
invalidUriAcquirer2, err := newGitAcquirer(
structs.Source{
Uri: "git@github.com:helm:thing/charts.git//stable/wordpress",
})
assert.Nil(t, invalidUriAcquirer2)
assert.NotNil(t, err)
tests := []struct {
name string
desc string
input Acquirer
expectValues string
expectError bool
}{
{
name: "good",
desc: "check IDs are generated with expected input",
input: discardErr(newGitAcquirer(
structs.Source{
Uri: "git@github.com:helm/charts.git//stable/wordpress",
Options: map[string]interface{}{
BranchKey: "master",
},
})),
expectValues: "helm-charts-wordpress",
},
{
name: "good_path_leading_trailing_slash",
desc: "check leading/trailing slashes on paths don't affect IDs",
input: discardErr(newGitAcquirer(
structs.Source{
Uri: "git@github.com:helm/charts.git///stable/wordpress/#master",
})),
expectValues: "helm-charts-wordpress",
},
{
name: "good_no_path",
desc: "check missing paths use the git repo",
input: discardErr(newGitAcquirer(
structs.Source{
Uri: "git@github.com:helm/charts.git///#master",
})),
expectValues: "helm-charts",
},
{
name: "good_name_in_id",
desc: "check explicit names are put into IDs",
input: discardErr(newGitAcquirer(
structs.Source{
Id: "site1-values",
Uri: "git@github.com:sugarkube/sugarkube.git//examples/values/wordpress/site1/#master",
})),
expectValues: "sugarkube-sugarkube-site1-values",
},
}
for _, test := range tests {
result, err := test.input.FullyQualifiedId()
if test.expectError {
assert.NotNil(t, err)
assert.Empty(t, result)
} else {
assert.Nil(t, err)
assert.Equal(t, test.expectValues, result, "IDs don't match in: %+v", test.input)
}
}
} | explode_data.jsonl/73141 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 941
} | [
2830,
3393,
67386,
65993,
764,
1155,
353,
8840,
836,
8,
341,
197,
322,
279,
16020,
374,
8318,
13,
1084,
1265,
5240,
458,
1465,
198,
197,
11808,
13899,
11654,
50792,
11,
1848,
1669,
501,
46562,
11654,
50792,
1006,
197,
6472,
82,
30350,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestReaderBloomUsed(t *testing.T) {
// wantActualNegatives is the minimum number of nonsense words (i.e. false
// positives or true negatives) to run through our filter. Some nonsense
// words might be rejected even before the filtering step, if they are out
// of the [minWord, maxWord] range of keys in the table.
wantActualNegatives := 0
for _, s := range nonsenseWords {
if minWord < s && s < maxWord {
wantActualNegatives++
}
}
files := []struct {
path string
comparer *Comparer
}{
{"h.table-bloom.no-compression.sst", nil},
{"h.table-bloom.no-compression.prefix_extractor.no_whole_key_filter.sst", fixtureComparer},
}
for _, tc := range files {
t.Run(tc.path, func(t *testing.T) {
for _, degenerate := range []bool{false, true} {
t.Run(fmt.Sprintf("degenerate=%t", degenerate), func(t *testing.T) {
c := &countingFilterPolicy{
FilterPolicy: bloom.FilterPolicy(10),
degenerate: degenerate,
}
testReader(t, tc.path, tc.comparer, c)
if c.truePositives != len(wordCount) {
t.Errorf("degenerate=%t: true positives: got %d, want %d", degenerate, c.truePositives, len(wordCount))
}
if c.falseNegatives != 0 {
t.Errorf("degenerate=%t: false negatives: got %d, want %d", degenerate, c.falseNegatives, 0)
}
if got := c.falsePositives + c.trueNegatives; got < wantActualNegatives {
t.Errorf("degenerate=%t: actual negatives (false positives + true negatives): "+
"got %d (%d + %d), want >= %d",
degenerate, got, c.falsePositives, c.trueNegatives, wantActualNegatives)
}
if !degenerate {
// The true negative count should be much greater than the false
// positive count.
if c.trueNegatives < 10*c.falsePositives {
t.Errorf("degenerate=%t: true negative to false positive ratio (%d:%d) is too small",
degenerate, c.trueNegatives, c.falsePositives)
}
}
})
}
})
}
} | explode_data.jsonl/40338 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 803
} | [
2830,
3393,
5062,
33,
18474,
22743,
1155,
353,
8840,
836,
8,
341,
197,
322,
1366,
28123,
47800,
5859,
374,
279,
8028,
1372,
315,
40802,
4244,
320,
72,
1734,
13,
895,
198,
197,
322,
63656,
476,
830,
74085,
8,
311,
1598,
1526,
1039,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestBackwardNextDeferPanic(t *testing.T) {
if testBackend != "rr" {
t.Skip("Reverse stepping test needs rr")
}
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 18) {
testseq2(t, "defercall", "", []seqTest{
{contContinue, 12},
{contReverseNext, 11},
{contReverseNext, 10},
{contReverseNext, 9},
{contReverseNext, 27},
{contContinueToBreakpoint, 12}, // skip first call to sampleFunction
{contContinueToBreakpoint, 6}, // go to call to sampleFunction through deferreturn
{contReverseNext, -1}, // runtime.deferreturn, maybe we should try to skip this
{contReverseStepout, 13},
{contReverseNext, 12},
{contReverseNext, 11},
{contReverseNext, 10},
{contReverseNext, 9},
{contReverseNext, 27},
{contContinueToBreakpoint, 18}, // go to panic call
{contNext, 6}, // panic so the deferred call happens
{contReverseNext, 18},
{contReverseNext, 17},
{contReverseNext, 16},
{contReverseNext, 15},
{contReverseNext, 23},
{contReverseNext, 22},
{contReverseNext, 21},
{contReverseNext, 28},
})
} else {
testseq2(t, "defercall", "", []seqTest{
{contContinue, 12},
{contReverseNext, 11},
{contReverseNext, 10},
{contReverseNext, 9},
{contReverseNext, 27},
{contContinueToBreakpoint, 12}, // skip first call to sampleFunction
{contContinueToBreakpoint, 6}, // go to call to sampleFunction through deferreturn
{contReverseNext, 13},
{contReverseNext, 12},
{contReverseNext, 11},
{contReverseNext, 10},
{contReverseNext, 9},
{contReverseNext, 27},
{contContinueToBreakpoint, 18}, // go to panic call
{contNext, 6}, // panic so the deferred call happens
{contReverseNext, 18},
{contReverseNext, 17},
{contReverseNext, 16},
{contReverseNext, 15},
{contReverseNext, 23},
{contReverseNext, 22},
{contReverseNext, 21},
{contReverseNext, 28},
})
}
} | explode_data.jsonl/56345 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 844
} | [
2830,
3393,
3707,
1606,
5847,
1912,
802,
47,
31270,
1155,
353,
8840,
836,
8,
341,
743,
1273,
29699,
961,
330,
634,
1,
341,
197,
3244,
57776,
445,
45695,
35467,
1273,
3880,
34393,
1138,
197,
532,
743,
728,
4366,
35842,
6025,
2195,
2993... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestEncodeBoolValue(t *testing.T) {
testCases := []struct {
Val interface{}
Expected bool
}{
{
Val: true,
Expected: true,
},
{
Val: false,
Expected: false,
},
{
Val: boolPtr(true),
Expected: true,
},
{
Val: boolPtr(false),
Expected: false,
},
}
for i, test := range testCases {
val, err := EncodeValue(test.Val)
if err != nil {
t.Fatalf("Case [%d]: Got unexpected error %s!", i, err)
}
switch cast := val.ValueType.(type) {
case *proto.Data_Value_BoolValue:
if test.Expected != cast.BoolValue {
t.Errorf("Case [%d]: Expected %t, got %t", i, test.Expected, cast.BoolValue)
}
default:
t.Errorf("Case [%d]: Got unexpected type back %+v!", i, cast)
}
}
} | explode_data.jsonl/30506 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 365
} | [
2830,
3393,
32535,
11233,
1130,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
197,
2208,
414,
3749,
16094,
197,
197,
18896,
1807,
198,
197,
59403,
197,
197,
515,
298,
197,
2208,
25,
414,
830,
345,
298,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestUnparsableField(t *testing.T) {
r := stringReader("test")
if _, err := r.ReadSentence(); err == nil || err == io.EOF {
t.Fatal("Parsing a non-numeric value as the token id should fail.")
}
r = stringReader("1 _ _ _ _ _ foo")
if _, err := r.ReadSentence(); err == nil || err == io.EOF {
t.Fatal("Parsing a non-numeric value as the head should fail.")
}
r = stringReader("1 _ _ _ _ _ _ _ foo")
if _, err := r.ReadSentence(); err == nil || err == io.EOF {
t.Fatal("Parsing a non-numeric value as the head should fail.")
}
} | explode_data.jsonl/43879 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 223
} | [
2830,
3393,
1806,
72380,
480,
1877,
1155,
353,
8840,
836,
8,
341,
7000,
1669,
914,
5062,
445,
1944,
1138,
743,
8358,
1848,
1669,
435,
6503,
84564,
2129,
1848,
621,
2092,
1369,
1848,
621,
6399,
86492,
341,
197,
3244,
26133,
445,
68839,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestCompactionOutputLevel(t *testing.T) {
opts := (*Options)(nil).EnsureDefaults()
version := &version{}
datadriven.RunTest(t, "testdata/compaction_output_level",
func(d *datadriven.TestData) (res string) {
defer func() {
if r := recover(); r != nil {
res = fmt.Sprintln(r)
}
}()
switch d.Cmd {
case "compact":
var start, base int
d.ScanArgs(t, "start", &start)
d.ScanArgs(t, "base", &base)
pc := newPickedCompaction(opts, version, start, base)
c := newCompaction(pc, opts, new(uint64))
return fmt.Sprintf("output=%d\nmax-output-file-size=%d\n",
c.outputLevel.level, c.maxOutputFileSize)
default:
return fmt.Sprintf("unknown command: %s", d.Cmd)
}
})
} | explode_data.jsonl/51405 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 325
} | [
2830,
3393,
13552,
1311,
5097,
4449,
1155,
353,
8840,
836,
8,
341,
64734,
1669,
4609,
3798,
2376,
8385,
568,
64439,
16273,
741,
74954,
1669,
609,
4366,
31483,
2698,
266,
14666,
2071,
16708,
2271,
1155,
11,
330,
92425,
14,
5689,
1311,
76... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestBow_WithMetadata(t *testing.T) {
t.Run("adding meta should not modify bow, but correctly change schema", func(t *testing.T) {
metadata := NewMetadata([]string{"testKey"}, []string{"testValue"})
b, _ := NewBow(NewSeries("test", []int64{1, 2}, nil))
res := b.WithMetadata(metadata)
assert.True(t, res.Metadata().Equal(metadata.Metadata),
"expected %q have %q", metadata.String(), b.Metadata().String())
assert.Equal(t, 0, b.Metadata().Len())
assert.Equal(t, 1, res.Metadata().Len())
})
} | explode_data.jsonl/49105 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 190
} | [
2830,
3393,
86692,
62,
2354,
14610,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
2998,
8823,
1265,
537,
5602,
15273,
11,
714,
12440,
2297,
10802,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
2109,
7603,
1669,
1532,
14610,
10556,
9... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFailure(t *testing.T) {
f := newFixture(t)
defer f.teardown()
t1 := time.Unix(1, 0)
f.resource("foo", "true", t1)
f.step()
f.assertStatus("foo", model.RuntimeStatusOK, 1)
f.assertLogMessage("foo", "Starting cmd true")
err := f.fe.stop("true", 5)
require.NoError(t, err)
f.assertStatus("foo", model.RuntimeStatusError, 1)
f.assertLogMessage("foo", "cmd true exited with code 5")
} | explode_data.jsonl/53808 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 158
} | [
2830,
3393,
17507,
1155,
353,
8840,
836,
8,
341,
1166,
1669,
501,
18930,
1155,
340,
16867,
282,
31853,
37496,
2822,
3244,
16,
1669,
882,
10616,
941,
7,
16,
11,
220,
15,
340,
1166,
24013,
445,
7975,
497,
330,
1866,
497,
259,
16,
340,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLoadV31(t *testing.T) {
actual, err := loadYAML(`
version: "3.1"
services:
foo:
image: busybox
secrets: [super]
secrets:
super:
external: true
`)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, len(actual.Services), 1)
assert.Equal(t, len(actual.Secrets), 1)
} | explode_data.jsonl/36729 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 133
} | [
2830,
3393,
5879,
53,
18,
16,
1155,
353,
8840,
836,
8,
341,
88814,
11,
1848,
1669,
2795,
56,
31102,
61528,
4366,
25,
330,
18,
13,
16,
698,
12779,
510,
220,
15229,
510,
262,
2168,
25,
13028,
2011,
198,
262,
23594,
25,
508,
9522,
92... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestReadConfigEnv(t *testing.T) {
var c TestConfig
os.Setenv("SECOND", "osConfig2")
os.Setenv("THIRD", "osConfig3")
err := GetConfig(&c, "config_test.yml")
assert.Equal(t, nil, err)
assert.Equal(t, "configItem1", c.First)
assert.Equal(t, "osConfig2", c.Second)
assert.Equal(t, "osConfig3", c.Third)
os.Clearenv()
} | explode_data.jsonl/3115 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 144
} | [
2830,
3393,
4418,
2648,
14359,
1155,
353,
8840,
836,
8,
341,
2405,
272,
3393,
2648,
271,
25078,
4202,
3160,
445,
98204,
497,
330,
436,
2648,
17,
1138,
25078,
4202,
3160,
445,
3617,
45180,
497,
330,
436,
2648,
18,
5130,
9859,
1669,
212... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestString(t *testing.T) {
g := &TTY{}
if err := json.Unmarshal([]byte(j), g); err != nil {
t.Fatalf("stty load: %v", err)
}
if g.String() != s {
t.Errorf("GTTY: want '%v', got '%v'", s, g.String())
as := strings.Split(s, " ")
ag := strings.Split(g.String(), " ")
if len(as) != len(ag) {
t.Fatalf("Wrong # elements in gtty: want %d, got %d", len(as), len(ag))
}
for i := range as {
t.Errorf("want %s got %s Same %v", as[i], ag[i], as[i] == ag[i])
}
}
} | explode_data.jsonl/73093 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 237
} | [
2830,
3393,
703,
1155,
353,
8840,
836,
8,
341,
3174,
1669,
609,
55544,
16094,
743,
1848,
1669,
2951,
38097,
10556,
3782,
3325,
701,
342,
1215,
1848,
961,
2092,
341,
197,
3244,
30762,
445,
267,
1881,
2795,
25,
1018,
85,
497,
1848,
340,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestHttpParser_301_response feeds a complete HTTP/1.1 301 redirect
// response (status line, headers, and HTML body) to the parser in one
// pass and checks that it is reported as a complete message with the
// declared Content-Length.
func TestHttpParser_301_response(t *testing.T) {
	// Enable HTTP debug logging only when running with -v.
	if testing.Verbose() {
		logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"http"})
	}
	http := HttpModForTests()
	// Raw wire format of the response, CRLF line endings included.
	// Content-Length (290) refers to the HTML body after the blank line.
	data := []byte(
		"HTTP/1.1 301 Moved Permanently\r\n" +
			"Date: Sun, 29 Sep 2013 16:53:59 GMT\r\n" +
			"Server: Apache\r\n" +
			"Location: http://www.hotnews.ro/\r\n" +
			"Vary: Accept-Encoding\r\n" +
			"Content-Length: 290\r\n" +
			"Connection: close\r\n" +
			"Content-Type: text/html; charset=iso-8859-1\r\n" +
			"\r\n" +
			"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\r\n" +
			"<html><head>\r\n" +
			"<title>301 Moved Permanently</title>\r\n" +
			"</head><body>\r\n" +
			"<h1>Moved Permanently</h1>\r\n" +
			"<p>The document has moved <a href=\"http://www.hotnews.ro/\">here</a>.</p>\r\n" +
			"<hr>\r\n" +
			"<address>Apache Server at hotnews.ro Port 80</address>\r\n" +
			"</body></html>")
	stream := &HttpStream{data: data, message: new(HttpMessage)}
	// messageParser returns (ok, complete): ok == parse succeeded,
	// complete == a full message was consumed.
	ok, complete := http.messageParser(stream)
	if !ok {
		t.Errorf("Parsing returned error")
	}
	if !complete {
		t.Errorf("Expecting a complete message")
	}
	if stream.message.ContentLength != 290 {
		t.Errorf("Expecting content length 290")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 556
} | [
2830,
3393,
2905,
6570,
62,
18,
15,
16,
9655,
1155,
353,
8840,
836,
8,
341,
743,
7497,
42505,
8297,
368,
341,
197,
6725,
79,
5247,
3803,
12531,
79,
36202,
11139,
11,
7342,
895,
11,
830,
11,
3056,
917,
4913,
1254,
23625,
197,
532,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestNewNodeInfoWithMultiTenancy constructs a NodeInfo from two pods
// belonging to tenant "test-te" and compares it field-by-field against a
// hand-built expected value: aggregated requested/non-zero resources,
// host-port usage, and the pod list itself must all match.
func TestNewNodeInfoWithMultiTenancy(t *testing.T) {
	nodeName := "test-node"
	// Two pods on the same node: 100m CPU / 500 bytes and 200m CPU / 1Ki,
	// each exposing one TCP host port on 127.0.0.1.
	pods := []*v1.Pod{
		makeBasePodWithMultiTenancy(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
		makeBasePodWithMultiTenancy(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
	}
	expected := &NodeInfo{
		// Sums of the two pods' requests: 100m+200m CPU; 500+1024 bytes.
		requestedResource: &Resource{
			MilliCPU:         300,
			Memory:           1524,
			EphemeralStorage: 0,
			AllowedPodNumber: 0,
			ScalarResources:  map[v1.ResourceName]int64(nil),
		},
		nonzeroRequest: &Resource{
			MilliCPU:         300,
			Memory:           1524,
			EphemeralStorage: 0,
			AllowedPodNumber: 0,
			ScalarResources:  map[v1.ResourceName]int64(nil),
		},
		TransientInfo:       NewTransientSchedulerInfo(),
		allocatableResource: &Resource{},
		generation:          2,
		// Both pods bind TCP host ports on 127.0.0.1.
		usedPorts: HostPortInfo{
			"127.0.0.1": map[ProtocolPort]struct{}{
				{Protocol: "TCP", Port: 80}:   {},
				{Protocol: "TCP", Port: 8080}: {},
			},
		},
		imageStates: map[string]*ImageStateSummary{},
		pods: []*v1.Pod{
			{
				ObjectMeta: metav1.ObjectMeta{
					Tenant:    "test-te",
					Namespace: "node_info_cache_test",
					Name:      "test-1",
					UID:       types.UID("test-1"),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU:    resource.MustParse("100m"),
									v1.ResourceMemory: resource.MustParse("500"),
								},
							},
							ResourcesAllocated: v1.ResourceList{
								v1.ResourceCPU:    resource.MustParse("100m"),
								v1.ResourceMemory: resource.MustParse("500"),
							},
							Ports: []v1.ContainerPort{
								{
									HostIP:   "127.0.0.1",
									HostPort: 80,
									Protocol: "TCP",
								},
							},
						},
					},
					NodeName: nodeName,
				},
			},
			{
				ObjectMeta: metav1.ObjectMeta{
					Tenant:    "test-te",
					Namespace: "node_info_cache_test",
					Name:      "test-2",
					UID:       types.UID("test-2"),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU:    resource.MustParse("200m"),
									v1.ResourceMemory: resource.MustParse("1Ki"),
								},
							},
							ResourcesAllocated: v1.ResourceList{
								v1.ResourceCPU:    resource.MustParse("200m"),
								v1.ResourceMemory: resource.MustParse("1Ki"),
							},
							Ports: []v1.ContainerPort{
								{
									HostIP:   "127.0.0.1",
									HostPort: 8080,
									Protocol: "TCP",
								},
							},
						},
					},
					NodeName: nodeName,
				},
			},
		},
	}
	// NewNodeInfo must bump the package-level generation counter past
	// its previous value.
	gen := generation
	ni := NewNodeInfo(pods...)
	if ni.generation <= gen {
		t.Errorf("generation is not incremented. previous: %v, current: %v", gen, ni.generation)
	}
	// Called for its side effect — presumably Workloads() populates
	// cached state on the expected pods so DeepEqual matches the pods
	// processed by NewNodeInfo; TODO confirm against Spec.Workloads.
	for i := range expected.pods {
		_ = expected.pods[i].Spec.Workloads()
	}
	// generation depends on global state, so copy the actual value in
	// before the deep comparison.
	expected.generation = ni.generation
	if !reflect.DeepEqual(expected, ni) {
		t.Errorf("\nEXPECT: %#v\nACTUAL: %#v\n", expected, ni)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1610
} | [
2830,
3393,
3564,
1955,
1731,
2354,
20358,
32687,
6572,
1155,
353,
8840,
836,
8,
341,
20831,
675,
1669,
330,
1944,
39054,
698,
3223,
29697,
1669,
29838,
85,
16,
88823,
515,
197,
77438,
3978,
23527,
2354,
20358,
32687,
6572,
1155,
11,
76... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestNewPopUp(t *testing.T) {
label := NewLabel("Hi")
pop := NewPopUp(label, test.Canvas())
assert.True(t, pop.Visible())
assert.Equal(t, pop, test.Canvas().Overlay())
} | explode_data.jsonl/65212 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 71
} | [
2830,
3393,
3564,
11598,
2324,
1155,
353,
8840,
836,
8,
341,
29277,
1669,
1532,
2476,
445,
13048,
1138,
74813,
1669,
1532,
11598,
2324,
13345,
11,
1273,
54121,
12367,
6948,
32443,
1155,
11,
2420,
11800,
2398,
6948,
12808,
1155,
11,
2420,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestSpanLinkSlice_RemoveIf(t *testing.T) {
// Test RemoveIf on empty slice
emptySlice := NewSpanLinkSlice()
emptySlice.RemoveIf(func(el SpanLink) bool {
t.Fail()
return false
})
// Test RemoveIf
filtered := generateTestSpanLinkSlice()
pos := 0
filtered.RemoveIf(func(el SpanLink) bool {
pos++
return pos%3 == 0
})
assert.Equal(t, 5, filtered.Len())
} | explode_data.jsonl/63293 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 146
} | [
2830,
3393,
12485,
3939,
33236,
66843,
2679,
1155,
353,
8840,
836,
8,
341,
197,
322,
3393,
10783,
2679,
389,
4287,
15983,
198,
197,
3194,
33236,
1669,
1532,
12485,
3939,
33236,
741,
197,
3194,
33236,
13270,
2679,
18552,
18584,
11903,
3939... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestTwoServers wires a client and a server network stack together over
// an in-memory link, starts two DHCP server endpoints on the server
// stack (one per subnet), and verifies that the client can still acquire
// a lease.
func TestTwoServers(t *testing.T) {
	// Hand-wired point-to-point link: each endpoint's remote list
	// contains the other endpoint.
	var serverLinkEP, clientLinkEP endpoint
	serverLinkEP.remote = append(serverLinkEP.remote, &clientLinkEP)
	clientLinkEP.remote = append(clientLinkEP.remote, &serverLinkEP)
	serverStack := createTestStack()
	addEndpointToStack(t, []tcpip.Address{serverAddr}, testNICID, serverStack, &serverLinkEP)
	clientStack := createTestStack()
	addEndpointToStack(t, nil, testNICID, clientStack, &clientLinkEP)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// First server: 192.168.3.1 ("\xc0\xa8\x03\x01") handing out
	// 192.168.3.2 with a 30-minute lease.
	if _, err := newEPConnServer(ctx, serverStack, []tcpip.Address{"\xc0\xa8\x03\x02"}, Config{
		ServerAddress: "\xc0\xa8\x03\x01",
		SubnetMask:    "\xff\xff\xff\x00",
		Router:        []tcpip.Address{"\xc0\xa8\x03\xF0"},
		DNS:           []tcpip.Address{"\x08\x08\x08\x08"},
		LeaseLength:   Seconds(30 * 60),
	}); err != nil {
		t.Fatal(err)
	}
	// Second server on the same stack: 192.168.4.1 handing out
	// 192.168.4.2.
	if _, err := newEPConnServer(ctx, serverStack, []tcpip.Address{"\xc0\xa8\x04\x02"}, Config{
		ServerAddress: "\xc0\xa8\x04\x01",
		SubnetMask:    "\xff\xff\xff\x00",
		Router:        []tcpip.Address{"\xc0\xa8\x03\xF0"},
		DNS:           []tcpip.Address{"\x08\x08\x08\x08"},
		LeaseLength:   Seconds(30 * 60),
	}); err != nil {
		t.Fatal(err)
	}
	// Zero jitter keeps acquisition timing deterministic in tests.
	c := newZeroJitterClient(clientStack, testNICID, linkAddr1, defaultAcquireTimeout, defaultBackoffTime, defaultRetransTime, nil)
	info := c.Info()
	// The acquisition must succeed even with two servers on the link.
	if _, err := acquire(ctx, c, t.Name(), &info); err != nil {
		t.Fatal(err)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 618
} | [
2830,
3393,
11613,
78139,
1155,
353,
8840,
836,
8,
341,
2405,
3538,
3939,
9197,
11,
2943,
3939,
9197,
14887,
198,
41057,
3939,
9197,
34093,
284,
8737,
21421,
3939,
9197,
34093,
11,
609,
2972,
3939,
9197,
340,
25291,
3939,
9197,
34093,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func Test_ErrCouldNotRollbackMigration_RollbackError(t *testing.T) {
err := ErrCouldNotRollbackMigration{
Name: "some-migration",
RollbackError: errors.New("rollback failed"),
Wrapped: errors.New("the root cause"),
}
assert.Equal(t, "error rolling back after failed migration rollback (some-migration), rollback error: rollback failed (the root cause)", err.Error())
} | explode_data.jsonl/55403 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 135
} | [
2830,
3393,
93623,
12895,
2623,
32355,
1419,
20168,
2568,
965,
1419,
1454,
1155,
353,
8840,
836,
8,
341,
9859,
1669,
15495,
12895,
2623,
32355,
1419,
20168,
515,
197,
21297,
25,
688,
330,
14689,
1448,
5033,
756,
197,
11143,
965,
1419,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestRetryDisabledByDefault checks that, without GRPC_GO_RETRY=on, a
// retryable status (AlreadyExists, per the installed retry policy) is
// NOT retried: the call fails with that code and the server sees exactly
// one attempt.
func TestRetryDisabledByDefault(t *testing.T) {
	// The test is only meaningful when retry support is off.
	if strings.EqualFold(os.Getenv("GRPC_GO_RETRY"), "on") {
		return
	}
	// i counts attempts seen by the server; the first attempt fails
	// with a code the retry policy would otherwise retry.
	i := -1
	ss := &stubServer{
		emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
			i++
			switch i {
			case 0:
				return nil, status.New(codes.AlreadyExists, "retryable error").Err()
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := ss.Start([]grpc.ServerOption{}); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()
	// Install a service config whose retry policy marks ALREADY_EXISTS
	// as retryable — it must be ignored when retry is disabled.
	ss.r.NewServiceConfig(`{
    "methodConfig": [{
      "name": [{"service": "grpc.testing.TestService"}],
      "waitForReady": true,
      "retryPolicy": {
        "MaxAttempts": 4,
        "InitialBackoff": ".01s",
        "MaxBackoff": ".01s",
        "BackoffMultiplier": 1.0,
        "RetryableStatusCodes": [ "ALREADY_EXISTS" ]
      }
    }]}`)
	// Poll until the client connection has picked up the new service
	// config, bounded by a 2s deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	for {
		if ctx.Err() != nil {
			t.Fatalf("Timed out waiting for service config update")
		}
		if ss.cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	cancel()
	// Expected outcome: the call surfaces AlreadyExists and the server
	// saw only attempt 0 (no retries).
	testCases := []struct {
		code  codes.Code
		count int
	}{
		{codes.AlreadyExists, 0},
	}
	for _, tc := range testCases {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err := ss.client.EmptyCall(ctx, &testpb.Empty{})
		cancel()
		if status.Code(err) != tc.code {
			t.Fatalf("EmptyCall(_, _) = _, %v; want _, <Code() = %v>", err, tc.code)
		}
		if i != tc.count {
			t.Fatalf("i = %v; want %v", i, tc.count)
		}
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 716
} | [
2830,
3393,
51560,
25907,
1359,
3675,
1155,
353,
8840,
836,
8,
341,
743,
9069,
12808,
75536,
9638,
64883,
445,
8626,
4872,
39622,
77924,
3975,
330,
263,
899,
341,
197,
853,
198,
197,
532,
8230,
1669,
481,
16,
198,
34472,
1669,
609,
59... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestFile verifies that a file attached to a GraphQL request is sent as
// a multipart form field: the test server checks the field name, the
// file name and the file contents before replying with a small JSON
// payload.
func TestFile(t *testing.T) {
	is := is.New(t)
	var calls int
	// Stand-in GraphQL endpoint that validates the uploaded file.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		calls++
		file, header, err := r.FormFile("file")
		is.NoErr(err)
		defer file.Close()
		is.Equal(header.Filename, "filename.txt")
		b, err := ioutil.ReadAll(file)
		is.NoErr(err)
		is.Equal(string(b), `This is a file`)
		_, err = io.WriteString(w, `{"data":{"value":"some data"}}`)
		is.NoErr(err)
	}))
	defer srv.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	// Multipart form encoding is required for file uploads.
	client := NewClient(srv.URL, UseMultipartForm())
	f := strings.NewReader(`This is a file`)
	req := NewRequest("query {}")
	req.File("file", "filename.txt", f)
	err := client.Run(ctx, req, nil)
	is.NoErr(err)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 326
} | [
2830,
3393,
1703,
1155,
353,
8840,
836,
8,
341,
19907,
1669,
374,
7121,
1155,
692,
2405,
6738,
526,
198,
1903,
10553,
1669,
54320,
70334,
7121,
5475,
19886,
89164,
18552,
3622,
1758,
37508,
11,
435,
353,
1254,
9659,
8,
341,
197,
1444,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestHandshakeRace races concurrent client Read and Write calls against
// an in-flight TLS handshake, 32 iterations with both release orders, to
// provide evidence that the handshake locking has no races or deadlocks.
// Most useful when run with -race.
func TestHandshakeRace(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	t.Parallel()
	// This test races a Read and Write to try and complete a handshake in
	// order to provide some evidence that there are no races or deadlocks
	// in the handshake locking.
	for i := 0; i < 32; i++ {
		c, s := localPipe(t)
		// Server side: handshake, echo one byte, close.
		go func() {
			server := Server(s, testConfig)
			if err := server.Handshake(); err != nil {
				panic(err)
			}
			var request [1]byte
			if n, err := server.Read(request[:]); err != nil || n != 1 {
				panic(err)
			}
			server.Write(request[:])
			server.Close()
		}()
		startWrite := make(chan struct{})
		startRead := make(chan struct{})
		readDone := make(chan struct{}, 1)
		client := Client(c, testConfig)
		go func() {
			<-startWrite
			var request [1]byte
			client.Write(request[:])
		}()
		go func() {
			<-startRead
			var reply [1]byte
			if _, err := io.ReadFull(client, reply[:]); err != nil {
				panic(err)
			}
			c.Close()
			readDone <- struct{}{}
		}()
		// Alternate the release order of reader and writer so both
		// interleavings are exercised.
		if i&1 == 1 {
			startWrite <- struct{}{}
			startRead <- struct{}{}
		} else {
			startRead <- struct{}{}
			startWrite <- struct{}{}
		}
		<-readDone
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 492
} | [
2830,
3393,
2314,
29661,
55991,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
3244,
57776,
445,
4886,
5654,
304,
481,
8676,
3856,
1138,
197,
532,
3244,
41288,
7957,
741,
197,
322,
1096,
1273,
20588,
264,
4457,
323,
96... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestShouldInvokeRunnersRunMethod(t *testing.T) {
// mock
watcher := NewFsWatcher()
runner := &MockedRunner{}
runner.On("Run", "file.go").Return(nil)
// act
watcher.Subscribers[16] = []Runner{runner}
watcher.notifySubscribers(16, fsnotify.Event{Name: "file.go"})
// assert
runner.AssertCalled(t, "Run", "file.go")
} | explode_data.jsonl/75739 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 131
} | [
2830,
3393,
14996,
17604,
6727,
4972,
6727,
3523,
1155,
353,
8840,
836,
8,
341,
197,
322,
7860,
198,
6692,
28058,
1669,
1532,
48300,
47248,
741,
197,
41736,
1669,
609,
11571,
291,
19486,
16094,
197,
41736,
8071,
445,
6727,
497,
330,
119... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRunnerValidate(t *testing.T) {
store := NewMockStoreIface()
store.ListFunc.SetDefaultReturn([]Migration{
{ID: 1, Introduced: NewVersion(3, 10), Progress: 1, Deprecated: newVersionPtr(3, 11)},
{ID: 1, Introduced: NewVersion(3, 11), Progress: 1, Deprecated: newVersionPtr(3, 13)},
{ID: 1, Introduced: NewVersion(3, 11), Progress: 1, Deprecated: newVersionPtr(3, 12)},
{ID: 1, Introduced: NewVersion(3, 12), Progress: 0},
{ID: 1, Introduced: NewVersion(3, 13), Progress: 0},
}, nil)
runner := newRunner(store, nil, &observation.TestContext)
statusErr := runner.Validate(context.Background(), NewVersion(3, 12), NewVersion(0, 0))
if statusErr != nil {
t.Errorf("unexpected status error: %s ", statusErr)
}
} | explode_data.jsonl/11538 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 268
} | [
2830,
3393,
19486,
17926,
1155,
353,
8840,
836,
8,
341,
57279,
1669,
1532,
11571,
6093,
40,
1564,
741,
57279,
5814,
9626,
4202,
3675,
5598,
10556,
20168,
515,
197,
197,
90,
915,
25,
220,
16,
11,
41689,
25,
1532,
5637,
7,
18,
11,
220... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestCheckFlowLayer(t *testing.T) {
dumpPackets, err := utils.ReadDumpFile("../../godpi_example/dumps/http.cap")
if err != nil {
t.Fatal(err)
}
flow := types.NewFlow()
for packet := range dumpPackets {
packetCopy := packet
flow.AddPacket(packetCopy)
}
noDetections := checkFlowLayer(flow, layers.LayerTypeTCP, func(layer gopacket.Layer) bool {
_, ok := layer.(*layers.TCP)
if !ok {
t.Error("Invalid layer passed to callback")
}
return false
})
if noDetections {
t.Error("Detection returned true when callback only returns false")
}
i := 0
yesDetections := checkFlowLayer(flow, layers.LayerTypeTCP, func(layer gopacket.Layer) bool {
i++
return i == 10
})
if !yesDetections {
t.Error("Detection should have returned true when callback returns true once")
}
} | explode_data.jsonl/61051 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 294
} | [
2830,
3393,
3973,
18878,
9188,
1155,
353,
8840,
836,
8,
341,
2698,
1510,
47,
18382,
11,
1848,
1669,
12439,
6503,
51056,
1703,
36800,
39711,
2493,
39304,
3446,
11793,
15627,
27388,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestStartCmdContents(t *testing.T) {
startCmd := GetStartCmd()
require.Equal(t, "start", startCmd.Use)
require.Equal(t, "Start orb-server", startCmd.Short)
require.Equal(t, "Start orb-server", startCmd.Long)
checkFlagPropertiesCorrect(t, startCmd, hostURLFlagName, hostURLFlagShorthand, hostURLFlagUsage)
} | explode_data.jsonl/31120 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 111
} | [
2830,
3393,
3479,
15613,
14803,
1155,
353,
8840,
836,
8,
341,
21375,
15613,
1669,
2126,
3479,
15613,
2822,
17957,
12808,
1155,
11,
330,
2468,
497,
1191,
15613,
9046,
340,
17957,
12808,
1155,
11,
330,
3479,
36366,
26300,
497,
1191,
15613,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestPassthrough verifies that, with buffering enabled for a specific
// keyspace/shard, requests carrying no error or a non-failover error
// pass straight through WaitForFailoverEnd without being buffered, and
// that all buffer slots remain free afterwards.
func TestPassthrough(t *testing.T) {
	flag.Set("enable_buffer", "true")
	flag.Set("buffer_keyspace_shards", topoproto.KeyspaceShardString(keyspace, shard))
	defer resetFlagsForTesting()
	b := New()
	// No error at all: must not buffer, so no retry callback is returned.
	if retryDone, err := b.WaitForFailoverEnd(context.Background(), keyspace, shard, nil); err != nil || retryDone != nil {
		t.Fatalf("requests with no error must never be buffered. err: %v retryDone: %v", err, retryDone)
	}
	// A non-failover error: same expectation — the buffer stays out of
	// the way.
	if retryDone, err := b.WaitForFailoverEnd(context.Background(), keyspace, shard, nonFailoverErr); err != nil || retryDone != nil {
		t.Fatalf("requests with non-failover errors must never be buffered. err: %v retryDone: %v", err, retryDone)
	}
	// All *size pool slots must still be available.
	if err := waitForPoolSlots(b, *size); err != nil {
		t.Fatal(err)
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 271
} | [
2830,
3393,
70911,
86901,
1155,
353,
8840,
836,
8,
341,
30589,
4202,
445,
12552,
7776,
497,
330,
1866,
1138,
30589,
4202,
445,
7573,
3097,
8746,
3712,
2347,
497,
1909,
45926,
983,
37863,
1306,
2016,
567,
703,
4857,
8746,
11,
52069,
1171... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
// TestAliasTags exercises RegisterAlias: an alias expands to its
// underlying tag list for Var and Struct validation, reports itself via
// ActualTag on failures, composes with other tags (omitempty, len,
// dive), and registering a restricted alias name panics.
func TestAliasTags(t *testing.T) {
	validate := New()
	validate.RegisterAlias("iscoloralias", "hexcolor|rgb|rgba|hsl|hsla")
	// Alias used directly on a value.
	s := "rgb(255,255,255)"
	errs := validate.Var(s, "iscoloralias")
	Equal(t, errs, nil)
	// omitempty short-circuits the alias for the empty string.
	s = ""
	errs = validate.Var(s, "omitempty,iscoloralias")
	Equal(t, errs, nil)
	// The alias passes but the appended len=5 constraint fails.
	s = "rgb(255,255,0)"
	errs = validate.Var(s, "iscoloralias,len=5")
	NotEqual(t, errs, nil)
	AssertError(t, errs, "", "", "", "", "len")
	// Alias used via a struct tag.
	type Test struct {
		Color string `validate:"iscoloralias"`
	}
	tst := &Test{
		Color: "#000",
	}
	errs = validate.Struct(tst)
	Equal(t, errs, nil)
	tst.Color = "cfvre"
	errs = validate.Struct(tst)
	NotEqual(t, errs, nil)
	AssertError(t, errs, "Test.Color", "Test.Color", "Color", "Color", "iscoloralias")
	// The field error exposes the alias's underlying tag expansion.
	fe := getError(errs, "Test.Color", "Test.Color")
	NotEqual(t, fe, nil)
	Equal(t, fe.ActualTag(), "hexcolor|rgb|rgba|hsl|hsla")
	// Aliases compose: dive applies the alias to each slice element.
	validate.RegisterAlias("req", "required,dive,iscoloralias")
	arr := []string{"val1", "#fff", "#000"}
	errs = validate.Var(arr, "req")
	NotEqual(t, errs, nil)
	AssertError(t, errs, "[0]", "[0]", "[0]", "[0]", "iscoloralias")
	// Restricted alias names are rejected with a panic.
	PanicMatches(t, func() { validate.RegisterAlias("exists!", "gt=5,lt=10") }, "Alias 'exists!' either contains restricted characters or is the same as a restricted tag needed for normal operation")
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 548
} | [
2830,
3393,
22720,
15930,
1155,
353,
8840,
836,
8,
1476,
197,
7067,
1669,
1532,
741,
197,
7067,
19983,
22720,
445,
285,
3423,
14956,
497,
330,
17308,
3423,
91,
16509,
91,
20400,
91584,
3226,
91584,
51642,
5130,
1903,
1669,
330,
16509,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestContextRenderHTMLString(t *testing.T) {
w := httptest.NewRecorder()
c, _ := CreateTestContext(w)
c.Header("Content-Type", "text/html; charset=utf-8")
c.String(http.StatusCreated, "<html>%s %d</html>", "string", 3)
assert.Equal(t, http.StatusCreated, w.Code)
assert.Equal(t, "<html>string 3</html>", w.Body.String())
assert.Equal(t, "text/html; charset=utf-8", w.Header().Get("Content-Type"))
} | explode_data.jsonl/26788 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 162
} | [
2830,
3393,
1972,
6750,
5835,
703,
1155,
353,
8840,
836,
8,
341,
6692,
1669,
54320,
70334,
7121,
47023,
741,
1444,
11,
716,
1669,
4230,
2271,
1972,
3622,
692,
1444,
15753,
445,
2762,
10804,
497,
330,
1318,
13739,
26,
11617,
22264,
12,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestReplManagerSetReplicationStopped(t *testing.T) {
defer func(saved bool) { *mysqlctl.DisableActiveReparents = saved }(*mysqlctl.DisableActiveReparents)
*mysqlctl.DisableActiveReparents = true
tm := &TabletManager{}
tm.replManager = newReplManager(context.Background(), tm, 100*time.Millisecond)
// DisableActiveReparents == true should result in no-op
*mysqlctl.DisableActiveReparents = true
tm.replManager.setReplicationStopped(true)
assert.False(t, tm.replManager.ticks.Running())
tm.replManager.setReplicationStopped(false)
assert.False(t, tm.replManager.ticks.Running())
*mysqlctl.DisableActiveReparents = false
tm.replManager.setReplicationStopped(false)
assert.True(t, tm.replManager.ticks.Running())
tm.replManager.setReplicationStopped(true)
assert.False(t, tm.replManager.ticks.Running())
} | explode_data.jsonl/77815 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 284
} | [
2830,
3393,
693,
500,
2043,
1649,
18327,
1693,
59803,
1155,
353,
8840,
836,
8,
341,
16867,
2915,
14217,
1807,
8,
314,
353,
12272,
12373,
10166,
480,
5728,
693,
25456,
284,
6781,
335,
4071,
12272,
12373,
10166,
480,
5728,
693,
25456,
340... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTaskPrintWithReplicatedService(t *testing.T) {
quiet := false
trunc := false
noResolve := true
apiClient := &fakeClient{}
cli := test.NewFakeCli(apiClient)
tasks := []swarm.Task{
*Task(TaskServiceID("service-id-foo"), TaskSlot(1)),
}
err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .Name }}")
assert.NilError(t, err)
golden.Assert(t, cli.OutBuffer().String(), "task-print-with-replicated-service.golden")
} | explode_data.jsonl/43887 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 178
} | [
2830,
3393,
6262,
8994,
2354,
18327,
13724,
1860,
1155,
353,
8840,
836,
8,
341,
197,
43650,
1669,
895,
198,
25583,
1347,
1669,
895,
198,
72104,
56808,
1669,
830,
198,
54299,
2959,
1669,
609,
30570,
2959,
16094,
86448,
1669,
1273,
7121,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestGetConnectionDetails checks that GetConnectionDetails exposes the
// queue's observed URL under the endpoint secret key, and returns nil
// details for a zero-value queue.
func TestGetConnectionDetails(t *testing.T) {
	cases := map[string]struct {
		queue v1beta1.Queue
		want  managed.ConnectionDetails
	}{
		// A queue with an observed URL maps it to the endpoint key.
		"ValidInstance": {
			queue: v1beta1.Queue{
				Status: v1beta1.QueueStatus{
					AtProvider: v1beta1.QueueObservation{
						URL: url,
					},
				},
			},
			want: managed.ConnectionDetails{
				xpv1.ResourceCredentialsSecretEndpointKey: []byte(url),
			},
		},
		// A zero-value queue yields no connection details.
		"NilInstance": {
			queue: v1beta1.Queue{},
			want:  nil,
		}}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			got := GetConnectionDetails(tc.queue)
			if diff := cmp.Diff(tc.want, got); diff != "" {
				t.Errorf("r: -want, +got:\n%s", diff)
			}
		})
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 309
} | [
2830,
3393,
1949,
4526,
7799,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
2415,
14032,
60,
1235,
341,
197,
46993,
348,
16,
19127,
16,
50251,
198,
197,
50780,
220,
8975,
17463,
7799,
198,
197,
59403,
197,
197,
1,
4088,
2523,
788,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestTemporaryDirectorySymlinkingRunner covers the three interesting
// paths of NewTemporaryDirectorySymlinkingRunner: a temporary directory
// path in the request that cannot be resolved, a symlink destination
// that cannot be replaced, and the success case where the symlink is
// created and the request is forwarded to the wrapped Runner.
func TestTemporaryDirectorySymlinkingRunner(t *testing.T) {
	ctrl, ctx := gomock.WithContext(context.Background(), t)
	// The build directory the symlink targets are resolved against.
	buildDirectory, scopeWalker := path.EmptyBuilder.Join(path.VoidScopeWalker)
	require.NoError(t, path.Resolve("/worker/build", scopeWalker))
	t.Run("InvalidTemporaryDirectory", func(t *testing.T) {
		// The temporary directory path provided by bb_worker is
		// invalid. This should cause the symbolic link creation
		// to fail.
		baseRunner := mock.NewMockRunner(ctrl)
		runner := runner.NewTemporaryDirectorySymlinkingRunner(baseRunner, "/hello", buildDirectory)
		_, err := runner.Run(ctx, &runner_pb.RunRequest{
			Arguments:          []string{"cc", "-o", "hello.o", "hello.c"},
			WorkingDirectory:   "a/root/subdir",
			StdoutPath:         "a/stdout",
			StderrPath:         "a/stderr",
			InputRootDirectory: "a/root",
			TemporaryDirectory: "a/\x00tmp",
		})
		require.Equal(t, status.Error(codes.InvalidArgument, "Failed to resolve temporary directory: Path contains a null byte"), err)
	})
	t.Run("InvalidSymlinkPath", func(t *testing.T) {
		// Failures to replace the provided path with a symbolic
		// link should be propagated.
		baseRunner := mock.NewMockRunner(ctrl)
		runner := runner.NewTemporaryDirectorySymlinkingRunner(baseRunner, "/", buildDirectory)
		_, err := runner.Run(ctx, &runner_pb.RunRequest{
			Arguments:          []string{"cc", "-o", "hello.o", "hello.c"},
			WorkingDirectory:   "a/root/subdir",
			StdoutPath:         "a/stdout",
			StderrPath:         "a/stderr",
			InputRootDirectory: "a/root",
			TemporaryDirectory: "a/tmp",
		})
		testutil.RequirePrefixedStatus(t, status.Error(codes.Internal, "Failed to remove symbolic link \"/\": "), err)
	})
	t.Run("Success", func(t *testing.T) {
		// Successfully replace the provided path with a
		// symbolic link. The execution request should be
		// forwarded to the underlying Runner. The symbolic link
		// should have the right contents.
		request := &runner_pb.RunRequest{
			Arguments:          []string{"cc", "-o", "hello.o", "hello.c"},
			WorkingDirectory:   "a/root/subdir",
			StdoutPath:         "a/stdout",
			StderrPath:         "a/stderr",
			InputRootDirectory: "a/root",
			TemporaryDirectory: "a/tmp",
		}
		response := &runner_pb.RunResponse{
			ExitCode: 123,
		}
		baseRunner := mock.NewMockRunner(ctrl)
		baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, request)).Return(response, nil)
		symlinkPath := filepath.Join(t.TempDir(), "symlink")
		runner := runner.NewTemporaryDirectorySymlinkingRunner(baseRunner, symlinkPath, buildDirectory)
		observedResponse, err := runner.Run(ctx, request)
		require.NoError(t, err)
		testutil.RequireEqualProto(t, response, observedResponse)
		// The symlink must point at the temporary directory resolved
		// inside the build directory.
		observedSymlinkPath, err := os.Readlink(symlinkPath)
		require.NoError(t, err)
		require.Equal(t, "/worker/build/a/tmp", observedSymlinkPath)
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1079
} | [
2830,
3393,
59362,
9310,
34667,
1014,
15736,
19486,
1155,
353,
8840,
836,
8,
341,
84381,
11,
5635,
1669,
342,
316,
1176,
26124,
1972,
5378,
19047,
1507,
259,
692,
69371,
9310,
11,
6891,
84892,
1669,
1815,
11180,
3297,
22363,
5581,
21710,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSubquery(t *testing.T) {
ctx := getContext()
people := model.QueryPeople(ctx).
Alias("manager_count",
model.QueryProjects(ctx).
Alias("", Count(node.Project().ManagerID())).
Where(Equal(node.Project().ManagerID(), node.Person().ID())).
Subquery()).
Where(Equal(node.Person().LastName(), "Wolfe")).
Load()
assert.Equal(t, 2, people[0].GetAlias("manager_count").Int(), "Karen Wolfe manages 2 projects.")
} | explode_data.jsonl/26327 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 161
} | [
2830,
3393,
3136,
1631,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
66175,
741,
197,
16069,
1669,
1614,
15685,
15919,
7502,
4292,
197,
197,
22720,
445,
13297,
3180,
756,
298,
19727,
15685,
29958,
7502,
4292,
571,
197,
22720,
19814,
4504,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestABCIValidatorWithoutPubKey(t *testing.T) {
pkBLS := bls12381.GenPrivKey().PubKey()
abciVal := TM2PB.Validator(NewValidator(pkBLS, 10))
// pubkey must be nil
tmValExpected := abci.Validator{
Address: pkBLS.Address(),
Power: 10,
}
assert.Equal(t, tmValExpected, abciVal)
} | explode_data.jsonl/65078 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 123
} | [
2830,
3393,
1867,
11237,
14256,
26040,
29162,
1592,
1155,
353,
8840,
836,
8,
341,
3223,
81524,
7268,
1669,
1501,
82,
16,
17,
18,
23,
16,
65384,
32124,
1592,
1005,
29162,
1592,
2822,
197,
370,
5855,
2208,
1669,
23975,
17,
40637,
13,
14... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWalletSetDefault(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletSetDefault))
defer done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addr, err := address.NewIDAddress(1234)
assert.NoError(t, err)
mockApi.EXPECT().WalletSetDefault(ctx, addr).Return(nil)
//stm: @CLI_WALLET_SET_DEFAULT_001
err = app.Run([]string{"wallet", "set-default", "f01234"})
assert.NoError(t, err)
} | explode_data.jsonl/8254 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 177
} | [
2830,
3393,
38259,
1649,
3675,
1155,
353,
8840,
836,
8,
341,
28236,
11,
7860,
6563,
11,
8358,
2814,
1669,
1532,
11571,
2164,
2354,
9432,
7082,
1155,
11,
3085,
6746,
445,
35735,
497,
15085,
1649,
3675,
1171,
16867,
2814,
2822,
20985,
11,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetCnameForRollout(t *testing.T) {
nameSuffix := "global"
identifier := "identity"
identifierVal := "COMPANY.platform.server"
testCases := []struct {
name string
rollout argo.Rollout
expected string
}{
{
name: "should return valid cname (from label)",
rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}},
expected: strings.ToLower("stage." + identifierVal + ".global"),
}, {
name: "should return valid cname (from label)- case sensitive cname annotation enabled",
rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"admiral.io/cname-case-sensitive": "true"}, Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}},
expected: "stage." + identifierVal + ".global",
}, {
name: "should return valid cname (from label)- case sensitive cname annotation disabled",
rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"admiral.io/cname-case-sensitive": "false"}, Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}},
expected: strings.ToLower("stage." + identifierVal + ".global"),
},
{
name: "should return valid cname (from annotation)",
rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal}, Labels: map[string]string{"env": "stage"}}}}},
expected: strings.ToLower("stage." + identifierVal + ".global"),
},
{
name: "should return empty string",
rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"env": "stage"}}}}},
expected: "",
},
}
for _, c := range testCases {
t.Run(c.name, func(t *testing.T) {
cname := GetCnameForRollout(&c.rollout, identifier, nameSuffix)
if !(cname == c.expected) {
t.Errorf("Wanted Cname: %s, got: %s", c.expected, cname)
}
})
}
} | explode_data.jsonl/73047 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 816
} | [
2830,
3393,
1949,
34,
606,
2461,
32355,
411,
1155,
353,
8840,
836,
8,
1476,
11609,
40177,
1669,
330,
9752,
698,
197,
15909,
1669,
330,
16912,
698,
197,
15909,
2208,
1669,
330,
23707,
17293,
24695,
12638,
1837,
18185,
37302,
1669,
3056,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGetSpotKline(t *testing.T) {
t.Parallel()
_, err := b.GetSpotKline(context.Background(),
&KlinesRequestParams{
Symbol: currency.NewPair(currency.BTC, currency.USDT),
Interval: kline.FiveMin.Short(),
Limit: 24,
StartTime: time.Unix(1577836800, 0),
EndTime: time.Unix(1580515200, 0),
})
if err != nil {
t.Error("Binance GetSpotKline() error", err)
}
} | explode_data.jsonl/76647 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 173
} | [
2830,
3393,
1949,
47049,
42,
1056,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
197,
6878,
1848,
1669,
293,
2234,
47049,
42,
1056,
5378,
19047,
3148,
197,
197,
5,
42,
7969,
1900,
4870,
515,
298,
7568,
4001,
25,
262,
11413,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestError(t *testing.T) {
_, err := NewHTTPSListener("./httpcert/nofile.pem", "./httpcert/nofile.pem", ":12341")
if err == nil {
t.Error("NewHTTPSListener should return error with unexisted file path\n")
}
_, err = NewHTTPSListener("./httpcert/cert.pem", "./httpcert/key.pem", "error address")
if err == nil {
t.Error("NewHTTPSListener should return error with error address\n")
}
} | explode_data.jsonl/22527 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 150
} | [
2830,
3393,
1454,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1848,
1669,
1532,
82354,
2743,
13988,
1254,
12246,
9612,
65273,
49373,
497,
5924,
1254,
12246,
9612,
65273,
49373,
497,
13022,
16,
17,
18,
19,
16,
1138,
743,
1848,
621,
2092,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestValidateSupportedVersion(t *testing.T) {
tests := []struct {
gv schema.GroupVersion
allowDeprecated bool
expectedErr bool
}{
{
gv: schema.GroupVersion{
Group: KubeadmGroupName,
Version: "v1alpha1",
},
expectedErr: true,
},
{
gv: schema.GroupVersion{
Group: KubeadmGroupName,
Version: "v1alpha2",
},
expectedErr: true,
},
{
gv: schema.GroupVersion{
Group: KubeadmGroupName,
Version: "v1alpha3",
},
expectedErr: true,
},
{
gv: schema.GroupVersion{
Group: KubeadmGroupName,
Version: "v1beta1",
},
expectedErr: true,
},
{
gv: schema.GroupVersion{
Group: KubeadmGroupName,
Version: "v1beta2",
},
},
{
gv: schema.GroupVersion{
Group: KubeadmGroupName,
Version: "v1beta3",
},
},
{
gv: schema.GroupVersion{
Group: "foo.k8s.io",
Version: "v1",
},
},
}
for _, rt := range tests {
t.Run(fmt.Sprintf("%s/allowDeprecated:%t", rt.gv, rt.allowDeprecated), func(t *testing.T) {
err := validateSupportedVersion(rt.gv, rt.allowDeprecated)
if rt.expectedErr && err == nil {
t.Error("unexpected success")
} else if !rt.expectedErr && err != nil {
t.Errorf("unexpected failure: %v", err)
}
})
}
} | explode_data.jsonl/10225 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 637
} | [
2830,
3393,
17926,
34636,
5637,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
3174,
85,
1060,
10802,
5407,
5637,
198,
197,
197,
7183,
51344,
1807,
198,
197,
42400,
7747,
257,
1807,
198,
197,
59403,
197,
197,
515,
29... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestAllocateIP(t *testing.T) {
tests := []struct {
name string
ipPools []*antreacrds.ExternalIPPool
allocatedIP []struct {
ip string
pool string
}
allocateFrom string
expectedIP string
expectError bool
expectedIPPoolStatus []antreacrds.ExternalIPPoolUsage
}{
{
name: "allocate from proper IP pool",
ipPools: []*antreacrds.ExternalIPPool{
newExternalIPPool("eip1", "", "10.10.10.2", "10.10.10.3"),
},
allocatedIP: nil,
allocateFrom: "eip1",
expectedIP: "10.10.10.2",
expectError: false,
expectedIPPoolStatus: []antreacrds.ExternalIPPoolUsage{
{Total: 2, Used: 1},
},
},
{
name: "allocate from exhausted IP pool",
ipPools: []*antreacrds.ExternalIPPool{
newExternalIPPool("eip1", "", "10.10.10.2", "10.10.10.3"),
},
allocatedIP: []struct {
ip string
pool string
}{
{"10.10.10.2", "eip1"},
{"10.10.10.3", "eip1"},
},
allocateFrom: "eip1",
expectedIP: "",
expectError: true,
expectedIPPoolStatus: []antreacrds.ExternalIPPoolUsage{
{Total: 2, Used: 2},
},
},
{
name: "allocate from non existing IP pool",
ipPools: []*antreacrds.ExternalIPPool{
newExternalIPPool("eip1", "", "10.10.10.2", "10.10.10.3"),
},
allocatedIP: nil,
allocateFrom: "eip2",
expectedIP: "",
expectError: true,
expectedIPPoolStatus: []antreacrds.ExternalIPPoolUsage{
{Total: 2, Used: 0},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
var fakeCRDObjects []runtime.Object
for _, p := range tt.ipPools {
fakeCRDObjects = append(fakeCRDObjects, p)
}
controller := newController(fakeCRDObjects)
controller.crdInformerFactory.Start(stopCh)
controller.crdInformerFactory.WaitForCacheSync(stopCh)
go controller.Run(stopCh)
require.True(t, cache.WaitForCacheSync(stopCh, controller.HasSynced))
for _, alloc := range tt.allocatedIP {
require.NoError(t, controller.UpdateIPAllocation(alloc.pool, net.ParseIP(alloc.ip)))
}
ipGot, err := controller.AllocateIPFromPool(tt.allocateFrom)
assert.Equal(t, tt.expectError, err != nil)
assert.Equal(t, net.ParseIP(tt.expectedIP), ipGot)
for idx, pool := range tt.ipPools {
checkExternalIPPoolStatus(t, controller, pool.Name, tt.expectedIPPoolStatus[idx])
}
})
}
} | explode_data.jsonl/10257 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1113
} | [
2830,
3393,
75380,
3298,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
286,
914,
198,
197,
46531,
47,
6178,
257,
29838,
517,
265,
64748,
5356,
5121,
15342,
3298,
10551,
198,
197,
197,
57372,
3298,
3056,
1235,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestConfig_DeleteSyncResources(t *testing.T) {
log.Info("Running test: Cancel the expectations when sync only resource gets deleted")
g := gomega.NewGomegaWithT(t)
// setup the Manager and Controller. Wrap the Controller Reconcile function so it writes each request to a
// channel when it is finished.
mgr, wm := setupManager(t)
c := testclient.NewRetryClient(mgr.GetClient())
// create the Config object and expect the Reconcile to be created when controller starts
instance := &configv1alpha1.Config{
ObjectMeta: metav1.ObjectMeta{
Name: "config",
Namespace: "gatekeeper-system",
Finalizers: []string{finalizerName},
},
Spec: configv1alpha1.ConfigSpec{
Sync: configv1alpha1.Sync{
SyncOnly: []configv1alpha1.SyncOnlyEntry{
{Group: "", Version: "v1", Kind: "Pod"},
},
},
},
}
err := c.Create(context.TODO(), instance)
g.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
err = c.Delete(context.TODO(), instance)
g.Expect(err).NotTo(gomega.HaveOccurred())
}()
// create the pod that is a sync only resource in config obj
pod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "default",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "nginx",
Image: "nginx",
},
},
},
}
g.Expect(c.Create(context.TODO(), pod)).NotTo(gomega.HaveOccurred())
// set up tracker
tracker, err := readiness.SetupTracker(mgr, false)
g.Expect(err).NotTo(gomega.HaveOccurred())
// events channel will be used to receive events from dynamic watches
events := make(chan event.GenericEvent, 1024)
// set up controller and add it to the manager
err = setupController(mgr, wm, tracker, events)
g.Expect(err).NotTo(gomega.HaveOccurred())
// start manager that will start tracker and controller
ctx, cancelFunc := context.WithCancel(context.Background())
mgrStopped := StartTestManager(ctx, mgr, g)
once := gosync.Once{}
defer func() {
once.Do(func() {
cancelFunc()
mgrStopped.Wait()
})
}()
gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}
// get the object tracker for the synconly pod resource
tr, ok := tracker.ForData(gvk).(testExpectations)
if !ok {
t.Fatalf("unexpected tracker, got %T", tr)
}
// ensure that expectations are set for the constraint gvk
g.Eventually(func() bool {
return tr.IsExpecting(gvk, types.NamespacedName{Name: "testpod", Namespace: "default"})
}, timeout).Should(gomega.BeTrue())
// delete the pod , the delete event will be reconciled by sync controller
// to cancel the expectation set for it by tracker
g.Expect(c.Delete(context.TODO(), pod)).NotTo(gomega.HaveOccurred())
// register events for the pod to go in the event channel
podObj := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "default",
},
}
events <- event.GenericEvent{
Object: podObj,
}
// check readiness tracker is satisfied post-reconcile
g.Eventually(func() bool {
return tracker.ForData(gvk).Satisfied()
}, timeout).Should(gomega.BeTrue())
} | explode_data.jsonl/66489 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1230
} | [
2830,
3393,
2648,
57418,
12154,
11277,
1155,
353,
8840,
836,
8,
341,
6725,
20132,
445,
18990,
1273,
25,
23542,
279,
16665,
979,
12811,
1172,
5101,
5221,
11062,
5130,
3174,
1669,
342,
32696,
7121,
38,
32696,
2354,
51,
1155,
692,
197,
322... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCreateFilesystemFailure(t *testing.T) {
clientFunc := func(client RookRestClient) (interface{}, error) {
return client.CreateFilesystem(model.FilesystemRequest{Name: "myfs1"})
}
verifyFunc := getStringVerifyFunc(t)
ClientFailureHelperWithVerification(t, clientFunc, verifyFunc)
} | explode_data.jsonl/27859 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
4021,
1703,
8948,
17507,
1155,
353,
8840,
836,
8,
341,
25291,
9626,
1669,
2915,
12805,
431,
1941,
12416,
2959,
8,
320,
4970,
22655,
1465,
8,
341,
197,
853,
2943,
7251,
1703,
8948,
7635,
52009,
612,
1900,
63121,
25,
330,
24... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestJobsTableProgressFamily(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.TODO()
s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop(ctx)
var table, schema string
sqlutils.MakeSQLRunner(db).QueryRow(t, `SHOW CREATE system.jobs`).Scan(&table, &schema)
if !strings.Contains(schema, `FAMILY progress (progress)`) {
t.Fatalf("expected progress family, got %q", schema)
}
} | explode_data.jsonl/24125 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 162
} | [
2830,
3393,
40667,
2556,
9496,
15192,
1155,
353,
8840,
836,
8,
341,
16867,
23352,
1944,
36892,
2271,
1155,
8,
2822,
20985,
1669,
2266,
90988,
741,
1903,
11,
2927,
11,
716,
1669,
3538,
6031,
12101,
5475,
1155,
11,
2331,
8787,
5475,
4117,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestFilePutOperation(t *testing.T) {
tcs := []struct {
want string
values url.Values
header http.Header
}{
// https://github.com/elastic/apm/blob/main/specs/agents/tracing-instrumentation-azure.md#determining-operations
{
want: "Copy",
header: http.Header{"x-ms-copy-source": []string{}},
},
{
want: "Abort",
header: http.Header{"x-ms-copy-action:abort": []string{}},
},
{
want: "Create",
values: url.Values{"restype": []string{"directory"}},
},
{
want: "Upload",
values: url.Values{"comp": []string{"range"}},
},
{
want: "CloseHandles",
values: url.Values{"comp": []string{"forceclosehandles"}},
},
{
want: "Lease",
values: url.Values{"comp": []string{"lease"}},
},
{
want: "Snapshot",
values: url.Values{"comp": []string{"snapshot"}},
},
{
want: "Undelete",
values: url.Values{"comp": []string{"undelete"}},
},
{
want: "SetAcl",
values: url.Values{"comp": []string{"acl"}},
},
{
want: "SetPermission",
values: url.Values{"comp": []string{"filepermission"}},
},
{
want: "SetMetadata",
values: url.Values{"comp": []string{"metadata"}},
},
{
want: "SetProperties",
values: url.Values{"comp": []string{"properties"}},
},
}
f := new(fileRPC)
for _, tc := range tcs {
assert.Equal(t, tc.want, f.putOperation(tc.values, tc.header))
}
} | explode_data.jsonl/35221 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 626
} | [
2830,
3393,
1703,
19103,
8432,
1155,
353,
8840,
836,
8,
341,
3244,
4837,
1669,
3056,
1235,
341,
197,
50780,
256,
914,
198,
197,
45939,
2515,
35145,
198,
197,
20883,
1758,
15753,
198,
197,
59403,
197,
197,
322,
3703,
1110,
5204,
905,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestCommand_exec(t *testing.T) {
tests := []struct {
name string
expectedExitCode int
}{
{
name: "Success",
expectedExitCode: command.Success,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
c := &Command{
ui: ui.NewNop(),
}
exitCode := c.exec()
assert.Equal(t, tc.expectedExitCode, exitCode)
})
}
} | explode_data.jsonl/13409 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 189
} | [
2830,
3393,
4062,
18430,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
1797,
914,
198,
197,
42400,
15339,
2078,
526,
198,
197,
59403,
197,
197,
515,
298,
11609,
25,
1797,
330,
7188,
756,
298,
42400,
15339,
20... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestParsePKIXPublicKey(t *testing.T) {
block, _ := pem.Decode([]byte(pemPublicKey))
pub, err := ParsePKIXPublicKey(block.Bytes)
if err != nil {
t.Errorf("Failed to parse RSA public key: %s", err)
return
}
rsaPub, ok := pub.(*rsa.PublicKey)
if !ok {
t.Errorf("Value returned from ParsePKIXPublicKey was not an RSA public key")
return
}
pubBytes2, err := MarshalPKIXPublicKey(rsaPub)
if err != nil {
t.Errorf("Failed to marshal RSA public key for the second time: %s", err)
return
}
if !bytes.Equal(pubBytes2, block.Bytes) {
t.Errorf("Reserialization of public key didn't match. got %x, want %x", pubBytes2, block.Bytes)
}
} | explode_data.jsonl/56542 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 257
} | [
2830,
3393,
14463,
22242,
5396,
61822,
1155,
353,
8840,
836,
8,
341,
47996,
11,
716,
1669,
54184,
56372,
10556,
3782,
1295,
336,
61822,
1171,
62529,
11,
1848,
1669,
14775,
22242,
5396,
61822,
18682,
36868,
340,
743,
1848,
961,
2092,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestPostCreation(t *testing.T) {
ctx := context.Background()
client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
defer client.Close()
client.Card.Use(hook.On(func(next ent.Mutator) ent.Mutator {
return hook.CardFunc(func(ctx context.Context, m *ent.CardMutation) (ent.Value, error) {
id, exists := m.ID()
require.False(t, exists, "id should not exist pre mutation")
require.Zero(t, id)
value, err := next.Mutate(ctx, m)
if err != nil {
return nil, err
}
id, exists = m.ID()
require.True(t, exists, "id should exist post mutation")
require.NotZero(t, id)
require.True(t, id == value.(*ent.Card).ID)
return value, nil
})
}, ent.OpCreate))
client.Card.Create().SetNumber("12345").SetName("a8m").SaveX(ctx)
client.Card.CreateBulk(client.Card.Create().SetNumber("12345")).SaveX(ctx)
} | explode_data.jsonl/36050 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 347
} | [
2830,
3393,
4133,
32701,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
2266,
19047,
741,
25291,
1669,
1197,
1944,
12953,
1155,
11,
330,
37042,
18,
497,
330,
1192,
25,
306,
30,
8516,
28,
17269,
5,
9360,
28,
6100,
85047,
41718,
28,
16,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestAPIReposGitCommitList(t *testing.T) {
defer prepareTestEnv(t)()
user := models.AssertExistsAndLoadBean(t, &models.User{ID: 2}).(*models.User)
// Login as User2.
session := loginUser(t, user.Name)
token := getTokenForLoggedInUser(t, session)
// Test getting commits (Page 1)
req := NewRequestf(t, "GET", "/api/v1/repos/%s/repo16/commits?token="+token, user.Name)
resp := session.MakeRequest(t, req, http.StatusOK)
var apiData []api.Commit
DecodeJSON(t, resp, &apiData)
assert.Equal(t, 3, len(apiData))
assert.Equal(t, "69554a64c1e6030f051e5c3f94bfbd773cd6a324", apiData[0].CommitMeta.SHA)
assert.Equal(t, "27566bd5738fc8b4e3fef3c5e72cce608537bd95", apiData[1].CommitMeta.SHA)
assert.Equal(t, "5099b81332712fe655e34e8dd63574f503f61811", apiData[2].CommitMeta.SHA)
} | explode_data.jsonl/2218 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 348
} | [
2830,
3393,
7082,
693,
966,
46562,
33441,
852,
1155,
353,
8840,
836,
8,
341,
16867,
10549,
2271,
14359,
1155,
8,
741,
19060,
1669,
4119,
11711,
15575,
3036,
5879,
10437,
1155,
11,
609,
6507,
7344,
90,
915,
25,
220,
17,
16630,
4071,
65... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_JoinEq_CustomComparer(t *testing.T) {
outer := NewOnSliceEn("ABCxxx", "abcyyy", "defzzz", "ghizzz")
inner := NewOnSliceEn("000abc", "111gHi", "222333")
got, _ := JoinEq(outer, inner,
func(oel string) string { return oel[:3] },
func(iel string) string { return iel[3:] },
func(oel, iel string) string { return oel + ":" + iel },
CaseInsensitiveEqualer,
)
want := NewOnSliceEn("ABCxxx:000abc", "abcyyy:000abc", "ghizzz:111gHi")
if !SequenceEqualMust(got, want) {
got.Reset()
want.Reset()
t.Errorf("JoinEq_CustomComparer = '%v', want '%v'", String(got), String(want))
}
} | explode_data.jsonl/67375 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 259
} | [
2830,
3393,
10598,
1961,
27312,
57402,
31942,
1155,
353,
8840,
836,
8,
341,
197,
2676,
1669,
1532,
1925,
33236,
1702,
445,
25411,
24048,
497,
330,
13683,
39319,
497,
330,
750,
10400,
89,
497,
330,
866,
8759,
89,
1138,
197,
4382,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAccAWSS3BucketObject_NonVersioned(t *testing.T) {
sourceInitial := testAccAWSS3BucketObjectCreateTempFile(t, "initial object state")
defer os.Remove(sourceInitial)
var originalObj s3.GetObjectOutput
resourceName := "aws_s3_bucket_object.object"
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccAssumeRoleARNPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSS3BucketObjectConfig_NonVersioned(acctest.RandInt(), sourceInitial),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketObjectExists(resourceName, &originalObj),
testAccCheckAWSS3BucketObjectBody(&originalObj, "initial object state"),
resource.TestCheckResourceAttr(resourceName, "version_id", ""),
),
},
},
})
} | explode_data.jsonl/64959 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 322
} | [
2830,
3393,
14603,
14419,
1220,
18,
36018,
1190,
1604,
263,
5637,
291,
1155,
353,
8840,
836,
8,
341,
47418,
6341,
1669,
1273,
14603,
14419,
1220,
18,
36018,
1190,
4021,
12151,
1703,
1155,
11,
330,
9426,
1633,
1584,
1138,
16867,
2643,
13... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAnalyzer_AnalyzerVersions(t *testing.T) {
tests := []struct {
name string
disabled []analyzer.Type
want map[string]int
}{
{
name: "happy path",
disabled: []analyzer.Type{},
want: map[string]int{
"alpine": 1,
"amazon": 1,
"apk": 1,
"bundler": 1,
"cargo": 1,
"centos": 1,
"rocky": 1,
"alma": 1,
"composer": 1,
"debian": 1,
"dpkg": 2,
"fedora": 1,
"gobinary": 1,
"gomod": 1,
"jar": 1,
"node-pkg": 1,
"npm": 1,
"nuget": 2,
"oracle": 1,
"photon": 1,
"pip": 1,
"pipenv": 1,
"poetry": 1,
"redhat": 1,
"rpm": 1,
"suse": 1,
"ubuntu": 1,
"yarn": 1,
"python-pkg": 1,
"gemspec": 1,
},
},
{
name: "disable analyzers",
disabled: []analyzer.Type{analyzer.TypeAlpine, analyzer.TypeUbuntu},
want: map[string]int{
"alpine": 0,
"amazon": 1,
"apk": 1,
"bundler": 1,
"cargo": 1,
"centos": 1,
"rocky": 1,
"alma": 1,
"composer": 1,
"debian": 1,
"dpkg": 2,
"fedora": 1,
"gobinary": 1,
"gomod": 1,
"jar": 1,
"node-pkg": 1,
"npm": 1,
"nuget": 2,
"oracle": 1,
"photon": 1,
"pip": 1,
"pipenv": 1,
"poetry": 1,
"redhat": 1,
"rpm": 1,
"suse": 1,
"ubuntu": 0,
"yarn": 1,
"python-pkg": 1,
"gemspec": 1,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
a := analyzer.NewAnalyzer(tt.disabled)
got := a.AnalyzerVersions()
fmt.Printf("%v\n", got)
assert.Equal(t, tt.want, got)
})
}
} | explode_data.jsonl/39286 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1214
} | [
2830,
3393,
54911,
32699,
27165,
69015,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
198,
197,
34597,
2312,
3056,
276,
27165,
10184,
198,
197,
50780,
257,
2415,
14032,
63025,
198,
197,
59403,
197,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFetchParent(t *testing.T) {
tests := []struct {
name string
remoteChain bool
remoteLog bool
localEntries []*PaymentDescriptor
remoteEntries []*PaymentDescriptor
// parentIndex is the parent index of the entry that we will
// lookup with fetch parent.
parentIndex uint64
// expectErr indicates that we expect fetch parent to fail.
expectErr bool
// expectedIndex is the htlc index that we expect the parent
// to have.
expectedIndex uint64
}{
{
name: "not found in remote log",
localEntries: nil,
remoteEntries: nil,
remoteChain: true,
remoteLog: true,
parentIndex: 0,
expectErr: true,
},
{
name: "not found in local log",
localEntries: nil,
remoteEntries: nil,
remoteChain: false,
remoteLog: false,
parentIndex: 0,
expectErr: true,
},
{
name: "remote log + chain, remote add height 0",
localEntries: nil,
remoteEntries: []*PaymentDescriptor{
// This entry will be added at log index =0.
{
HtlcIndex: 1,
addCommitHeightLocal: 100,
addCommitHeightRemote: 100,
},
// This entry will be added at log index =1, it
// is the parent entry we are looking for.
{
HtlcIndex: 2,
addCommitHeightLocal: 100,
addCommitHeightRemote: 0,
},
},
remoteChain: true,
remoteLog: true,
parentIndex: 1,
expectErr: true,
},
{
name: "remote log, local chain, local add height 0",
remoteEntries: []*PaymentDescriptor{
// This entry will be added at log index =0.
{
HtlcIndex: 1,
addCommitHeightLocal: 100,
addCommitHeightRemote: 100,
},
// This entry will be added at log index =1, it
// is the parent entry we are looking for.
{
HtlcIndex: 2,
addCommitHeightLocal: 0,
addCommitHeightRemote: 100,
},
},
localEntries: nil,
remoteChain: false,
remoteLog: true,
parentIndex: 1,
expectErr: true,
},
{
name: "local log + chain, local add height 0",
localEntries: []*PaymentDescriptor{
// This entry will be added at log index =0.
{
HtlcIndex: 1,
addCommitHeightLocal: 100,
addCommitHeightRemote: 100,
},
// This entry will be added at log index =1, it
// is the parent entry we are looking for.
{
HtlcIndex: 2,
addCommitHeightLocal: 0,
addCommitHeightRemote: 100,
},
},
remoteEntries: nil,
remoteChain: false,
remoteLog: false,
parentIndex: 1,
expectErr: true,
},
{
name: "local log + remote chain, remote add height 0",
localEntries: []*PaymentDescriptor{
// This entry will be added at log index =0.
{
HtlcIndex: 1,
addCommitHeightLocal: 100,
addCommitHeightRemote: 100,
},
// This entry will be added at log index =1, it
// is the parent entry we are looking for.
{
HtlcIndex: 2,
addCommitHeightLocal: 100,
addCommitHeightRemote: 0,
},
},
remoteEntries: nil,
remoteChain: true,
remoteLog: false,
parentIndex: 1,
expectErr: true,
},
{
name: "remote log found",
localEntries: nil,
remoteEntries: []*PaymentDescriptor{
// This entry will be added at log index =0.
{
HtlcIndex: 1,
addCommitHeightLocal: 100,
addCommitHeightRemote: 0,
},
// This entry will be added at log index =1, it
// is the parent entry we are looking for.
{
HtlcIndex: 2,
addCommitHeightLocal: 100,
addCommitHeightRemote: 100,
},
},
remoteChain: true,
remoteLog: true,
parentIndex: 1,
expectErr: false,
expectedIndex: 2,
},
{
name: "local log found",
localEntries: []*PaymentDescriptor{
// This entry will be added at log index =0.
{
HtlcIndex: 1,
addCommitHeightLocal: 0,
addCommitHeightRemote: 100,
},
// This entry will be added at log index =1, it
// is the parent entry we are looking for.
{
HtlcIndex: 2,
addCommitHeightLocal: 100,
addCommitHeightRemote: 100,
},
},
remoteEntries: nil,
remoteChain: false,
remoteLog: false,
parentIndex: 1,
expectErr: false,
expectedIndex: 2,
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
// Create a lightning channel with newly initialized
// local and remote logs.
lc := LightningChannel{
localUpdateLog: newUpdateLog(0, 0),
remoteUpdateLog: newUpdateLog(0, 0),
}
// Add the local and remote entries to update logs.
for _, entry := range test.localEntries {
lc.localUpdateLog.appendHtlc(entry)
}
for _, entry := range test.remoteEntries {
lc.remoteUpdateLog.appendHtlc(entry)
}
parent, err := lc.fetchParent(
&PaymentDescriptor{
ParentIndex: test.parentIndex,
},
test.remoteChain,
test.remoteLog,
)
gotErr := err != nil
if test.expectErr != gotErr {
t.Fatalf("expected error: %v, got: %v, "+
"error:%v", test.expectErr, gotErr, err)
}
// If our lookup failed, we do not need to check parent
// index.
if err != nil {
return
}
if parent.HtlcIndex != test.expectedIndex {
t.Fatalf("expected parent index: %v, got: %v",
test.parentIndex, parent.HtlcIndex)
}
})
}
} | explode_data.jsonl/72515 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2580
} | [
2830,
3393,
20714,
8387,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
688,
914,
198,
197,
197,
18147,
18837,
256,
1807,
198,
197,
197,
18147,
2201,
257,
1807,
198,
197,
8854,
24533,
220,
29838,
20188,
11709,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func Test_Problem820(t *testing.T) {
qs := []question820{
{
para820{[]string{"time", "me", "bell"}},
ans820{10},
},
{
para820{[]string{"t"}},
ans820{2},
},
}
fmt.Printf("------------------------Leetcode Problem 820------------------------\n")
for _, q := range qs {
_, p := q.ans820, q.para820
fmt.Printf("【input】:%v 【output】:%v\n", p, minimumLengthEncoding(p.words))
}
fmt.Printf("\n\n\n")
} | explode_data.jsonl/35418 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 200
} | [
2830,
3393,
16670,
10121,
23,
17,
15,
1155,
353,
8840,
836,
8,
1476,
18534,
82,
1669,
3056,
7841,
23,
17,
15,
4257,
197,
197,
515,
298,
197,
14794,
23,
17,
15,
90,
1294,
917,
4913,
1678,
497,
330,
2660,
497,
330,
17250,
48085,
298... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_run(t *testing.T) {
type args struct {
c []string
}
tests := []struct {
name string
args args
want Result
wantErr bool
}{
// Run ls successfully to find a file that exists
{"succeed:list_readme", args{
c: []string{"ls", "-b", "README.md"},
}, Result{
ReturnCode: 0,
StdOut: "README.md\n",
StdErr: "",
}, false},
// Run an executable that doesn't exist
{"fail: executable doesn't exist", args{
c: []string{"i_dont_exist", "first_arg", "second_arg"},
}, Result{
ReturnCode: 0,
StdOut: "",
StdErr: "",
}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := Run(tt.args.c)
if (err != nil) != tt.wantErr {
t.Errorf("Run() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("Run() got = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/46407 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 445
} | [
2830,
3393,
14007,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
1444,
3056,
917,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
262,
914,
198,
197,
31215,
262,
2827,
198,
197,
50780,
262,
5714,
198,
197,
50780,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestOpenFlagsString(t *testing.T) {
var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC | os.O_APPEND)
if g, e := f.String(), "OpenReadWrite+OpenAppend+OpenSync"; g != e {
t.Fatalf("OpenFlags.String: %q != %q", g, e)
}
} | explode_data.jsonl/39260 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
5002,
9195,
703,
1155,
353,
8840,
836,
8,
341,
2405,
282,
284,
31702,
12953,
9195,
9638,
8382,
66266,
760,
2643,
8382,
39189,
760,
2643,
8382,
65851,
340,
743,
342,
11,
384,
1669,
282,
6431,
1507,
330,
5002,
58610,
10,
500... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestNumRawDataModules(t *testing.T) {
cases := [][2]int{
{1, 208},
{2, 359},
{3, 567},
{6, 1383},
{7, 1568},
{12, 3728},
{15, 5243},
{18, 7211},
{22, 10068},
{26, 13652},
{32, 19723},
{37, 25568},
{40, 29648},
}
for _, tc := range cases {
t.Run(fmt.Sprintf("TestNumRawDataModules %v", tc), func(t *testing.T) {
assert.Equal(t, numRawDataModules[tc[0]], tc[1])
})
}
} | explode_data.jsonl/54181 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 221
} | [
2830,
3393,
4651,
20015,
1043,
28201,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
508,
1457,
17,
63025,
515,
197,
197,
90,
16,
11,
220,
17,
15,
23,
1583,
197,
197,
90,
17,
11,
220,
18,
20,
24,
1583,
197,
197,
90,
18,
11,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRemoveNode(t *testing.T) {
r := newRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage(), 0)
r.pendingConf = true
r.removeNode(2)
if r.pendingConf != false {
t.Errorf("pendingConf = %v, want false", r.pendingConf)
}
w := []uint64{1}
if g := r.nodes(); !reflect.DeepEqual(g, w) {
t.Errorf("nodes = %v, want %v", g, w)
}
} | explode_data.jsonl/67370 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 155
} | [
2830,
3393,
13021,
1955,
1155,
353,
8840,
836,
8,
341,
7000,
1669,
501,
55535,
723,
7,
16,
11,
3056,
2496,
21,
19,
90,
16,
11,
220,
17,
2137,
220,
16,
15,
11,
220,
16,
11,
1532,
10642,
5793,
1507,
220,
15,
340,
7000,
64788,
1557... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestDashboardConfig(t *testing.T) {
re := require.New(t)
registerDefaultSchedulers()
cfgData := `
[dashboard]
tidb-cacert-path = "/path/ca.pem"
tidb-key-path = "/path/client-key.pem"
tidb-cert-path = "/path/client.pem"
`
cfg := NewConfig()
meta, err := toml.Decode(cfgData, &cfg)
re.NoError(err)
err = cfg.Adjust(&meta, false)
re.NoError(err)
re.Equal("/path/ca.pem", cfg.Dashboard.TiDBCAPath)
re.Equal("/path/client-key.pem", cfg.Dashboard.TiDBKeyPath)
re.Equal("/path/client.pem", cfg.Dashboard.TiDBCertPath)
// Test different editions
tests := []struct {
Edition string
EnableTelemetry bool
}{
{"Community", true},
{"Enterprise", false},
}
originalDefaultEnableTelemetry := defaultEnableTelemetry
for _, test := range tests {
defaultEnableTelemetry = true
initByLDFlags(test.Edition)
cfg = NewConfig()
meta, err = toml.Decode(cfgData, &cfg)
re.NoError(err)
err = cfg.Adjust(&meta, false)
re.NoError(err)
re.Equal(test.EnableTelemetry, cfg.Dashboard.EnableTelemetry)
}
defaultEnableTelemetry = originalDefaultEnableTelemetry
} | explode_data.jsonl/78168 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 441
} | [
2830,
3393,
26947,
2648,
1155,
353,
8840,
836,
8,
341,
17200,
1669,
1373,
7121,
1155,
340,
29422,
3675,
74674,
741,
50286,
1043,
1669,
22074,
58,
18641,
921,
24449,
65,
1786,
580,
529,
33095,
284,
3521,
2343,
80591,
49373,
698,
24449,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_NoMatch_False(t *testing.T) {
// Arrange
ass := assert.New(t)
data := generateRandomStringSlice(size, 50)
bloom := NewBloomFilter(data)
m := NewExactMatch(data)
all := NewMatchAll(bloom, m)
// Act
ok := all.Match(nomatch)
// Assert
ass.False(ok)
} | explode_data.jsonl/13363 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 109
} | [
2830,
3393,
36989,
8331,
1400,
710,
1155,
353,
8840,
836,
8,
341,
197,
322,
40580,
198,
197,
395,
1669,
2060,
7121,
1155,
340,
8924,
1669,
6923,
13999,
703,
33236,
6856,
11,
220,
20,
15,
340,
2233,
18474,
1669,
1532,
33,
18474,
5632,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
manager, podControl, clientset, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
var updated *apps.DaemonSet
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
if action.GetSubresource() != "status" {
return false, nil, nil
}
if u, ok := action.(core.UpdateAction); ok {
updated = u.GetObject().(*apps.DaemonSet)
}
return false, nil, nil
})
manager.dsStore.Add(ds)
addNodes(manager.nodeStore, 0, 5, nil)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
// Make sure the single sync() updated Status already for the change made
// during the manage() phase.
if got, want := updated.Status.CurrentNumberScheduled, int32(5); got != want {
t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want)
}
}
} | explode_data.jsonl/50306 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 403
} | [
2830,
3393,
16374,
89177,
1649,
37091,
2522,
6025,
72332,
23527,
82,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
8282,
1669,
2088,
2647,
2580,
69388,
368,
341,
197,
83336,
1669,
501,
89177,
1649,
445,
7975,
1138,
197,
83336,
36473,
16689,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestUpdateBuilderContextRunners(t *testing.T) {
db := &DBStub{}
b := Update("test").Set("x", 1).RunWith(db)
expectedSql := "UPDATE test SET x = ?"
b.ExecContext(ctx)
assert.Equal(t, expectedSql, db.LastExecSql)
b.QueryContext(ctx)
assert.Equal(t, expectedSql, db.LastQuerySql)
b.QueryRowContext(ctx)
assert.Equal(t, expectedSql, db.LastQueryRowSql)
err := b.ScanContext(ctx)
assert.NoError(t, err)
} | explode_data.jsonl/14850 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 171
} | [
2830,
3393,
4289,
3297,
1972,
6727,
4972,
1155,
353,
8840,
836,
8,
341,
20939,
1669,
609,
3506,
33838,
16094,
2233,
1669,
5549,
445,
1944,
1827,
1649,
445,
87,
497,
220,
16,
568,
51918,
9791,
692,
42400,
8269,
1669,
330,
9239,
1273,
9... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPrepareInputData2(t *testing.T) {
result := prepareInputData(templateData2, varMap)
expected := "{\"services\":{\"dbadmin\":[{\"label\":\"PhpMyAdmin\",\"name\":\"phpmyadmin\"}],\"tlshelper\":[{\"label\":\"Lets Encrypt Companion\",\"name\":\"letsencrypt\"}]},\"var\":{\"BAR\":\"test1\",\"FOO\":\"test\"}}"
assert.Equal(t, expected, result)
} | explode_data.jsonl/67930 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 129
} | [
2830,
3393,
50590,
2505,
1043,
17,
1155,
353,
8840,
836,
8,
341,
9559,
1669,
10549,
2505,
1043,
29963,
1043,
17,
11,
762,
2227,
340,
42400,
1669,
54734,
12779,
92729,
1999,
2882,
59,
8899,
64238,
1502,
23488,
50144,
5050,
7210,
34333,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestL1Data(t *testing.T) {
_, err := L1Data(
unix.PERF_COUNT_HW_CACHE_OP_READ,
unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS,
func() error { return nil },
)
if err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/33122 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 106
} | [
2830,
3393,
43,
16,
1043,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1848,
1669,
444,
16,
1043,
1006,
197,
20479,
941,
47320,
37,
14672,
44013,
29138,
13908,
13117,
345,
197,
20479,
941,
47320,
37,
14672,
44013,
29138,
21181,
24420,
345,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSearcher_ACHFindPostalCodeOnly(t *testing.T) {
s := searcher{}
if err := s.helperLoadFEDACHFile(t); err != nil {
t.Fatal(err)
}
achP := s.ACHFindPostalCodeOnly(hardResultsLimit, "43724")
if len(achP) == 0 {
t.Fatalf("%s", "No matches found for postal code")
}
for _, p := range achP {
if !strings.Contains(p.PostalCode, "43724") {
t.Errorf("Postal Code=%s", p.PostalCode)
}
}
} | explode_data.jsonl/71094 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 180
} | [
2830,
3393,
5890,
261,
1566,
2149,
9885,
43800,
2078,
7308,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
94674,
16094,
743,
1848,
1669,
274,
38922,
5879,
37,
1479,
11873,
1703,
1155,
1215,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestNewCloudEventNats(t *testing.T) {
assert := assert.New(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
const ITERATIONS = 100
var counter int
type Count struct {
N int
}
ConnErrHandler := func(conn *nats.Conn, err error) {
log.WithError(err).Infoln("nats disconnected")
}
ConnHandler := func(conn *nats.Conn) {
log.Infoln("nats connected")
}
opts := cenats.NatsOptions(nats.DisconnectErrHandler(ConnErrHandler), nats.ReconnectHandler(ConnHandler))
protocol, err := cenats.NewProtocol("https://nats1.plugis.com", "ce.test", "ce.test", opts)
assert.NoError(err)
defer protocol.Close(ctx)
_ = protocol
c, err := cloudevents.NewClient(protocol)
assert.NoError(err)
receiver := func(ctx context.Context, event event.Event) {
switch event.Type() {
case "com.drone-box.sample.count":
var count Count
err := event.DataAs(&count)
if err != nil {
log.WithError(err).Error("event.DataAs")
}
counter++
//log.WithField("count", count.N).Info("receiver")
}
}
go func() {
c.StartReceiver(ctx, receiver)
log.Info("StartReceiver exited")
}()
// wait until connected
//time.Sleep(time.Second * 2)
// Create an Event.
event := cloudevents.NewEvent()
event.SetSource("example/uri")
event.SetType("com.drone-box.sample.count")
var count Count
// Send that Event.
for i := 0; i < ITERATIONS; i++ {
count.N = i
event.SetData(cloudevents.ApplicationJSON, count)
event.SetID(uuid.New().String())
event.SetTime(time.Now())
result := c.Send(ctx, event)
if result == nil {
fmt.Printf("+")
} else {
fmt.Printf("!")
}
assert.Equal(false, cloudevents.IsUndelivered(result))
time.Sleep(time.Millisecond * 5)
}
time.Sleep(time.Second * 5)
cancel()
<-ctx.Done()
time.Sleep(time.Second * 2)
assert.Equal(ITERATIONS, counter)
} | explode_data.jsonl/18722 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 725
} | [
2830,
3393,
3564,
16055,
1556,
45,
1862,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
340,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
16867,
9121,
2822,
4777,
87816,
21792,
284,
220,
16,
15,
15,
271,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIncident_SnoozeIncidentWithResponse(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/incidents/1/snooze", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, "POST")
_, _ = w.Write([]byte(`{"incident": {"id": "1", "pending_actions": [{"type": "unacknowledge", "at":"2019-12-31T16:58:35Z"}]}}`))
})
client := defaultTestClient(server.URL, "foo")
var duration uint = 3600
id := "1"
res, err := client.SnoozeIncidentWithResponse(id, duration)
want := &Incident{
Id: "1",
PendingActions: []PendingAction{
{
Type: "unacknowledge",
At: "2019-12-31T16:58:35Z",
},
},
}
if err != nil {
t.Fatal(err)
}
testEqual(t, want, res)
} | explode_data.jsonl/76399 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 309
} | [
2830,
3393,
39245,
1713,
1098,
2152,
90153,
39245,
1713,
2354,
2582,
1155,
353,
8840,
836,
8,
341,
84571,
741,
16867,
49304,
2822,
2109,
2200,
63623,
4283,
2840,
6880,
14,
16,
2687,
2152,
90153,
497,
2915,
3622,
1758,
37508,
11,
435,
35... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNuoDBCollectorSidecarsDisabled(t *testing.T) {
options := &helm.Options{
SetValues: map[string]string{
"nuocollector.enabled": "false",
"nuocollector.image.registry": "docker.io",
"nuocollector.image.repository": "nuodb/nuocd",
"nuocollector.image.tag": "1.0.0",
"nuocollector.watcher.registry": "docker.io",
"nuocollector.watcher.repository": "kiwigrid/k8s-sidecar",
"nuocollector.watcher.tag": "latest",
},
}
executeSidecarTests(t, options)
} | explode_data.jsonl/29343 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 248
} | [
2830,
3393,
45,
23137,
3506,
53694,
16384,
50708,
25907,
1155,
353,
8840,
836,
8,
1476,
35500,
1669,
609,
51899,
22179,
515,
197,
22212,
6227,
25,
2415,
14032,
30953,
515,
298,
197,
1,
8933,
4547,
27669,
22141,
788,
310,
330,
3849,
756,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.