text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
tests := []struct {
oldReplicas int
readyPods int
unHealthyPods int
maxCleanupCount int
cleanupCountExpected int
}{
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 1,
cleanupCountExpected: 1,
},
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 3,
cleanupCountExpected: 2,
},
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 0,
cleanupCountExpected: 0,
},
{
oldReplicas: 10,
readyPods: 10,
unHealthyPods: 0,
maxCleanupCount: 3,
cleanupCountExpected: 0,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
oldRSs := []*extensions.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(2)
maxUnavailable := intstr.FromInt(2)
deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil)
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, deployment, int32(test.maxCleanupCount))
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if int(cleanupCount) != test.cleanupCountExpected {
t.Errorf("expected %v unhealthy replicas been cleaned up, got %v", test.cleanupCountExpected, cleanupCount)
continue
}
}
} | explode_data.jsonl/44996 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 836
} | [
2830,
3393,
75286,
2051,
42444,
1806,
37028,
18327,
52210,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
61828,
18327,
52210,
688,
526,
198,
197,
197,
2307,
23527,
82,
310,
526,
198,
197,
20479,
96113,
23527,
82,
286,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestServer_Rejects_TLSBadCipher(t *testing.T) {
st := newServerTester(t, nil, func(c *tls.Config) {
// Only list bad ones:
c.CipherSuites = []uint16{
tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
}
})
defer st.Close()
gf := st.wantGoAway()
if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
t.Errorf("Got error code %v; want %v", got, want)
}
} | explode_data.jsonl/71680 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 464
} | [
2830,
3393,
5475,
50693,
583,
82,
69067,
17082,
79460,
1155,
353,
8840,
836,
8,
341,
18388,
1669,
501,
5475,
58699,
1155,
11,
2092,
11,
2915,
1337,
353,
34488,
10753,
8,
341,
197,
197,
322,
8278,
1140,
3873,
6174,
510,
197,
1444,
727,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAddVBAProject(t *testing.T) {
f := NewFile()
assert.NoError(t, f.SetSheetPrOptions("Sheet1", CodeName("Sheet1")))
assert.EqualError(t, f.AddVBAProject("macros.bin"), "stat macros.bin: no such file or directory")
assert.EqualError(t, f.AddVBAProject(filepath.Join("test", "Book1.xlsx")), "unsupported VBA project extension")
assert.NoError(t, f.AddVBAProject(filepath.Join("test", "vbaProject.bin")))
// Test add VBA project twice.
assert.NoError(t, f.AddVBAProject(filepath.Join("test", "vbaProject.bin")))
assert.NoError(t, f.SaveAs(filepath.Join("test", "TestAddVBAProject.xlsm")))
} | explode_data.jsonl/36997 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 232
} | [
2830,
3393,
2212,
53,
7064,
7849,
1155,
353,
8840,
836,
8,
341,
1166,
1669,
1532,
1703,
741,
6948,
35699,
1155,
11,
282,
4202,
10541,
3533,
3798,
445,
10541,
16,
497,
6119,
675,
445,
10541,
16,
29836,
6948,
12808,
1454,
1155,
11,
282,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLimit(t *testing.T) {
users := []User{
{Name: "LimitUser1", Age: 1},
{Name: "LimitUser2", Age: 10},
{Name: "LimitUser3", Age: 20},
{Name: "LimitUser4", Age: 10},
{Name: "LimitUser5", Age: 20},
}
DB.Create(&users)
var users1, users2, users3 []User
DB.Order("age desc").Limit(3).Find(&users1).Limit(5).Find(&users2).Limit(-1).Find(&users3)
if len(users1) != 3 || len(users2) != 5 || len(users3) <= 5 {
t.Errorf("Limit should works")
}
} | explode_data.jsonl/48714 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 197
} | [
2830,
3393,
16527,
1155,
353,
8840,
836,
8,
341,
90896,
1669,
3056,
1474,
515,
197,
197,
63121,
25,
330,
16527,
1474,
16,
497,
13081,
25,
220,
16,
1583,
197,
197,
63121,
25,
330,
16527,
1474,
17,
497,
13081,
25,
220,
16,
15,
1583,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestBootstrapSnapshotMissingVolume(t *testing.T) {
const (
offlineBackendName = "snapNoVolBackend"
scName = "snapNoVolSC"
volumeName = "snapNoVolVolume"
snapName = "snapNoVolSnapshot"
backendProtocol = config.File
)
orchestrator := getOrchestrator(t)
defer cleanup(t, orchestrator)
addBackendStorageClass(t, orchestrator, offlineBackendName, scName, backendProtocol)
_, err := orchestrator.AddVolume(
ctx(), tu.GenerateVolumeConfig(
volumeName, 50,
scName, config.File,
),
)
if err != nil {
t.Fatal("Unable to create volume: ", err)
}
// For the full test, we create everything and recreate the AddSnapshot transaction.
snapshotConfig := generateSnapshotConfig(snapName, volumeName, volumeName)
if _, err := orchestrator.CreateSnapshot(ctx(), snapshotConfig); err != nil {
t.Fatal("Unable to add snapshot: ", err)
}
// Simulate deleting the existing volume without going through Trident then bootstrapping
vol, ok := orchestrator.volumes[volumeName]
if !ok {
t.Fatalf("Unable to find volume %s in backend.", volumeName)
}
orchestrator.mutex.Lock()
err = orchestrator.storeClient.DeleteVolume(ctx(), vol)
if err != nil {
t.Fatalf("Unable to delete volume from store: %v", err)
}
orchestrator.mutex.Unlock()
newOrchestrator := getOrchestrator(t)
bootstrappedSnapshot, err := newOrchestrator.GetSnapshot(ctx(), snapshotConfig.VolumeName, snapshotConfig.Name)
if err != nil {
t.Fatalf("error getting snapshot: %v", err)
}
if bootstrappedSnapshot == nil {
t.Error("Volume not found during bootstrap.")
}
if !bootstrappedSnapshot.State.IsMissingVolume() {
t.Error("Unexpected snapshot state.")
}
// Delete volume in missing_volume state
err = newOrchestrator.DeleteSnapshot(ctx(), volumeName, snapName)
if err != nil {
t.Error("could not delete snapshot with missing volume")
}
} | explode_data.jsonl/62721 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 661
} | [
2830,
3393,
45511,
15009,
25080,
18902,
1155,
353,
8840,
836,
8,
341,
4777,
2399,
197,
197,
63529,
29699,
675,
284,
330,
48854,
2753,
36361,
29699,
698,
197,
29928,
675,
1797,
284,
330,
48854,
2753,
36361,
3540,
698,
197,
5195,
4661,
67... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func TestConfigCanBeValidated(t *testing.T) {
c := getTestConfig()
assert.Nil(t, c.Validate())
c1 := c
c1.DeploymentID = 0
assert.True(t, errors.Is(c1.Validate(), ErrInvalidConfig))
c2 := c
c2.ServiceAddress = ""
assert.True(t, errors.Is(c2.Validate(), ErrInvalidConfig))
c3 := c
c3.RaftAddress = ""
assert.True(t, errors.Is(c3.Validate(), ErrInvalidConfig))
c4 := c
c4.GossipAddress = ""
assert.True(t, errors.Is(c4.Validate(), ErrInvalidConfig))
c5 := c
c5.GossipSeedAddresses = []string{}
assert.True(t, errors.Is(c5.Validate(), ErrInvalidConfig))
} | explode_data.jsonl/50730 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 234
} | [
2830,
3393,
2648,
69585,
4088,
657,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
633,
2271,
2648,
741,
6948,
59678,
1155,
11,
272,
47667,
12367,
1444,
16,
1669,
272,
198,
1444,
16,
34848,
39130,
915,
284,
220,
15,
198,
6948,
32443,
1155,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLogWriter(t *testing.T) {
expected := "FOO"
lw := &logWriter{nil, bytes.NewBuffer(nil)}
lw.Write([]byte(expected))
if expected != lw.buf.String() {
t.Errorf("Expected %q, but received %q", expected, lw.buf.String())
}
} | explode_data.jsonl/41771 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 95
} | [
2830,
3393,
2201,
6492,
1155,
353,
8840,
836,
8,
341,
42400,
1669,
330,
3788,
46,
698,
8810,
86,
1669,
609,
839,
6492,
90,
8385,
11,
5820,
7121,
4095,
27907,
10569,
8810,
86,
4073,
10556,
3782,
15253,
4390,
743,
3601,
961,
41073,
4448... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func Test_ExecuteSET(t *testing.T) {
cmd := &command{
action: SET,
key: "foo",
value: "bar",
}
got, err := cmd.Execute()
want := "OK"
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("%s = %q, want %q", got, want, want)
}
got = ds.Find("foo")
want = "bar"
if got != want {
t.Errorf("%s = %q, want %q", got, want, want)
}
} | explode_data.jsonl/79584 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 177
} | [
2830,
3393,
83453,
5884,
1155,
353,
8840,
836,
8,
341,
25920,
1669,
609,
5631,
515,
197,
38933,
25,
9019,
345,
197,
23634,
25,
262,
330,
7975,
756,
197,
16309,
25,
220,
330,
2257,
756,
197,
630,
3174,
354,
11,
1848,
1669,
5439,
1382... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestVersionHelpers(t *testing.T) {
if v, _ := versionGen(31, 3); v != `v031` {
t.Errorf(`expected v031, got: %s: `, v)
}
if _, err := versionGen(31, 2); err == nil {
t.Error(`expected an error`)
}
if _, err := nextVersionLike(``); err == nil {
t.Error(`expected an error`)
}
if _, err := nextVersionLike(`adf`); err == nil {
t.Error(`expected an error`)
}
if v, err := nextVersionLike(`v099`); err == nil {
t.Errorf(`expected a padding overflow error, got: %s`, v)
}
if v, _ := nextVersionLike(`v1`); v != `v2` {
t.Errorf(`expected v2, got: %s`, v)
}
if v, _ := nextVersionLike(`v01`); v != `v02` {
t.Errorf(`expected v02, got: %s`, v)
}
} | explode_data.jsonl/43248 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 302
} | [
2830,
3393,
5637,
28430,
1155,
353,
8840,
836,
8,
1476,
743,
348,
11,
716,
1669,
2319,
9967,
7,
18,
16,
11,
220,
18,
1215,
348,
961,
1565,
85,
15,
18,
16,
63,
341,
197,
3244,
13080,
5809,
7325,
348,
15,
18,
16,
11,
2684,
25,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestLoad(t *testing.T) {
cases := []struct {
name string
path string
wantData *config.Configuration
wantErr bool
}{
{
name: "does not exist",
path: "no",
wantErr: true,
},
{
name: "invalid format",
path: "testdata/invalid.yaml",
wantErr: true,
},
{
name: "success",
path: "testdata/success.yaml",
wantData: &config.Configuration{
Server: config.Server{
Port: ":8080",
ReadTimeoutSeconds: 31,
WriteTimeoutSeconds: 30,
},
DB: config.DatabaseEnv{
Dev: config.Database{
PSN: "postgre",
LogQueries: true,
TimeoutSeconds: 10,
},
Test: config.Database{
PSN: "postgre",
LogQueries: true,
TimeoutSeconds: 10,
},
},
JWT: config.JWT{
Secret: "changedvalue",
Duration: 15,
Algorithm: "HS256",
},
App: config.Application{
MinPasswordStrength: 1,
},
OpenAPI: config.OpenAPI{
Username: "twisk",
Password: "twisk",
},
},
wantErr: false,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
cfg, err := config.Load(tc.path)
assert.Equal(t, tc.wantData, cfg)
assert.Equal(t, tc.wantErr, err != nil)
})
}
} | explode_data.jsonl/65119 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 678
} | [
2830,
3393,
5879,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
198,
197,
26781,
257,
914,
198,
197,
50780,
1043,
353,
1676,
17334,
198,
197,
50780,
7747,
220,
1807,
198,
197,
59403,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSpanEventSlice_CopyTo(t *testing.T) {
dest := NewSpanEventSlice()
// Test CopyTo to empty
NewSpanEventSlice().CopyTo(dest)
assert.EqualValues(t, NewSpanEventSlice(), dest)
// Test CopyTo larger slice
generateTestSpanEventSlice().CopyTo(dest)
assert.EqualValues(t, generateTestSpanEventSlice(), dest)
// Test CopyTo same size slice
generateTestSpanEventSlice().CopyTo(dest)
assert.EqualValues(t, generateTestSpanEventSlice(), dest)
} | explode_data.jsonl/63279 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 161
} | [
2830,
3393,
12485,
1556,
33236,
77637,
1249,
1155,
353,
8840,
836,
8,
341,
49616,
1669,
1532,
12485,
1556,
33236,
741,
197,
322,
3393,
14540,
1249,
311,
4287,
198,
197,
3564,
12485,
1556,
33236,
1005,
12106,
1249,
27010,
340,
6948,
12808,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEtcdHealthCheckerSuccess(t *testing.T) {
serverConfig := NewTestServerConfig()
_, clientconfig, shutdownServer := withConfigGetFreshApiserverAndClient(t, serverConfig)
t.Log(clientconfig.Host)
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
c := &http.Client{Transport: tr}
resp, err := c.Get(clientconfig.Host + "/healthz")
if nil != err {
t.Fatal("health check endpoint should not have failed", err)
}
if http.StatusOK != resp.StatusCode {
t.Fatal("health check endpoint should have had a 200 status code", resp)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal("couldn't read response body", err)
}
if strings.Contains(string(body), "healthz check failed") {
t.Fatal("health check endpoint should not have failed")
}
defer shutdownServer()
} | explode_data.jsonl/51877 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 297
} | [
2830,
3393,
31860,
4385,
14542,
35188,
7188,
1155,
353,
8840,
836,
8,
341,
41057,
2648,
1669,
1532,
2271,
5475,
2648,
741,
197,
6878,
2943,
1676,
11,
23766,
5475,
1669,
448,
2648,
1949,
55653,
91121,
2836,
3036,
2959,
1155,
11,
3538,
26... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestComputeExtensionID(t *testing.T) {
dir := testutil.TempDir(t)
defer os.RemoveAll(dir)
// Taken from Chrome's components/crx_file/id_util_unittest.cc.
manifest := `{ "key": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC4fysg3HybDNxRYZkNNg/UZogIVYTVOr8rpGSFewwEEz+N9Lw4DUn+a8RasEBTOtdmCQ+eNnQw2ooxTx8UUNfHIJQX3k65V15+CuWyZXqJTrZH/xy9tzgTr0eFhDIz8xdJv+mW0NYUbxONxfwscrqs6n4YU1amg6LOk5PnHw/mDwIDAQAB" }`
if err := ioutil.WriteFile(filepath.Join(dir, "manifest.json"), []byte(manifest), 0644); err != nil {
t.Fatal(err)
}
id, err := ComputeExtensionID(dir)
if err != nil {
t.Fatalf("ComputeExtensionID(%q) failed with %v", dir, err)
}
exp := "melddjfinppjdikinhbgehiennejpfhp"
if id != exp {
t.Errorf("ComputeExtensionID(%q) = %q; want %q", dir, id, exp)
}
} | explode_data.jsonl/7589 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 405
} | [
2830,
3393,
46254,
12049,
915,
1155,
353,
8840,
836,
8,
341,
48532,
1669,
1273,
1314,
65009,
6184,
1155,
340,
16867,
2643,
84427,
14161,
692,
197,
322,
55974,
504,
17225,
594,
6813,
2899,
12651,
2458,
38146,
18974,
4907,
14267,
27812,
624... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestMkdirStickyUmask(t *testing.T) {
const umask = 0077
dir := newDir("TestMkdirStickyUmask", t)
defer RemoveAll(dir)
oldUmask := syscall.Umask(umask)
defer syscall.Umask(oldUmask)
p := filepath.Join(dir, "dir1")
if err := Mkdir(p, ModeSticky|0755); err != nil {
t.Fatal(err)
}
fi, err := Stat(p)
if err != nil {
t.Fatal(err)
}
if mode := fi.Mode(); (mode&umask) != 0 || (mode&^ModePerm) != (ModeDir|ModeSticky) {
t.Errorf("unexpected mode %s", mode)
}
} | explode_data.jsonl/74929 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 219
} | [
2830,
3393,
44,
12438,
623,
18964,
52,
11258,
1155,
353,
8840,
836,
8,
341,
4777,
4443,
1073,
284,
220,
15,
15,
22,
22,
198,
48532,
1669,
501,
6184,
445,
2271,
44,
12438,
623,
18964,
52,
11258,
497,
259,
340,
16867,
10783,
2403,
141... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestFormatWithdrawPermissions(t *testing.T) {
b.SetDefaults()
expectedResult := exchange.AutoWithdrawCryptoText + " & " + exchange.AutoWithdrawFiatText
withdrawPermissions := b.FormatWithdrawPermissions()
if withdrawPermissions != expectedResult {
t.Errorf("Expected: %s, Received: %s", expectedResult, withdrawPermissions)
}
} | explode_data.jsonl/49003 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 105
} | [
2830,
3393,
4061,
92261,
23851,
1155,
353,
8840,
836,
8,
341,
2233,
4202,
16273,
741,
42400,
2077,
1669,
9289,
6477,
92261,
58288,
1178,
488,
330,
609,
330,
488,
9289,
6477,
92261,
37,
10358,
1178,
271,
46948,
7633,
23851,
1669,
293,
99... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGenerateISO(t *testing.T) {
key, _, _, _, _ := ssh.ParseAuthorizedKey([]byte("ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITowPn2Ol1eCvXN5XV+Lb6jfXzgDbXyEdtayadDUJtFrcN2m2mjC1B20VBAoJcZtSYkmjrllS06Q26Te5sTYvE= testkey"))
i := netutil.ParseCIDR("192.168.122.10/24")
hw, _ := net.ParseMAC("52:54:00:78:71:f1")
e := &CloudConfigEthernet{
MacAddress: hw,
Address4: i,
Gateway4: net.ParseIP("192.168.122.1"),
NameServers: []net.IP{
net.ParseIP("192.168.122.1"),
},
}
c := StructConfig("user", "host", []ssh.PublicKey{key}, []*CloudConfigEthernet{e})
f, err := c.Generate(".")
if err != nil {
t.Errorf("Failed to generate iso: err='%s'", err.Error())
}
_, err = img.OpenQemuImg(f)
if err != nil {
t.Errorf("Failed to open qemu image, maybe this is not valid image: path='%s', err='%s'", f, err.Error())
}
if err := c.Delete(); err != nil {
t.Errorf("Failed to delete: err='%s'", err.Error())
}
} | explode_data.jsonl/46114 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 479
} | [
2830,
3393,
31115,
24167,
1155,
353,
8840,
836,
8,
341,
23634,
11,
8358,
8358,
8358,
716,
1669,
29230,
8937,
60454,
1592,
10556,
3782,
445,
757,
96780,
7514,
64,
17,
5279,
380,
79,
17,
20,
21,
47097,
13669,
17,
53,
73,
57,
43165,
71... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestTxnAbortCount(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
s, metrics, cleanupFn := setupMetricsTest(t)
defer cleanupFn()
value := []byte("value")
const intentionalErrText = "intentional error to cause abort"
// Test aborted transaction.
if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
key := []byte("key-abort")
if err := txn.Put(ctx, key, value); err != nil {
t.Fatal(err)
}
return errors.New(intentionalErrText)
}); !testutils.IsError(err, intentionalErrText) {
t.Fatalf("unexpected error: %v", err)
}
checkTxnMetrics(t, metrics, "abort txn", 0, 0, 1 /* aborts */, 0)
} | explode_data.jsonl/76895 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 266
} | [
2830,
3393,
31584,
77,
85891,
2507,
1155,
353,
8840,
836,
8,
341,
16867,
23352,
1944,
36892,
2271,
1155,
8,
741,
16867,
1487,
77940,
1155,
568,
7925,
1155,
340,
1903,
11,
16734,
11,
21290,
24911,
1669,
6505,
27328,
2271,
1155,
340,
1686... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestLoadBalanceSourceRanges(t *testing.T) {
ipt, fp := buildFakeProxier()
svcIP := "10.20.30.41"
svcPort := 80
svcLBIP := "1.2.3.4"
svcLBSource := "10.0.0.0/8"
svcPortName := proxy.ServicePortName{
NamespacedName: makeNSN("ns1", "svc1"),
Port: "p80",
}
epIP := "10.180.0.1"
tcpProtocol := v1.ProtocolTCP
makeServiceMap(fp,
makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
svc.Spec.Type = "LoadBalancer"
svc.Spec.ClusterIP = svcIP
svc.Spec.Ports = []v1.ServicePort{{
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
}}
svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
IP: svcLBIP,
}}
svc.Spec.LoadBalancerSourceRanges = []string{
svcLBSource,
}
}),
)
populateEndpointSlices(fp,
makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
eps.AddressType = discovery.AddressTypeIPv4
eps.Endpoints = []discovery.Endpoint{{
Addresses: []string{epIP},
}}
eps.Ports = []discovery.EndpointPort{{
Name: utilpointer.StringPtr(svcPortName.Port),
Port: utilpointer.Int32(int32(svcPort)),
Protocol: &tcpProtocol,
}}
}),
)
fp.syncProxyRules()
// Check ipvs service and destinations
epVS := &netlinktest.ExpectedVirtualServer{
VSNum: 2, IP: svcLBIP, Port: uint16(svcPort), Protocol: string(v1.ProtocolTCP),
RS: []netlinktest.ExpectedRealServer{{
IP: epIP, Port: uint16(svcPort),
}}}
checkIPVS(t, fp, epVS)
// Check ipset entry
epIPSet := netlinktest.ExpectedIPSet{
kubeLoadBalancerSet: {{
IP: svcLBIP,
Port: svcPort,
Protocol: strings.ToLower(string(v1.ProtocolTCP)),
SetType: utilipset.HashIPPort,
}},
kubeLoadbalancerFWSet: {{
IP: svcLBIP,
Port: svcPort,
Protocol: strings.ToLower(string(v1.ProtocolTCP)),
SetType: utilipset.HashIPPort,
}},
kubeLoadBalancerSourceCIDRSet: {{
IP: svcLBIP,
Port: svcPort,
Protocol: strings.ToLower(string(v1.ProtocolTCP)),
Net: svcLBSource,
SetType: utilipset.HashIPPortNet,
}},
}
checkIPSet(t, fp, epIPSet)
// Check iptables chain and rules
epIpt := netlinktest.ExpectedIptablesChain{
string(kubeServicesChain): {{
JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
}, {
JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
}, {
JumpChain: string(KubeNodePortChain), MatchSet: "",
}, {
JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
}, {
JumpChain: "ACCEPT", MatchSet: kubeLoadBalancerSet,
}},
string(KubeLoadBalancerChain): {{
JumpChain: string(KubeFireWallChain), MatchSet: kubeLoadbalancerFWSet,
}, {
JumpChain: string(KubeMarkMasqChain), MatchSet: "",
}},
string(KubeFireWallChain): {{
JumpChain: "RETURN", MatchSet: kubeLoadBalancerSourceCIDRSet,
}, {
JumpChain: string(KubeMarkDropChain), MatchSet: "",
}},
}
checkIptables(t, ipt, epIpt)
} | explode_data.jsonl/44360 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1371
} | [
2830,
3393,
5879,
21190,
3608,
74902,
1155,
353,
8840,
836,
8,
341,
8230,
417,
11,
12007,
1669,
1936,
52317,
1336,
87,
1268,
2822,
1903,
7362,
3298,
1669,
330,
16,
15,
13,
17,
15,
13,
18,
15,
13,
19,
16,
698,
1903,
7362,
7084,
166... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAllocationsCreateDestroy(t *testing.T) {
clients.RequireLong(t)
client, err := clients.NewBareMetalV1NoAuthClient()
th.AssertNoErr(t, err)
client.Microversion = "1.52"
allocation, err := v1.CreateAllocation(t, client)
th.AssertNoErr(t, err)
defer v1.DeleteAllocation(t, client, allocation)
found := false
err = allocations.List(client, allocations.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
allocationList, err := allocations.ExtractAllocations(page)
if err != nil {
return false, err
}
for _, a := range allocationList {
if a.UUID == allocation.UUID {
found = true
return true, nil
}
}
return false, nil
})
th.AssertNoErr(t, err)
th.AssertEquals(t, found, true)
} | explode_data.jsonl/35916 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 290
} | [
2830,
3393,
25154,
804,
4021,
14245,
1155,
353,
8840,
836,
8,
341,
197,
31869,
81288,
6583,
1155,
692,
25291,
11,
1848,
1669,
8239,
7121,
33,
546,
65661,
53,
16,
2753,
5087,
2959,
741,
70479,
11711,
2753,
7747,
1155,
11,
1848,
692,
25... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestComputeProposalTxID(t *testing.T) {
txid, err := utils.ComputeTxID([]byte{1}, []byte{1})
assert.NoError(t, err, "Failed computing TxID")
//计算computetxid计算的函数,
//即base64(sha256(nonce creator)
hf := sha256.New()
hf.Write([]byte{1})
hf.Write([]byte{1})
hashOut := hf.Sum(nil)
txid2 := hex.EncodeToString(hashOut)
t.Logf("% x\n", hashOut)
t.Logf("% s\n", txid)
t.Logf("% s\n", txid2)
assert.Equal(t, txid, txid2)
} | explode_data.jsonl/39994 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 235
} | [
2830,
3393,
46254,
98637,
31584,
915,
1155,
353,
8840,
836,
8,
341,
46237,
307,
11,
1848,
1669,
12439,
89237,
31584,
915,
10556,
3782,
90,
16,
2137,
3056,
3782,
90,
16,
3518,
6948,
35699,
1155,
11,
1848,
11,
330,
9408,
24231,
39850,
9... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTrigger_MissingYaml(t *testing.T) {
controller := gomock.NewController(t)
defer controller.Finish()
mockUsers := mock.NewMockUserStore(controller)
mockUsers.EXPECT().Find(noContext, dummyRepo.UserID).Return(dummyUser, nil)
mockConfigService := mock.NewMockConfigService(controller)
mockConfigService.EXPECT().Find(gomock.Any(), gomock.Any()).Return(nil, io.EOF)
triggerer := New(
nil,
mockConfigService,
nil,
nil,
nil,
nil,
nil,
nil,
mockUsers,
nil,
nil,
)
_, err := triggerer.Trigger(noContext, dummyRepo, dummyHook)
if err == nil {
t.Errorf("Expect error when yaml not found")
}
} | explode_data.jsonl/26994 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 255
} | [
2830,
3393,
17939,
1245,
13577,
56,
9467,
1155,
353,
8840,
836,
8,
341,
61615,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
16867,
6461,
991,
18176,
2822,
77333,
7137,
1669,
7860,
7121,
11571,
1474,
6093,
40845,
340,
77333,
7137,
22402,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestExposeClaims(t *testing.T) {
cases := []struct {
name string
cluster *clusterv1.ManagedCluster
claims []*clusterv1alpha1.ClusterClaim
maxCustomClusterClaims int
validateActions func(t *testing.T, actions []clienttesting.Action)
expectedErr string
}{
{
name: "sync claims into status of the managed cluster",
cluster: testinghelpers.NewJoinedManagedCluster(),
claims: []*clusterv1alpha1.ClusterClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "a",
},
Spec: clusterv1alpha1.ClusterClaimSpec{
Value: "b",
},
},
},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testinghelpers.AssertActions(t, actions, "get", "patch")
patch := actions[1].(clienttesting.PatchAction).GetPatch()
cluster := &clusterv1.ManagedCluster{}
err := json.Unmarshal(patch, cluster)
if err != nil {
t.Fatal(err)
}
expected := []clusterv1.ManagedClusterClaim{
{
Name: "a",
Value: "b",
},
}
actual := cluster.Status.ClusterClaims
if !reflect.DeepEqual(actual, expected) {
t.Errorf("expected cluster claim %v but got: %v", expected, actual)
}
},
},
{
name: "truncate custom cluster claims",
cluster: testinghelpers.NewJoinedManagedCluster(),
claims: []*clusterv1alpha1.ClusterClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "a",
},
Spec: clusterv1alpha1.ClusterClaimSpec{
Value: "b",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "e",
},
Spec: clusterv1alpha1.ClusterClaimSpec{
Value: "f",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "id.k8s.io",
},
Spec: clusterv1alpha1.ClusterClaimSpec{
Value: "cluster1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "c",
},
Spec: clusterv1alpha1.ClusterClaimSpec{
Value: "d",
},
},
},
maxCustomClusterClaims: 2,
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testinghelpers.AssertActions(t, actions, "get", "patch")
patch := actions[1].(clienttesting.PatchAction).GetPatch()
cluster := &clusterv1.ManagedCluster{}
err := json.Unmarshal(patch, cluster)
if err != nil {
t.Fatal(err)
}
expected := []clusterv1.ManagedClusterClaim{
{
Name: "id.k8s.io",
Value: "cluster1",
},
{
Name: "a",
Value: "b",
},
{
Name: "c",
Value: "d",
},
}
actual := cluster.Status.ClusterClaims
if !reflect.DeepEqual(actual, expected) {
t.Errorf("expected cluster claim %v but got: %v", expected, actual)
}
},
},
{
name: "remove claims from managed cluster",
cluster: newManagedCluster([]clusterv1.ManagedClusterClaim{
{
Name: "a",
Value: "b",
},
}),
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testinghelpers.AssertActions(t, actions, "get", "patch")
patch := actions[1].(clienttesting.PatchAction).GetPatch()
cluster := &clusterv1.ManagedCluster{}
err := json.Unmarshal(patch, cluster)
if err != nil {
t.Fatal(err)
}
actual := cluster.Status.ClusterClaims
if len(actual) > 0 {
t.Errorf("expected no cluster claim but got: %v", actual)
}
},
},
{
name: "sync non-customized-only claims into status of the managed cluster",
cluster: testinghelpers.NewJoinedManagedCluster(),
claims: []*clusterv1alpha1.ClusterClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "a",
Labels: map[string]string{labelCustomizedOnly: ""},
},
Spec: clusterv1alpha1.ClusterClaimSpec{
Value: "b",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "c",
},
Spec: clusterv1alpha1.ClusterClaimSpec{
Value: "d",
},
},
},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testinghelpers.AssertActions(t, actions, "get", "patch")
patch := actions[1].(clienttesting.PatchAction).GetPatch()
cluster := &clusterv1.ManagedCluster{}
err := json.Unmarshal(patch, cluster)
if err != nil {
t.Fatal(err)
}
expected := []clusterv1.ManagedClusterClaim{
{
Name: "c",
Value: "d",
},
}
actual := cluster.Status.ClusterClaims
if !reflect.DeepEqual(actual, expected) {
t.Errorf("expected cluster claim %v but got: %v", expected, actual)
}
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
if c.cluster != nil {
objects = append(objects, c.cluster)
}
clusterClient := clusterfake.NewSimpleClientset(objects...)
clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
if c.cluster != nil {
if err := clusterInformerFactory.Cluster().V1().ManagedClusters().Informer().GetStore().Add(c.cluster); err != nil {
t.Fatal(err)
}
}
for _, claim := range c.claims {
if err := clusterInformerFactory.Cluster().V1alpha1().ClusterClaims().Informer().GetStore().Add(claim); err != nil {
t.Fatal(err)
}
}
if c.maxCustomClusterClaims == 0 {
c.maxCustomClusterClaims = 20
}
ctrl := managedClusterClaimController{
clusterName: testinghelpers.TestManagedClusterName,
maxCustomClusterClaims: c.maxCustomClusterClaims,
hubClusterClient: clusterClient,
hubClusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
claimLister: clusterInformerFactory.Cluster().V1alpha1().ClusterClaims().Lister(),
}
syncErr := ctrl.exposeClaims(context.TODO(), testinghelpers.NewFakeSyncContext(t, c.cluster.Name), c.cluster)
testinghelpers.AssertError(t, syncErr, c.expectedErr)
c.validateActions(t, clusterClient.Actions())
})
}
} | explode_data.jsonl/31990 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2791
} | [
2830,
3393,
40073,
51133,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
11609,
4293,
914,
198,
197,
197,
18855,
394,
353,
564,
590,
648,
16,
29902,
3279,
28678,
198,
197,
197,
48561,
338,
29838,
564,
590,
648,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestStatefulSetStoragePath(t *testing.T) {
labels := map[string]string{
"testlabel": "testlabelvalue",
}
annotations := map[string]string{
"testannotation": "testannotationvalue",
}
sset, err := makeStatefulSet(&monitoringv1.Alertmanager{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
Annotations: annotations,
},
}, defaultTestConfig, "")
require.NoError(t, err)
reg := strings.Join(sset.Spec.Template.Spec.Containers[0].Args, " ")
for _, k := range sset.Spec.Template.Spec.Containers[0].VolumeMounts {
if k.Name == "config-volume" {
if !strings.Contains(reg, k.MountPath) {
t.Fatal("config-volume Path not configured correctly")
} else {
return
}
}
}
t.Fatal("config-volume not set")
} | explode_data.jsonl/25237 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 293
} | [
2830,
3393,
1397,
1262,
1649,
5793,
1820,
1155,
353,
8840,
836,
8,
341,
95143,
1669,
2415,
14032,
30953,
515,
197,
197,
1,
1944,
1502,
788,
330,
1944,
1502,
957,
756,
197,
532,
197,
39626,
1669,
2415,
14032,
30953,
515,
197,
197,
1,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestReorder(t *testing.T) {
test(t, []testSequence{
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1001,
BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3}},
},
want: []Reassembly{},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1004,
BaseLayer: layers.BaseLayer{Payload: []byte{2, 2, 3}},
},
want: []Reassembly{},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1010,
BaseLayer: layers.BaseLayer{Payload: []byte{4, 2, 3}},
},
want: []Reassembly{},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1007,
BaseLayer: layers.BaseLayer{Payload: []byte{3, 2, 3}},
},
want: []Reassembly{
Reassembly{
Skip: -1,
Bytes: []byte{1, 2, 3},
},
Reassembly{
Bytes: []byte{2, 2, 3},
},
Reassembly{
Bytes: []byte{3, 2, 3},
},
Reassembly{
Bytes: []byte{4, 2, 3},
},
},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1016,
BaseLayer: layers.BaseLayer{Payload: []byte{2, 2, 3}},
},
want: []Reassembly{},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1019,
BaseLayer: layers.BaseLayer{Payload: []byte{3, 2, 3}},
},
want: []Reassembly{},
},
{
in: layers.TCP{
SrcPort: 1,
DstPort: 2,
Seq: 1013,
BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3}},
},
want: []Reassembly{
Reassembly{
Bytes: []byte{1, 2, 3},
},
Reassembly{
Bytes: []byte{2, 2, 3},
},
Reassembly{
Bytes: []byte{3, 2, 3},
},
},
},
})
} | explode_data.jsonl/9650 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1083
} | [
2830,
3393,
693,
1358,
1155,
353,
8840,
836,
8,
972,
18185,
1155,
11,
3056,
1944,
14076,
1666,
197,
197,
1666,
298,
17430,
25,
13617,
836,
7123,
1666,
571,
7568,
1287,
7084,
25,
256,
220,
16,
1871,
571,
10957,
267,
7084,
25,
256,
22... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_InboundIpRule_Status_WhenSerializedToJson_DeserializesAsEqual(t *testing.T) {
t.Parallel()
parameters := gopter.DefaultTestParameters()
parameters.MaxSize = 10
properties := gopter.NewProperties(parameters)
properties.Property(
"Round trip of InboundIpRule_Status via JSON returns original",
prop.ForAll(RunJSONSerializationTestForInboundIpRuleStatus, InboundIpRuleStatusGenerator()))
properties.TestingRun(t, gopter.NewFormatedReporter(true, 240, os.Stdout))
} | explode_data.jsonl/39615 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 160
} | [
2830,
3393,
25972,
10891,
23378,
11337,
36449,
62,
4498,
77521,
78967,
98054,
2848,
4756,
2121,
2993,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
67543,
1669,
728,
73137,
13275,
2271,
9706,
741,
67543,
14535,
1695,
284,
220,
16,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBasicSessions(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vnet := getVirtualNetwork()
sesgen := NewTestSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
block := bgen.Next()
inst := sesgen.Instances(2)
a := inst[0]
b := inst[1]
if err := b.Blockstore().Put(block); err != nil {
t.Fatal(err)
}
sesa := a.Exchange.NewSession(ctx)
blkout, err := sesa.GetBlock(ctx, block.Cid())
if err != nil {
t.Fatal(err)
}
if !blkout.Cid().Equals(block.Cid()) {
t.Fatal("got wrong block")
}
} | explode_data.jsonl/48076 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 243
} | [
2830,
3393,
15944,
59062,
1155,
353,
8840,
836,
8,
341,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
16867,
9121,
2822,
5195,
4711,
1669,
633,
33026,
12320,
741,
1903,
288,
4370,
1669,
1532,
2271,
5283,
12561,
3747,
4711,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestDoubleDataPoint_Value(t *testing.T) {
ms := NewDoubleDataPoint()
ms.InitEmpty()
assert.EqualValues(t, float64(0.0), ms.Value())
testValValue := float64(17.13)
ms.SetValue(testValValue)
assert.EqualValues(t, testValValue, ms.Value())
} | explode_data.jsonl/19542 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 96
} | [
2830,
3393,
7378,
1043,
2609,
27867,
1155,
353,
8840,
836,
8,
341,
47691,
1669,
1532,
7378,
1043,
2609,
741,
47691,
26849,
3522,
741,
6948,
12808,
6227,
1155,
11,
2224,
21,
19,
7,
15,
13,
15,
701,
9829,
6167,
2398,
18185,
2208,
1130,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNetworkLinkAddMacVlan(t *testing.T) {
if testing.Short() {
return
}
tl := struct {
name string
mode string
}{
name: "tstVlan",
mode: "private",
}
masterLink := testLink{"tstEth", "dummy"}
addLink(t, masterLink.name, masterLink.linkType)
defer deleteLink(t, masterLink.name)
if err := NetworkLinkAddMacVlan(masterLink.name, tl.name, tl.mode); err != nil {
t.Fatalf("Unable to create %#v MAC VLAN interface: %s", tl, err)
}
readLink(t, tl.name)
} | explode_data.jsonl/76208 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 196
} | [
2830,
3393,
12320,
3939,
2212,
19552,
53,
10715,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
853,
198,
197,
630,
3244,
75,
1669,
2036,
341,
197,
11609,
914,
198,
197,
60247,
914,
198,
197,
59403,
197,
11609,
25,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestListenError(t *testing.T) {
data := "data"
s, server := newServiceAndServer(t)
go server.Start()
l := New(s)
go l.Start()
_, execution, err := server.Execute("log", data)
assert.Nil(t, err)
assert.Equal(t, "error", execution.Key())
var resp errorResponse
assert.Nil(t, execution.Decode(&resp))
assert.Contains(t, "json", resp.Message)
} | explode_data.jsonl/61162 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 138
} | [
2830,
3393,
38714,
1454,
1155,
353,
8840,
836,
8,
341,
8924,
1669,
330,
691,
1837,
1903,
11,
3538,
1669,
501,
1860,
3036,
5475,
1155,
340,
30680,
3538,
12101,
2822,
8810,
1669,
1532,
1141,
340,
30680,
326,
12101,
2822,
197,
6878,
11320,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestChunkSize(t *testing.T) {
for _, enc := range testEncoding {
t.Run(enc.String(), func(t *testing.T) {
c := NewMemChunk(enc, testBlockSize, testTargetSize)
inserted := fillChunk(c)
b, err := c.Bytes()
if err != nil {
t.Fatal(err)
}
t.Log("Chunk size", humanize.Bytes(uint64(len(b))))
t.Log("characters ", inserted)
})
}
} | explode_data.jsonl/15703 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 163
} | [
2830,
3393,
28304,
1695,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
3209,
1669,
2088,
1273,
14690,
341,
197,
3244,
16708,
66941,
6431,
1507,
2915,
1155,
353,
8840,
836,
8,
341,
298,
1444,
1669,
1532,
18816,
28304,
66941,
11,
1273,
89932,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestPkiService_SignClientCertificate(t *testing.T) {
mc := InitMockEnvironment(t)
defer mc.Close()
cn := "test"
altNames := models.AltNames{}
certId := "132"
certPem := []byte("pem")
ps, err := NewPKIService(mc.conf)
assert.NoError(t, err)
rootId := "12345678"
// good case
mc.pki.EXPECT().GetRootCertId().Return(rootId).Times(3)
mc.pki.EXPECT().CreateClientCert(gomock.Any(), rootId).Return(certId, nil).Times(1)
mc.pki.EXPECT().GetClientCert(certId).Return(certPem, nil).Times(1)
res, err := ps.SignClientCertificate(cn, altNames)
assert.NoError(t, err)
assert.Equal(t, certPem, res.CertPEM)
//bad case 1
mc.pki.EXPECT().CreateClientCert(gomock.Any(), rootId).Return("", os.ErrNotExist).Times(1)
res, err = ps.SignClientCertificate(cn, altNames)
assert.Error(t, err)
//bad case 2
mc.pki.EXPECT().CreateClientCert(gomock.Any(), rootId).Return(certId, nil).Times(1)
mc.pki.EXPECT().GetClientCert(certId).Return(nil, os.ErrNotExist).Times(1)
res, err = ps.SignClientCertificate(cn, altNames)
assert.Error(t, err)
} | explode_data.jsonl/66998 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 424
} | [
2830,
3393,
47,
6642,
1860,
1098,
622,
2959,
33202,
1155,
353,
8840,
836,
8,
341,
97662,
1669,
15690,
11571,
12723,
1155,
340,
16867,
19223,
10421,
2822,
1444,
77,
1669,
330,
1944,
698,
197,
3145,
7980,
1669,
4119,
875,
4832,
7980,
1609... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestScannerStats(t *testing.T) {
defer leaktest.AfterTest(t)
const count = 3
iter := newTestIterator(count)
q := &testQueue{}
stopper := util.NewStopper()
defer stopper.Stop()
s := newRangeScanner(1*time.Millisecond, 0, iter, nil)
s.AddQueues(q)
mc := hlc.NewManualClock(0)
clock := hlc.NewClock(mc.UnixNano)
// At start, scanner stats should be blank for MVCC, but have accurate number of ranges.
if rc := s.Stats().RangeCount; rc != count {
t.Errorf("range count expected %d; got %d", count, rc)
}
if vb := s.Stats().MVCC.ValBytes; vb != 0 {
t.Errorf("value bytes expected %d; got %d", 0, vb)
}
s.Start(clock, stopper)
// We expect a full run to accumulate stats from all ranges.
if err := util.IsTrueWithin(func() bool {
if rc := s.Stats().RangeCount; rc != count {
return false
}
if vb := s.Stats().MVCC.ValBytes; vb != count*2 {
return false
}
return true
}, 100*time.Millisecond); err != nil {
t.Error(err)
}
} | explode_data.jsonl/39878 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 383
} | [
2830,
3393,
31002,
16635,
1155,
353,
8840,
836,
8,
341,
16867,
23352,
1944,
36892,
2271,
1155,
340,
4777,
1760,
284,
220,
18,
198,
79924,
1669,
501,
2271,
11951,
11512,
340,
18534,
1669,
609,
1944,
7554,
16094,
62644,
712,
1669,
4094,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestChannelIdentitySpec(t *testing.T) {
s := &Channel{
Spec: ChannelSpec{
IdentitySpec: duckv1beta1.IdentitySpec{
ServiceAccountName: "test",
},
},
}
want := "test"
got := s.IdentitySpec().ServiceAccountName
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("failed to get expected (-want, +got) = %v", diff)
}
} | explode_data.jsonl/24164 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 138
} | [
2830,
3393,
9629,
18558,
8327,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
609,
9629,
515,
197,
7568,
992,
25,
13434,
8327,
515,
298,
197,
18558,
8327,
25,
35985,
85,
16,
19127,
16,
24423,
8327,
515,
571,
91619,
7365,
675,
25,
330,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestEncode(t *testing.T) {
test := []struct {
typ DataCoding
text []byte
want []byte
}{
{Latin1Type, []byte("áéíóú moço"), []byte("\xe1\xe9\xed\xf3\xfa mo\xe7o")},
{UCS2Type, []byte("áéíóú moço"), []byte("\x00\xe1\x00\xe9\x00\xed\x00\xf3\x00\xfa\x00 \x00m\x00o\x00\xe7\x00o")},
}
for _, tc := range test {
have := Encode(tc.typ, tc.text)
if !bytes.Equal(tc.want, have) {
t.Fatalf("unexpected text for %#x:\nwant: %q\nhave: %q",
tc.typ, tc.want, have)
}
}
} | explode_data.jsonl/72726 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 263
} | [
2830,
3393,
32535,
1155,
353,
8840,
836,
8,
341,
18185,
1669,
3056,
1235,
341,
197,
25314,
220,
2885,
77513,
198,
197,
15425,
3056,
3782,
198,
197,
50780,
3056,
3782,
198,
197,
59403,
197,
197,
90,
41547,
16,
929,
11,
3056,
3782,
445,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestDefaultConfig(t *testing.T) {
assert := assert.New(t)
// set up some defaults
cfg := DefaultConfig()
assert.NotNil(cfg.P2P)
assert.NotNil(cfg.Mempool)
assert.NotNil(cfg.Consensus)
// check the root dir stuff...
cfg.SetRoot("/foo")
cfg.Genesis = "bar"
cfg.DBPath = "/opt/data"
assert.Equal("/foo/bar", cfg.GenesisFile())
assert.Equal("/opt/data", cfg.DBDir())
} | explode_data.jsonl/66912 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 156
} | [
2830,
3393,
3675,
2648,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
197,
322,
738,
705,
1045,
16674,
198,
50286,
1669,
7899,
2648,
741,
6948,
93882,
28272,
1069,
17,
47,
340,
6948,
93882,
28272,
1321,
3262,
1749,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_ddosxDomainWAFAdvancedRuleDelete(t *testing.T) {
t.Run("Success", func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
service := mocks.NewMockDDoSXService(mockCtrl)
service.EXPECT().DeleteDomainWAFAdvancedRule("testdomain1.co.uk", "00000000-0000-0000-0000-000000000000").Return(nil).Times(1)
ddosxDomainWAFAdvancedRuleDelete(service, &cobra.Command{}, []string{"testdomain1.co.uk", "00000000-0000-0000-0000-000000000000"})
})
t.Run("DeleteDomainWAFAdvancedRule_OutputsError", func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
service := mocks.NewMockDDoSXService(mockCtrl)
service.EXPECT().DeleteDomainWAFAdvancedRule("testdomain1.co.uk", "00000000-0000-0000-0000-000000000000").Return(errors.New("test error"))
test_output.AssertErrorOutput(t, "Error removing domain WAF advanced rule [00000000-0000-0000-0000-000000000000]: test error\n", func() {
ddosxDomainWAFAdvancedRuleDelete(service, &cobra.Command{}, []string{"testdomain1.co.uk", "00000000-0000-0000-0000-000000000000"})
})
})
} | explode_data.jsonl/43038 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 391
} | [
2830,
3393,
43174,
436,
87,
13636,
54,
8276,
35457,
11337,
6435,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
7188,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
77333,
15001,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
197,
16867... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCreateService(t *testing.T) {
// An example service
service := kobject.ServiceConfig{
ContainerName: "name",
Image: "image",
Environment: []kobject.EnvVar{kobject.EnvVar{Name: "env", Value: "value"}},
Port: []kobject.Ports{kobject.Ports{HostPort: 123, ContainerPort: 456, Protocol: string(corev1.ProtocolTCP)}},
Command: []string{"cmd"},
WorkingDir: "dir",
Args: []string{"arg1", "arg2"},
VolList: []string{"/tmp/volume"},
Network: []string{"network1", "network2"}, // not supported
Labels: nil,
Annotations: map[string]string{"abc": "def"},
CPUQuota: 1, // not supported
CapAdd: []string{"cap_add"}, // not supported
CapDrop: []string{"cap_drop"}, // not supported
Expose: []string{"expose"}, // not supported
Privileged: true,
Restart: "always",
}
// An example object generated via k8s runtime.Objects()
komposeObject := kobject.KomposeObject{
ServiceConfigs: map[string]kobject.ServiceConfig{"app": service},
}
k := Kubernetes{}
objects, err := k.Transform(komposeObject, kobject.ConvertOptions{CreateD: true, Replicas: 3})
if err != nil {
t.Error(errors.Wrap(err, "k.Transform failed"))
}
// Test the creation of the service
svc := k.CreateService("foo", service, objects)
if svc.Spec.Ports[0].Port != 123 {
t.Errorf("Expected port 123 upon conversion, actual %d", svc.Spec.Ports[0].Port)
}
} | explode_data.jsonl/58953 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 608
} | [
2830,
3393,
4021,
1860,
1155,
353,
8840,
836,
8,
341,
197,
322,
1527,
3110,
2473,
198,
52934,
1669,
595,
1700,
13860,
2648,
515,
197,
197,
4502,
675,
25,
330,
606,
756,
197,
53397,
25,
260,
330,
1805,
756,
197,
197,
12723,
25,
256,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestHarness(t *testing.T) {
// NOTE(axw) we do not support binary propagation, but we patch in
// basic support *for the tests only* so we can check compatibility
// with the HTTP and text formats.
binaryInject = func(w io.Writer, traceContext apm.TraceContext) error {
return json.NewEncoder(w).Encode(apmhttp.FormatTraceparentHeader(traceContext))
}
binaryExtract = func(r io.Reader) (apm.TraceContext, error) {
var headerValue string
if err := json.NewDecoder(r).Decode(&headerValue); err != nil {
return apm.TraceContext{}, err
}
return apmhttp.ParseTraceparentHeader(headerValue)
}
defer func() {
binaryInject = binaryInjectUnsupported
binaryExtract = binaryExtractUnsupported
}()
newTracer := func() (opentracing.Tracer, func()) {
apmtracer, err := apm.NewTracer("transporttest", "")
if err != nil {
panic(err)
}
apmtracer.Transport = transporttest.Discard
tracer := New(WithTracer(apmtracer))
return tracer, apmtracer.Close
}
harness.RunAPIChecks(t, newTracer,
harness.CheckExtract(true),
harness.CheckInject(true),
harness.UseProbe(harnessAPIProbe{}),
)
} | explode_data.jsonl/50552 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 419
} | [
2830,
3393,
74248,
1155,
353,
8840,
836,
8,
341,
197,
322,
16743,
41922,
86,
8,
582,
653,
537,
1824,
7868,
53643,
11,
714,
582,
10900,
304,
198,
197,
322,
6770,
1824,
353,
1958,
279,
7032,
1172,
9,
773,
582,
646,
1779,
24748,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestImageSize4096(t *testing.T) {
src, _ := NewImage(4096, 4096, FilterNearest)
dst, _ := NewImage(4096, 4096, FilterNearest)
pix := make([]byte, 4096*4096*4)
for i := 0; i < 4096; i++ {
j := 4095
idx := 4 * (i + j*4096)
pix[idx] = uint8(i + j)
pix[idx+1] = uint8((i + j) >> 8)
pix[idx+2] = uint8((i + j) >> 16)
pix[idx+3] = 0xff
}
for j := 0; j < 4096; j++ {
i := 4095
idx := 4 * (i + j*4096)
pix[idx] = uint8(i + j)
pix[idx+1] = uint8((i + j) >> 8)
pix[idx+2] = uint8((i + j) >> 16)
pix[idx+3] = 0xff
}
src.ReplacePixels(pix)
dst.DrawImage(src, nil)
for i := 4095; i < 4096; i++ {
j := 4095
got := dst.At(i, j).(color.RGBA)
want := color.RGBA{uint8(i + j), uint8((i + j) >> 8), uint8((i + j) >> 16), 0xff}
if got != want {
t.Errorf("At(%d, %d): got: %#v, want: %#v", i, j, got, want)
}
}
for j := 4095; j < 4096; j++ {
i := 4095
got := dst.At(i, j).(color.RGBA)
want := color.RGBA{uint8(i + j), uint8((i + j) >> 8), uint8((i + j) >> 16), 0xff}
if got != want {
t.Errorf("At(%d, %d): got: %#v, want: %#v", i, j, got, want)
}
}
} | explode_data.jsonl/10905 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 590
} | [
2830,
3393,
1906,
1695,
19,
15,
24,
21,
1155,
353,
8840,
836,
8,
341,
41144,
11,
716,
1669,
1532,
1906,
7,
19,
15,
24,
21,
11,
220,
19,
15,
24,
21,
11,
12339,
8813,
15432,
340,
52051,
11,
716,
1669,
1532,
1906,
7,
19,
15,
24,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestBackoff(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mw := NewMailWorker()
go func(ctx context.Context) {
mw.Start(ctx)
}(ctx)
expectedError := &textproto.Error{
Code: 400,
Msg: "Temporary error",
}
sender := newMockErrorSender(expectedError)
dialer := newMockDialer()
dialer.setDial(func() (Sender, error) {
return sender, nil
})
messages := generateMessages(dialer)
// Send the campaign
mw.Queue(MailJob{Bcc: false, Mails: messages})
got := []*mockMessage{}
for message := range sender.messageChan {
got = append(got, message)
}
// Check that we only sent one message
expectedCount := 1
if len(got) != expectedCount {
t.Fatalf("Unexpected number of messages received. Expected %d Got %d", len(got), expectedCount)
}
// Check that it's the correct message
originalFrom := messages[1].(*mockMessage).from
if got[0].from != originalFrom {
t.Fatalf("Invalid message received. Expected %s, Got %s", originalFrom, got[0].from)
}
// Check that the first message performed a backoff
backoffCount := messages[0].(*mockMessage).backoffCount
if backoffCount != expectedCount {
t.Fatalf("Did not receive expected backoff. Got backoffCount %d, Expected %d", backoffCount, expectedCount)
}
// Check that there was a reset performed on the sender
if sender.resetCount != expectedCount {
t.Fatalf("Did not receive expected reset. Got resetCount %d, expected %d", sender.resetCount, expectedCount)
}
} | explode_data.jsonl/55821 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 499
} | [
2830,
3393,
3707,
1847,
1155,
353,
8840,
836,
8,
341,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
16867,
9121,
2822,
2109,
86,
1669,
1532,
16702,
21936,
741,
30680,
2915,
7502,
2266,
9328,
8,
341,
197,
2109,
86,
12101,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSelectDual(t *testing.T) {
executor, sbc1, _, lookup := createLegacyExecutorEnv()
_, err := executorExec(executor, "select @@aa.bb from dual", nil)
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{{
Sql: "select @@aa.bb from dual",
BindVariables: map[string]*querypb.BindVariable{},
}}
if !reflect.DeepEqual(sbc1.Queries, wantQueries) {
t.Errorf("sbc1.Queries: %+v, want %+v\n", sbc1.Queries, wantQueries)
}
_, err = executorExec(executor, "select @@aa.bb from TestUnsharded.dual", nil)
require.NoError(t, err)
if !reflect.DeepEqual(lookup.Queries, wantQueries) {
t.Errorf("sbc1.Queries: %+v, want %+v\n", sbc1.Queries, wantQueries)
}
} | explode_data.jsonl/67400 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 295
} | [
2830,
3393,
3379,
85074,
1155,
353,
8840,
836,
8,
341,
67328,
4831,
11,
7898,
66,
16,
11,
8358,
18615,
1669,
1855,
77415,
25255,
14359,
2822,
197,
6878,
1848,
1669,
31558,
10216,
46896,
4831,
11,
330,
1742,
22307,
5305,
94459,
504,
1857... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestAssessMetricStatusInFlightMeasurement(t *testing.T) {
// in-flight measurement
metric := v1alpha1.Metric{
Name: "success-rate",
}
result := v1alpha1.MetricResult{
Measurements: []v1alpha1.Measurement{
{
Value: "99",
Phase: v1alpha1.AnalysisPhaseSuccessful,
StartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
},
{
Value: "99",
Phase: v1alpha1.AnalysisPhaseRunning,
StartedAt: timePtr(metav1.NewTime(time.Now())),
},
},
}
assert.Equal(t, v1alpha1.AnalysisPhaseRunning, assessMetricStatus(metric, result, false))
assert.Equal(t, v1alpha1.AnalysisPhaseRunning, assessMetricStatus(metric, result, true))
} | explode_data.jsonl/75817 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 329
} | [
2830,
3393,
5615,
433,
54310,
2522,
641,
45305,
76548,
1155,
353,
8840,
836,
8,
341,
197,
322,
304,
76203,
18662,
198,
2109,
16340,
1669,
348,
16,
7141,
16,
1321,
16340,
515,
197,
21297,
25,
330,
5630,
43026,
756,
197,
532,
9559,
1669... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGrpc_AddPushSubscribe(t *testing.T) {
cfg := types.NewChain33Config(types.GetDefaultCfgstring())
g := Grpc{}
qapi = new(mocks.QueueProtocolAPI)
qapi.On("GetConfig", mock.Anything).Return(cfg)
g.cli.QueueProtocolAPI = qapi
qapi.On("AddPushSubscribe", &types.PushSubscribeReq{}).Return(&types.ReplySubscribePush{IsOk: false}, types.ErrInvalidParam)
_, err := g.AddPushSubscribe(getOkCtx(), &types.PushSubscribeReq{})
assert.NotNil(t, err)
} | explode_data.jsonl/350 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 171
} | [
2830,
3393,
6464,
3992,
21346,
16644,
28573,
1155,
353,
8840,
836,
8,
341,
50286,
1669,
4494,
7121,
18837,
18,
18,
2648,
52613,
2234,
3675,
42467,
917,
2398,
3174,
1669,
2825,
3992,
16094,
18534,
2068,
284,
501,
1255,
25183,
50251,
20689,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestImageRef_WebP__ReducedEffort(t *testing.T) {
Startup(nil)
srcBytes, err := ioutil.ReadFile(resources + "webp+alpha.webp")
require.NoError(t, err)
src := bytes.NewReader(srcBytes)
img, err := NewImageFromReader(src)
require.NoError(t, err)
require.NotNil(t, img)
defer img.Close()
params := NewDefaultWEBPExportParams()
params.Effort = 2
buf, _, err := img.Export(params)
assert.NoError(t, err)
assert.Equal(t, 48850, len(buf))
} | explode_data.jsonl/38808 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 185
} | [
2830,
3393,
1906,
3945,
62,
5981,
47,
563,
16609,
291,
46588,
371,
1155,
353,
8840,
836,
8,
341,
197,
39076,
27907,
692,
41144,
7078,
11,
1848,
1669,
43144,
78976,
52607,
488,
330,
2911,
79,
10,
7141,
6411,
79,
1138,
17957,
35699,
115... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
provider := googleProvider()
session, err := provider.BeginAuth("test_state")
s := session.(*google.Session)
a.NoError(err)
a.Contains(s.AuthURL, "accounts.google.com/o/oauth2/auth")
a.Contains(s.AuthURL, fmt.Sprintf("client_id=%s", domain.Env.GoogleKey))
a.Contains(s.AuthURL, "state=test_state")
a.Contains(s.AuthURL, "scope=email")
} | explode_data.jsonl/19856 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 171
} | [
2830,
3393,
93447,
5087,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
11323,
1669,
2060,
7121,
1155,
692,
197,
19979,
1669,
11558,
5179,
741,
25054,
11,
1848,
1669,
9109,
28467,
5087,
445,
1944,
4387,
1138,
1903,
1669,
3797,
41... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInstructionIteratorInvalid(t *testing.T) {
cnt := 0
script, _ := hex.DecodeString("6100")
it := NewInstructionIterator(script)
for it.Next() {
cnt++
}
if it.Error() == nil {
t.Errorf("Expected an error, but got %v instead.", cnt)
}
} | explode_data.jsonl/21935 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 100
} | [
2830,
3393,
16664,
11951,
7928,
1155,
353,
8840,
836,
8,
341,
60553,
1669,
220,
15,
198,
86956,
11,
716,
1669,
12371,
56372,
703,
445,
21,
16,
15,
15,
5130,
23374,
1669,
1532,
16664,
11951,
42795,
340,
2023,
432,
18501,
368,
341,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestGenManTree(t *testing.T) {
c := &cobra.Command{Use: "do [OPTIONS] arg1 arg2"}
header := &GenManHeader{Section: "2"}
tmpdir, err := ioutil.TempDir("", "test-gen-man-tree")
if err != nil {
t.Fatalf("Failed to create tmpdir: %s", err.Error())
}
defer os.RemoveAll(tmpdir)
if err := GenManTree(c, header, tmpdir); err != nil {
t.Fatalf("GenManTree failed: %s", err.Error())
}
if _, err := os.Stat(filepath.Join(tmpdir, "do.2")); err != nil {
t.Fatalf("Expected file 'do.2' to exist")
}
if header.Title != "" {
t.Fatalf("Expected header.Title to be unmodified")
}
} | explode_data.jsonl/21960 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 246
} | [
2830,
3393,
9967,
1658,
6533,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
609,
59410,
12714,
90,
10253,
25,
330,
2982,
508,
56929,
60,
1392,
16,
1392,
17,
16707,
20883,
1669,
609,
9967,
1658,
4047,
90,
9620,
25,
330,
17,
16707,
20082,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestConfiguration_FindCommandByName_OSSpecific_Templating(t *testing.T) {
tplName := stringP("base1")
commandName := stringP("command")
baseTplName := stringP("base")
baseCommand := stringP("base-cmd")
baseEntryPoint := stringP("base-entry-point")
windowsCommand := stringP("windows-cmd")
linuxCommand := stringP("linux-cmd")
template1 := CommandDefinition{Name: baseTplName, Command: baseCommand, EntryPoint: baseEntryPoint}
template2 := CommandDefinition{Name: tplName, Template: baseTplName, OS: stringP("windows"), Command: windowsCommand}
template3 := CommandDefinition{Name: tplName, Template: baseTplName, OS: stringP("linux"), Command: linuxCommand}
command := CommandDefinition{Name: commandName, Template: tplName, OS: stringP("linux")}
cfg := Configuration{
osNameMatcher: fakeOSMatcher("linux"),
Command: []CommandDefinition{
template1,
template2,
template3,
command,
},
}
commandDef, err := cfg.FindCommandByName("command")
if err != nil {
t.Fatalf("Did not expect cgf.FindCommandByName to return an error, but got: %v", err)
}
assert.Equal(t, CommandDefinition{
OS: nil,
EntryPoint: baseEntryPoint,
Command: linuxCommand,
Name: commandName,
}, commandDef)
} | explode_data.jsonl/17912 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 420
} | [
2830,
3393,
7688,
95245,
4062,
16898,
2232,
1220,
15564,
1139,
25892,
1095,
1155,
353,
8840,
836,
8,
341,
3244,
500,
675,
1669,
914,
47,
445,
3152,
16,
1138,
45566,
675,
1669,
914,
47,
445,
5631,
1138,
24195,
87137,
675,
1669,
914,
47... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestEncodeQuotedMapKeys(t *testing.T) {
var buf bytes.Buffer
if err := NewEncoder(&buf).QuoteMapKeys(true).Encode(mapsTestData); err != nil {
t.Fatal(err)
}
result := buf.Bytes()
expected := mapsTestToml
if !bytes.Equal(result, expected) {
t.Errorf("Bad maps marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result)
}
} | explode_data.jsonl/46328 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 143
} | [
2830,
3393,
32535,
2183,
9253,
2227,
8850,
1155,
353,
8840,
836,
8,
341,
2405,
6607,
5820,
22622,
198,
743,
1848,
1669,
1532,
19921,
2099,
5909,
568,
19466,
2227,
8850,
3715,
568,
32535,
1255,
2625,
83920,
1215,
1848,
961,
2092,
341,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestExpressionLessThanEqualTo(t *testing.T) {
x := NewVariable("x")
solver := NewSolver()
err := solver.AddConstraint(NewExpression(100).LessThanOrEqualToVariable(x))
assert.NoError(t, err)
solver.UpdateVariables()
assert.True(t, 100 <= x.Value)
err = solver.AddConstraint(x.EqualsFloat(110))
assert.NoError(t, err)
solver.UpdateVariables()
assert.InDelta(t, x.Value, 110, Epsilon)
} | explode_data.jsonl/36236 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 159
} | [
2830,
3393,
9595,
27451,
26067,
22759,
1155,
353,
8840,
836,
8,
341,
10225,
1669,
1532,
7827,
445,
87,
1138,
1903,
7921,
1669,
1532,
63830,
741,
9859,
1669,
28961,
1904,
17890,
35063,
9595,
7,
16,
15,
15,
568,
27451,
89387,
7827,
2075,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestGetUserByEmail exercises the GetUserByEmail API: lookup by plain and
// slash-containing emails, etag handling, invalid/missing emails, and how
// the PrivacySettings (ShowEmailAddress / ShowFullName) affect sanitization
// for regular clients versus system-admin/local clients.
func TestGetUserByEmail(t *testing.T) {
	th := Setup(t)
	defer th.TearDown()
	user := th.CreateUser()
	// A user whose email contains '/' characters, to exercise URL encoding.
	userWithSlash, _, err := th.SystemAdminClient.CreateUser(&model.User{
		Email: "email/with/slashes@example.com",
		Username: GenerateTestUsername(),
		Password: "Pa$$word11",
	})
	require.NoError(t, err)
	// Start with fully-visible privacy settings.
	th.App.UpdateConfig(func(cfg *model.Config) {
		*cfg.PrivacySettings.ShowEmailAddress = true
		*cfg.PrivacySettings.ShowFullName = true
	})
	th.TestForAllClients(t, func(t *testing.T, client *model.Client4) {
		t.Run("should be able to get another user by email", func(t *testing.T) {
			ruser, _, err := client.GetUserByEmail(user.Email, "")
			require.NoError(t, err)
			CheckUserSanitization(t, ruser)
			require.Equal(t, user.Email, ruser.Email)
		})
		t.Run("Get user with a / character in the email", func(t *testing.T) {
			ruser, _, err := client.GetUserByEmail(userWithSlash.Email, "")
			require.NoError(t, err)
			require.Equal(t, ruser.Id, userWithSlash.Id)
		})
		t.Run("should return not modified when provided with a matching etag", func(t *testing.T) {
			// First request captures the etag; replaying it should yield 304.
			_, resp, err := client.GetUserByEmail(user.Email, "")
			require.NoError(t, err)
			ruser, resp, _ := client.GetUserByEmail(user.Email, resp.Etag)
			CheckEtag(t, ruser, resp)
		})
		t.Run("should return bad request when given an invalid email", func(t *testing.T) {
			// A username is not a valid email address.
			_, resp, err := client.GetUserByEmail(GenerateTestUsername(), "")
			require.Error(t, err)
			CheckBadRequestStatus(t, resp)
		})
		t.Run("should return 404 when given a non-existent email", func(t *testing.T) {
			_, resp, err := client.GetUserByEmail(th.GenerateTestEmail(), "")
			require.Error(t, err)
			CheckNotFoundStatus(t, resp)
		})
	})
	t.Run("should sanitize full name for non-admin based on privacy settings", func(t *testing.T) {
		// Hide full names: regular client should see blank first/last names.
		th.App.UpdateConfig(func(cfg *model.Config) {
			*cfg.PrivacySettings.ShowEmailAddress = true
			*cfg.PrivacySettings.ShowFullName = false
		})
		ruser, _, err := th.Client.GetUserByEmail(user.Email, "")
		require.NoError(t, err)
		assert.Equal(t, "", ruser.FirstName, "first name should be blank")
		assert.Equal(t, "", ruser.LastName, "last name should be blank")
		// Re-enable full names: the fields should come back.
		th.App.UpdateConfig(func(cfg *model.Config) {
			*cfg.PrivacySettings.ShowFullName = true
		})
		ruser, _, err = th.Client.GetUserByEmail(user.Email, "")
		require.NoError(t, err)
		assert.NotEqual(t, "", ruser.FirstName, "first name should be set")
		assert.NotEqual(t, "", ruser.LastName, "last name should be set")
	})
	t.Run("should return forbidden for non-admin when privacy settings hide email", func(t *testing.T) {
		// With emails hidden, lookup-by-email is forbidden for regular clients.
		th.App.UpdateConfig(func(cfg *model.Config) {
			*cfg.PrivacySettings.ShowEmailAddress = false
		})
		_, resp, err := th.Client.GetUserByEmail(user.Email, "")
		require.Error(t, err)
		CheckForbiddenStatus(t, resp)
		th.App.UpdateConfig(func(cfg *model.Config) {
			*cfg.PrivacySettings.ShowEmailAddress = true
		})
		ruser, _, err := th.Client.GetUserByEmail(user.Email, "")
		require.NoError(t, err)
		assert.Equal(t, user.Email, ruser.Email, "email should be set")
	})
	// Admin and local clients bypass privacy-based sanitization entirely.
	th.TestForSystemAdminAndLocal(t, func(t *testing.T, client *model.Client4) {
		t.Run("should not sanitize full name for admin, regardless of privacy settings", func(t *testing.T) {
			th.App.UpdateConfig(func(cfg *model.Config) {
				*cfg.PrivacySettings.ShowEmailAddress = true
				*cfg.PrivacySettings.ShowFullName = false
			})
			ruser, _, err := client.GetUserByEmail(user.Email, "")
			require.NoError(t, err)
			assert.NotEqual(t, "", ruser.FirstName, "first name should be set")
			assert.NotEqual(t, "", ruser.LastName, "last name should be set")
			th.App.UpdateConfig(func(cfg *model.Config) {
				*cfg.PrivacySettings.ShowFullName = true
			})
			ruser, _, err = client.GetUserByEmail(user.Email, "")
			require.NoError(t, err)
			assert.NotEqual(t, "", ruser.FirstName, "first name should be set")
			assert.NotEqual(t, "", ruser.LastName, "last name should be set")
		})
		t.Run("should always return email for admin, regardless of privacy settings", func(t *testing.T) {
			th.App.UpdateConfig(func(cfg *model.Config) {
				*cfg.PrivacySettings.ShowEmailAddress = false
			})
			ruser, _, err := client.GetUserByEmail(user.Email, "")
			require.NoError(t, err)
			assert.Equal(t, user.Email, ruser.Email, "email should be set")
			th.App.UpdateConfig(func(cfg *model.Config) {
				*cfg.PrivacySettings.ShowEmailAddress = true
			})
			ruser, _, err = client.GetUserByEmail(user.Email, "")
			require.NoError(t, err)
			assert.Equal(t, user.Email, ruser.Email, "email should be set")
		})
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1774
} | [
2830,
3393,
1949,
1474,
87197,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1155,
340,
16867,
270,
836,
682,
4454,
2822,
19060,
1669,
270,
7251,
1474,
741,
19060,
2354,
88004,
11,
8358,
1848,
1669,
270,
16620,
7210,
2959,
7251,
147... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTeamsService_AddTeamRepoBySlug_noAccess(t *testing.T) {
client, mux, _, teardown := setup()
defer teardown()
mux.HandleFunc("/orgs/org/teams/slug/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, "PUT")
w.WriteHeader(http.StatusUnprocessableEntity)
})
ctx := context.Background()
_, err := client.Teams.AddTeamRepoBySlug(ctx, "org", "slug", "owner", "repo", nil)
if err == nil {
t.Errorf("Expcted error to be returned")
}
} | explode_data.jsonl/4537 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 195
} | [
2830,
3393,
60669,
1860,
21346,
14597,
25243,
1359,
54968,
6536,
6054,
1155,
353,
8840,
836,
8,
341,
25291,
11,
59807,
11,
8358,
49304,
1669,
6505,
741,
16867,
49304,
2822,
2109,
2200,
63623,
4283,
1775,
82,
41361,
14,
38496,
2687,
43213,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestJobsRunner(t *testing.T) {
c := newTestClient(t)
testCases := []*struct {
dirname string
wantErr bool
}{
{
"testdata/goodjobs01/*.yml",
false,
},
{
"testdata/badjobs01/*.yml",
true,
},
}
for _, tc := range testCases {
if err := c.RunJobsInFolder(tc.dirname); tc.wantErr != (err != nil) {
t.Errorf("got err: %v, wantErr: %v", err, tc.wantErr)
}
}
} | explode_data.jsonl/35691 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 194
} | [
2830,
3393,
40667,
19486,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
501,
2271,
2959,
1155,
340,
18185,
37302,
1669,
29838,
1235,
341,
197,
48532,
606,
914,
198,
197,
50780,
7747,
1807,
198,
197,
59403,
197,
197,
515,
298,
197,
1,
9242... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSetTiFlashConfigDefault(t *testing.T) {
g := NewGomegaWithT(t)
type testcase struct {
name string
config v1alpha1.TiFlashConfig
expect v1alpha1.TiFlashConfig
}
tests := []*testcase{
{
name: "nil config",
config: v1alpha1.TiFlashConfig{},
expect: defaultTiFlashConfig,
},
{
name: "custom config",
config: customTiFlashConfig,
expect: customTiFlashConfig,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
setTiFlashConfigDefault(&test.config, "test", "test")
g.Expect(test.config).To(Equal(test.expect))
})
}
} | explode_data.jsonl/76414 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 255
} | [
2830,
3393,
1649,
45351,
23876,
2648,
3675,
1155,
353,
8840,
836,
8,
341,
3174,
1669,
1532,
38,
32696,
2354,
51,
1155,
692,
13158,
70080,
2036,
341,
197,
11609,
256,
914,
198,
197,
25873,
348,
16,
7141,
16,
836,
72,
23876,
2648,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestManifestRewrite drives the manifest past its deletions threshold and
// verifies that reopening it yields a rewritten (compacted) manifest
// containing only the single surviving table.
func TestManifestRewrite(t *testing.T) {
	dir, err := ioutil.TempDir("", "badger-test")
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	deletionsThreshold := 10
	mf, m, err := helpOpenOrCreateManifestFile(dir, false, deletionsThreshold)
	// Deferred close reads mf at function exit, so it also covers the file
	// handle from the reopen below (mf is reassigned before then).
	defer func() {
		if mf != nil {
			mf.close()
		}
	}()
	require.NoError(t, err)
	require.Equal(t, 0, m.Creations)
	require.Equal(t, 0, m.Deletions)
	// Seed with table 0 so each loop iteration can delete the previous table.
	err = mf.addChanges([]*pb.ManifestChange{
		newCreateChange(0, 0, nil),
	})
	require.NoError(t, err)
	// Create table i+1 and delete table i, 3x the threshold times, which is
	// enough deletions to trigger a manifest rewrite.
	for i := uint64(0); i < uint64(deletionsThreshold*3); i++ {
		ch := []*pb.ManifestChange{
			newCreateChange(i+1, 0, nil),
			newDeleteChange(i),
		}
		err := mf.addChanges(ch)
		require.NoError(t, err)
	}
	err = mf.close()
	require.NoError(t, err)
	mf = nil
	// Reopen: the rewritten manifest must contain only the last table.
	mf, m, err = helpOpenOrCreateManifestFile(dir, false, deletionsThreshold)
	require.NoError(t, err)
	require.Equal(t, map[uint64]TableManifest{
		uint64(deletionsThreshold * 3): {Level: 0, Checksum: []byte{}},
	}, m.Tables)
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 428
} | [
2830,
3393,
38495,
58465,
1247,
1155,
353,
8840,
836,
8,
341,
48532,
11,
1848,
1669,
43144,
65009,
6184,
19814,
330,
13855,
1389,
16839,
1138,
17957,
35699,
1155,
11,
1848,
340,
16867,
2643,
84427,
14161,
340,
58351,
1149,
908,
37841,
166... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestRuleTooManyKeys(t *testing.T) {
common.Log.Debug("Entering function: %s", common.GetFunctionName())
sqls := []string{
"create table tbl ( a char(10), b int, primary key (`a`)) engine=InnoDB;",
"create table tbl ( a varchar(64) not null, b int, PRIMARY KEY (`a`), key `idx_a_b` (`a`,`b`)) engine=InnoDB",
}
for _, sql := range sqls {
q, err := NewQuery4Audit(sql)
if err == nil {
common.Config.MaxIdxCount = 0
rule := q.RuleTooManyKeys()
if rule.Item != "KEY.005" {
t.Error("Rule not match:", rule.Item, "Expect : KEY.005")
}
} else {
t.Error("sqlparser.Parse Error:", err)
}
}
common.Log.Debug("Exiting function: %s", common.GetFunctionName())
} | explode_data.jsonl/76829 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 292
} | [
2830,
3393,
11337,
31246,
8441,
8850,
1155,
353,
8840,
836,
8,
341,
83825,
5247,
20345,
445,
82867,
729,
25,
1018,
82,
497,
4185,
2234,
5152,
675,
2398,
30633,
82,
1669,
3056,
917,
515,
197,
197,
1,
3182,
1965,
21173,
320,
264,
1161,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestTruncateMessages checks that with SanitizeLogMessages enabled the error
// returned to the caller keeps the full SQL and bind vars, while the logged
// message truncates the SQL (per sqlparser.TruncateErrLen) and redacts the
// bind vars. Exercised at a small and a large truncation limit.
func TestTruncateMessages(t *testing.T) {
	config := tabletenv.NewDefaultConfig()
	config.TerseErrors = false
	// Sanitize the log messages, which means that the bind vars are omitted
	config.SanitizeLogMessages = true
	tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{})
	tl := newTestLogger()
	defer tl.Close()
	// NOTE: mutates package-global truncation length; restored to 0 at the end.
	*sqlparser.TruncateErrLen = 52
	sql := "select * from test_table where xyz = :vtg1 order by abc desc"
	sqlErr := mysql.NewSQLError(10, "HY000", "sensitive message")
	sqlErr.Query = "select * from test_table where xyz = 'this is kinda long eh'"
	err := tsv.convertAndLogError(
		ctx,
		sql,
		map[string]*querypb.BindVariable{"vtg1": sqltypes.StringBindVariable("this is kinda long eh")},
		sqlErr,
		nil,
	)
	// Error not truncated
	wantErr := "sensitive message (errno 10) (sqlstate HY000): Sql: \"select * from test_table where xyz = :vtg1 order by abc desc\", BindVars: {vtg1: \"type:VARCHAR value:\\\"this is kinda long eh\\\"\"}"
	if err == nil || err.Error() != wantErr {
		t.Errorf("error got '%v', want '%s'", err, wantErr)
	}
	// but log *is* truncated, and sanitized
	wantLog := "sensitive message (errno 10) (sqlstate HY000): Sql: \"select * from test_table where xyz = :vt [TRUNCATED]\", BindVars: {[REDACTED]}"
	if wantLog != tl.getLog(0) {
		t.Errorf("log got '%s', want '%s'", tl.getLog(0), wantLog)
	}
	// Raise the limit so the SQL fits without truncation.
	*sqlparser.TruncateErrLen = 140
	err = tsv.convertAndLogError(
		ctx,
		sql,
		map[string]*querypb.BindVariable{"vtg1": sqltypes.StringBindVariable("this is kinda long eh")},
		sqlErr,
		nil,
	)
	// Error not truncated
	wantErr = "sensitive message (errno 10) (sqlstate HY000): Sql: \"select * from test_table where xyz = :vtg1 order by abc desc\", BindVars: {vtg1: \"type:VARCHAR value:\\\"this is kinda long eh\\\"\"}"
	if err == nil || err.Error() != wantErr {
		t.Errorf("error got '%v', want '%s'", err, wantErr)
	}
	// Log not truncated, since our limit is large enough now, but it is still sanitized
	wantLog = "sensitive message (errno 10) (sqlstate HY000): Sql: \"select * from test_table where xyz = :vtg1 order by abc desc\", BindVars: {[REDACTED]}"
	if wantLog != tl.getLog(1) {
		t.Errorf("log got '%s', want '%s'", tl.getLog(1), wantLog)
	}
	// Restore the global truncation limit for other tests.
	*sqlparser.TruncateErrLen = 0
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 868
} | [
2830,
3393,
1282,
26900,
15820,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
1965,
1960,
85,
7121,
3675,
2648,
741,
25873,
836,
261,
325,
13877,
284,
895,
198,
197,
322,
5836,
26310,
279,
1487,
6605,
11,
892,
3363,
429,
279,
10719,
1994... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
// TestPrintNodeExternalIP checks the printed node table rows for various
// address configurations: external-only, internal-only, and mixed (where the
// first address of each type wins).
func TestPrintNodeExternalIP(t *testing.T) {
	table := []struct {
		node api.Node
		expected []metav1.TableRow
	}{
		{
			// Only an external IP: internal column falls back to "<none>".
			node: api.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "foo1"},
				Status: api.NodeStatus{Addresses: []api.NodeAddress{{Type: api.NodeExternalIP, Address: "1.1.1.1"}}},
			},
			// Columns: Name, Status, Roles, Age, KubeletVersion, NodeInternalIP, NodeExternalIP, OSImage, KernelVersion, ContainerRuntimeVersion
			expected: []metav1.TableRow{
				{
					Cells: []interface{}{"foo1", "Unknown", "<none>", "<unknown>", "", "<none>", "1.1.1.1", "<unknown>", "<unknown>", "<unknown>"},
				},
			},
		},
		{
			// Only an internal IP: external column falls back to "<none>".
			node: api.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "foo2"},
				Status: api.NodeStatus{Addresses: []api.NodeAddress{{Type: api.NodeInternalIP, Address: "1.1.1.1"}}},
			},
			// Columns: Name, Status, Roles, Age, KubeletVersion, NodeInternalIP, NodeExternalIP, OSImage, KernelVersion, ContainerRuntimeVersion
			expected: []metav1.TableRow{
				{
					Cells: []interface{}{"foo2", "Unknown", "<none>", "<unknown>", "", "1.1.1.1", "<none>", "<unknown>", "<unknown>", "<unknown>"},
				},
			},
		},
		{
			// Multiple addresses: the first of each type is printed.
			node: api.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "foo3"},
				Status: api.NodeStatus{Addresses: []api.NodeAddress{
					{Type: api.NodeExternalIP, Address: "2.2.2.2"},
					{Type: api.NodeInternalIP, Address: "3.3.3.3"},
					{Type: api.NodeExternalIP, Address: "4.4.4.4"},
				}},
			},
			expected: []metav1.TableRow{
				{
					Cells: []interface{}{"foo3", "Unknown", "<none>", "<unknown>", "", "3.3.3.3", "2.2.2.2", "<unknown>", "<unknown>", "<unknown>"},
				},
			},
		},
	}
	for i, test := range table {
		rows, err := printNode(&test.node, printers.GenerateOptions{Wide: true})
		if err != nil {
			t.Fatalf("An error occurred generating table rows Node: %#v", err)
		}
		// Drop the embedded runtime objects so DeepEqual compares cells only.
		for i := range rows {
			rows[i].Object.Object = nil
		}
		if !reflect.DeepEqual(test.expected, rows) {
			t.Errorf("%d mismatch: %s", i, diff.ObjectReflectDiff(test.expected, rows))
		}
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 873
} | [
2830,
3393,
8994,
1955,
25913,
3298,
1155,
353,
8840,
836,
8,
1476,
26481,
1669,
3056,
1235,
341,
197,
20831,
257,
6330,
21714,
198,
197,
42400,
3056,
4059,
402,
16,
18257,
3102,
198,
197,
59403,
197,
197,
515,
298,
20831,
25,
6330,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestKubectlWaitForDeletionsFails simulates two resources that are still
// pending deletion (deletionTimestamp set) past the deployer's Max wait and
// asserts that Deploy fails with a message naming both resources.
func TestKubectlWaitForDeletionsFails(t *testing.T) {
	testutil.Run(t, "", func(t *testutil.T) {
		tmpDir := t.NewTempDir().Write("deployment-web.yaml", DeploymentWebYAML)
		t.Override(&client.Client, deployutil.MockK8sClient)
		// Mocked kubectl: version check, dry-run render, then a `get` that
		// reports both resources as still terminating.
		t.Override(&util.DefaultExecCommand, testutil.
			CmdRunOut("kubectl version --client -ojson", KubectlVersion112).
			AndRunOut("kubectl --context kubecontext create --dry-run -oyaml -f "+tmpDir.Path("deployment-web.yaml"), DeploymentWebYAML).
			AndRunInputOut("kubectl --context kubecontext get -f - --ignore-not-found -ojson", DeploymentWebYAMLv1, `{
    "items":[
        {"metadata":{"deletionTimestamp":"2020-07-24T12:40:32Z","name":"leeroy-web"}},
        {"metadata":{"deletionTimestamp":"2020-07-24T12:40:32Z","name":"leeroy-app"}}
    ]
}`),
		)
		// Max (100ms) is far below Delay (10s), so the wait gives up quickly.
		deployer, err := NewDeployer(&kubectlConfig{
			workingDir: tmpDir.Root(),
			waitForDeletions: config.WaitForDeletions{
				Enabled: true,
				Delay: 10 * time.Second,
				Max: 100 * time.Millisecond,
			},
		}, &label.DefaultLabeller{}, &latestV1.KubectlDeploy{Manifests: []string{tmpDir.Path("deployment-web.yaml")}})
		t.RequireNoError(err)
		err = deployer.Deploy(context.Background(), ioutil.Discard, []graph.Artifact{
			{ImageName: "leeroy-web", Tag: "leeroy-web:v1"},
		})
		t.CheckErrorContains(`2 resources failed to complete their deletion before a new deployment: "leeroy-web", "leeroy-app"`, err)
	})
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 587
} | [
2830,
3393,
42,
53380,
14190,
2461,
1912,
1149,
908,
37,
6209,
1155,
353,
8840,
836,
8,
341,
18185,
1314,
16708,
1155,
11,
7342,
2915,
1155,
353,
1944,
1314,
836,
8,
341,
197,
20082,
6184,
1669,
259,
7121,
12151,
6184,
1005,
7985,
445... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTdStorage(t *testing.T) {
db := NewMemoryDatabase()
// Create a test TD to move around the database and make sure it's really new
hash, td := common.Hash{}, big.NewInt(314)
if entry := ReadTd(db, hash, 0); entry != nil {
t.Fatalf("Non existent TD returned: %v", entry)
}
// Write and verify the TD in the database
WriteTd(db, hash, 0, td)
if entry := ReadTd(db, hash, 0); entry == nil {
t.Fatalf("Stored TD not found")
} else if entry.Cmp(td) != 0 {
t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
}
// Delete the TD and verify the execution
DeleteTd(db, hash, 0)
if entry := ReadTd(db, hash, 0); entry != nil {
t.Fatalf("Deleted TD returned: %v", entry)
}
} | explode_data.jsonl/72783 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 264
} | [
2830,
3393,
95976,
5793,
1155,
353,
8840,
836,
8,
341,
20939,
1669,
1532,
10642,
5988,
2822,
197,
322,
4230,
264,
1273,
27716,
311,
3271,
2163,
279,
4625,
323,
1281,
2704,
432,
594,
2167,
501,
198,
50333,
11,
17941,
1669,
4185,
15103,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestAddPodOrphan(t *testing.T) {
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
ds1 := newDaemonSet("foo1")
ds1.Spec.UpdateStrategy = *strategy
ds2 := newDaemonSet("foo2")
ds2.Spec.UpdateStrategy = *strategy
ds3 := newDaemonSet("foo3")
ds3.Spec.UpdateStrategy = *strategy
ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2
manager.dsStore.Add(ds1)
manager.dsStore.Add(ds2)
manager.dsStore.Add(ds3)
// Make pod an orphan. Expect matching sets to be queued.
pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
manager.addPod(pod)
if got, want := manager.queue.Len(), 2; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) {
t.Errorf("getQueuedKeys() = %v, want %v", got, want)
}
}
} | explode_data.jsonl/50354 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 404
} | [
2830,
3393,
2212,
23527,
2195,
9943,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
8282,
1669,
2088,
2647,
2580,
69388,
368,
341,
197,
92272,
11,
8358,
8358,
1848,
1669,
501,
2271,
2051,
741,
197,
743,
1848,
961,
2092,
341,
298,
3244,
307... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestManifest_ResolveImageMapMissingKey(t *testing.T) {
cxt := context.NewTestContext(t)
m := &Manifest{
Name: "mybundle",
ImageMap: map[string]MappedImage{
"something": MappedImage{
Repository: "blah/blah",
Digest: "sha1234:cafebab",
},
},
}
rm := NewRuntimeManifest(cxt.Context, ActionInstall, m)
s := &Step{
Data: map[string]interface{}{
"description": "a test step exercising bundle image interpolation",
"Arguments": []string{
"{{ bundle.images.something.Fake }}",
},
},
}
err := rm.ResolveStep(s)
assert.Error(t, err)
} | explode_data.jsonl/37728 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 242
} | [
2830,
3393,
38495,
62,
56808,
1906,
2227,
25080,
1592,
1155,
353,
8840,
836,
8,
1476,
1444,
2252,
1669,
2266,
7121,
2271,
1972,
1155,
340,
2109,
1669,
609,
38495,
515,
197,
21297,
25,
330,
2408,
34518,
756,
197,
53397,
2227,
25,
2415,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPhysicalPlanClone(t *testing.T) {
ctx := mock.NewContext()
col, cst := &expression.Column{RetType: types.NewFieldType(mysql.TypeString)}, &expression.Constant{RetType: types.NewFieldType(mysql.TypeLonglong)}
stats := &property.StatsInfo{RowCount: 1000}
schema := expression.NewSchema(col)
tblInfo := &model.TableInfo{}
idxInfo := &model.IndexInfo{}
hist := &statistics.Histogram{Bounds: chunk.New(nil, 0, 0)}
aggDesc1, err := aggregation.NewAggFuncDesc(ctx, ast.AggFuncAvg, []expression.Expression{col}, false)
require.NoError(t, err)
aggDesc2, err := aggregation.NewAggFuncDesc(ctx, ast.AggFuncCount, []expression.Expression{cst}, true)
require.NoError(t, err)
aggDescs := []*aggregation.AggFuncDesc{aggDesc1, aggDesc2}
// table scan
tableScan := &PhysicalTableScan{
AccessCondition: []expression.Expression{col, cst},
Table: tblInfo,
Hist: hist,
}
tableScan = tableScan.Init(ctx, 0)
tableScan.SetSchema(schema)
require.NoError(t, checkPhysicalPlanClone(tableScan))
// table reader
tableReader := &PhysicalTableReader{
tablePlan: tableScan,
TablePlans: []PhysicalPlan{tableScan},
StoreType: kv.TiFlash,
}
tableReader = tableReader.Init(ctx, 0)
require.NoError(t, checkPhysicalPlanClone(tableReader))
// index scan
indexScan := &PhysicalIndexScan{
AccessCondition: []expression.Expression{col, cst},
Table: tblInfo,
Index: idxInfo,
Hist: hist,
dataSourceSchema: schema,
}
indexScan = indexScan.Init(ctx, 0)
indexScan.SetSchema(schema)
require.NoError(t, checkPhysicalPlanClone(indexScan))
// index reader
indexReader := &PhysicalIndexReader{
indexPlan: indexScan,
IndexPlans: []PhysicalPlan{indexScan},
OutputColumns: []*expression.Column{col, col},
}
indexReader = indexReader.Init(ctx, 0)
require.NoError(t, checkPhysicalPlanClone(indexReader))
// index lookup
indexLookup := &PhysicalIndexLookUpReader{
IndexPlans: []PhysicalPlan{indexReader},
indexPlan: indexScan,
TablePlans: []PhysicalPlan{tableReader},
tablePlan: tableScan,
ExtraHandleCol: col,
PushedLimit: &PushedDownLimit{1, 2},
}
indexLookup = indexLookup.Init(ctx, 0)
require.NoError(t, checkPhysicalPlanClone(indexLookup))
// selection
sel := &PhysicalSelection{Conditions: []expression.Expression{col, cst}}
sel = sel.Init(ctx, stats, 0)
require.NoError(t, checkPhysicalPlanClone(sel))
// projection
proj := &PhysicalProjection{Exprs: []expression.Expression{col, cst}}
proj = proj.Init(ctx, stats, 0)
require.NoError(t, checkPhysicalPlanClone(proj))
// limit
lim := &PhysicalLimit{Count: 1, Offset: 2}
lim = lim.Init(ctx, stats, 0)
require.NoError(t, checkPhysicalPlanClone(lim))
// sort
byItems := []*util.ByItems{{Expr: col}, {Expr: cst}}
sort := &PhysicalSort{ByItems: byItems}
sort = sort.Init(ctx, stats, 0)
require.NoError(t, checkPhysicalPlanClone(sort))
// topN
topN := &PhysicalTopN{ByItems: byItems, Offset: 2333, Count: 2333}
topN = topN.Init(ctx, stats, 0)
require.NoError(t, checkPhysicalPlanClone(topN))
// stream agg
streamAgg := &PhysicalStreamAgg{basePhysicalAgg{
AggFuncs: aggDescs,
GroupByItems: []expression.Expression{col, cst},
}}
streamAgg = streamAgg.initForStream(ctx, stats, 0)
streamAgg.SetSchema(schema)
require.NoError(t, checkPhysicalPlanClone(streamAgg))
// hash agg
hashAgg := &PhysicalHashAgg{basePhysicalAgg{
AggFuncs: aggDescs,
GroupByItems: []expression.Expression{col, cst},
}}
hashAgg = hashAgg.initForHash(ctx, stats, 0)
hashAgg.SetSchema(schema)
require.NoError(t, checkPhysicalPlanClone(hashAgg))
// hash join
hashJoin := &PhysicalHashJoin{
Concurrency: 4,
UseOuterToBuild: true,
}
hashJoin = hashJoin.Init(ctx, stats, 0)
hashJoin.SetSchema(schema)
require.NoError(t, checkPhysicalPlanClone(hashJoin))
// merge join
mergeJoin := &PhysicalMergeJoin{
CompareFuncs: []expression.CompareFunc{expression.CompareInt},
Desc: true,
}
mergeJoin = mergeJoin.Init(ctx, stats, 0)
mergeJoin.SetSchema(schema)
require.NoError(t, checkPhysicalPlanClone(mergeJoin))
} | explode_data.jsonl/25789 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1577
} | [
2830,
3393,
39253,
20485,
37677,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
7860,
7121,
1972,
741,
46640,
11,
93804,
1669,
609,
28099,
6153,
90,
12020,
929,
25,
4494,
7121,
63733,
41546,
83236,
42419,
609,
28099,
58940,
90,
12020,
929,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestApplyCapsToLimitRange(t *testing.T) {
limitRange := apiv1.LimitRangeItem{
Type: apiv1.LimitTypeContainer,
Max: apiv1.ResourceList{
apiv1.ResourceCPU: resource.MustParse("1"),
},
Min: apiv1.ResourceList{
apiv1.ResourceMemory: resource.MustParse("500M"),
},
}
recommendation := vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
{
ContainerName: "container",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: resource.MustParse("2"),
apiv1.ResourceMemory: resource.MustParse("200M"),
},
},
},
}
pod := apiv1.Pod{
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "container",
Resources: apiv1.ResourceRequirements{
Requests: apiv1.ResourceList{
apiv1.ResourceCPU: resource.MustParse("1"),
apiv1.ResourceMemory: resource.MustParse("1G"),
},
Limits: apiv1.ResourceList{
apiv1.ResourceCPU: resource.MustParse("1"),
apiv1.ResourceMemory: resource.MustParse("1G"),
},
},
},
},
},
}
expectedRecommendation := vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
{
ContainerName: "container",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: resource.MustParse("1000m"),
apiv1.ResourceMemory: resource.MustParse("500000000000m"),
},
},
},
}
calculator := fakeLimitRangeCalculator{containerLimitRange: limitRange}
processor := NewCappingRecommendationProcessor(&calculator)
processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod)
assert.NoError(t, err)
assert.Contains(t, annotations, "container")
assert.ElementsMatch(t, []string{"cpu capped to fit Max in container LimitRange", "memory capped to fit Min in container LimitRange"}, annotations["container"])
assert.Equal(t, expectedRecommendation, *processedRecommendation)
} | explode_data.jsonl/10249 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 761
} | [
2830,
3393,
28497,
60741,
1249,
16527,
6046,
1155,
353,
8840,
836,
8,
341,
8810,
2353,
6046,
1669,
1443,
344,
16,
1214,
2353,
6046,
1234,
515,
197,
27725,
25,
1443,
344,
16,
1214,
2353,
929,
4502,
345,
197,
197,
5974,
25,
1443,
344,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWithCommandLineParameters(t *testing.T) {
var bundle *bundle.Bundle
actual := map[string]string{
"overridden": "foo",
}
err := WithCommandLineParameters([]string{"param1.param2=value1", "param3=3", "overridden=bar"})(
&MergeBundleConfig{
bundle: bundle,
params: actual,
})
assert.NilError(t, err)
expected := map[string]string{
"param1.param2": "value1",
"param3": "3",
"overridden": "bar",
}
assert.Assert(t, cmp.DeepEqual(actual, expected))
} | explode_data.jsonl/17701 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 203
} | [
2830,
3393,
2354,
71885,
9706,
1155,
353,
8840,
836,
8,
341,
2405,
12894,
353,
34518,
14757,
198,
88814,
1669,
2415,
14032,
30953,
515,
197,
197,
1,
1975,
42185,
788,
330,
7975,
756,
197,
630,
9859,
1669,
3085,
71885,
9706,
10556,
917,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDeleteScenario(t *testing.T) {
app := model.App{
API: map[string][]model.API{
"POST": []model.API{model.API{
Endpoint: "/post",
Description: "Here is the description",
Scenarios: []model.Scenario{
model.Scenario{
Request: model.Request{
Header: map[string][]string{},
Query: map[string][]string{},
Payload: model.Payload{
Type: "",
Data: ``,
},
},
Response: model.Response{
Payload: model.Payload{
Type: "text",
Data: "Hello World",
},
StatusCode: 201,
},
Delete: true,
},
model.Scenario{
Request: model.Request{
Header: map[string][]string{},
Query: map[string][]string{},
Payload: model.Payload{
Type: "",
Data: ``,
},
},
Response: model.Response{
Payload: model.Payload{
Type: "text",
Data: "Hello World",
},
StatusCode: 201,
},
Delete: true,
},
model.Scenario{
Request: model.Request{
Header: map[string][]string{},
Query: map[string][]string{},
Payload: model.Payload{
Type: "",
Data: ``,
},
},
Response: model.Response{
Payload: model.Payload{
Type: "text",
Data: "Hello World",
},
StatusCode: 201,
},
Delete: true,
},
},
},
},
},
}
req := httptest.NewRequest("POST", "/post", bytes.NewReader([]byte(``)))
w := httptest.NewRecorder()
s := Server{}
s.SetWatcher(types.TestWatcher{})
s.SetComparer(comparer.NewRegexComparer())
s.SetApp(app)
t.Log(s.app)
t.Log(len(s.app.API["POST"][0].Scenarios))
s.ServeHTTP(w, req)
t.Log(len(s.app.API["POST"][0].Scenarios))
t.Log(s.app)
t.Log(s.app)
t.Log(len(s.app.API["POST"][0].Scenarios))
s.ServeHTTP(w, req)
t.Log(len(s.app.API["POST"][0].Scenarios))
t.Log(s.app)
t.Log(s.app)
t.Log(len(s.app.API["POST"][0].Scenarios))
s.ServeHTTP(w, req)
t.Log(len(s.app.API["POST"][0].Scenarios))
t.Log(s.app)
} | explode_data.jsonl/17761 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1116
} | [
2830,
3393,
6435,
54031,
1155,
353,
8840,
836,
8,
341,
28236,
1669,
1614,
5105,
515,
197,
197,
7082,
25,
2415,
14032,
45725,
2528,
24922,
515,
298,
197,
1,
2946,
788,
3056,
2528,
24922,
90,
2528,
24922,
515,
571,
197,
27380,
25,
262,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDownloadArchiveMissing(t *testing.T) {
a, err := DownloadArchive(time.Date(2020, time.September, 6, 12, 0, 0, 0, time.UTC))
if err != nil {
t.Fatal(err)
}
if a != nil {
t.Fatal("Time travel!", a)
}
} | explode_data.jsonl/11761 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 93
} | [
2830,
3393,
11377,
42502,
25080,
1155,
353,
8840,
836,
8,
341,
11323,
11,
1848,
1669,
8577,
42502,
9730,
8518,
7,
17,
15,
17,
15,
11,
882,
5732,
417,
1377,
11,
220,
21,
11,
220,
16,
17,
11,
220,
15,
11,
220,
15,
11,
220,
15,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestRateReset(t *testing.T) {
r := NewRate(100)
r.Sleep()
if r.actualCycleTime.IsZero() {
t.Fail()
}
r.Reset()
if !r.actualCycleTime.IsZero() {
t.Fail()
}
} | explode_data.jsonl/6814 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 86
} | [
2830,
3393,
11564,
14828,
1155,
353,
8840,
836,
8,
341,
7000,
1669,
1532,
11564,
7,
16,
15,
15,
340,
7000,
31586,
2822,
743,
435,
68120,
44820,
1462,
4506,
17999,
368,
341,
197,
3244,
57243,
741,
197,
532,
7000,
36660,
741,
743,
753,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 3 |
func TestNoMeter(t *testing.T) {
cr := NewChargeRater(util.NewLogger("foo"), nil)
clck := clock.NewMock()
cr.clck = clck
cr.StartCharge(false)
// 1kWh
clck.Add(time.Hour)
cr.SetChargePower(1e3)
cr.SetChargePower(1e3) // should be ignored as time is identical
// 0kWh
clck.Add(time.Hour)
cr.SetChargePower(0)
cr.StopCharge()
// 1kWh - not counted
clck.Add(time.Hour)
cr.SetChargePower(1e3)
if f, err := cr.ChargedEnergy(); f != 1 || err != nil {
t.Errorf("energy: %.1f %v", f, err)
}
// continue
cr.StartCharge(true)
// 1kWh
clck.Add(2 * time.Hour)
cr.SetChargePower(1e3)
cr.StopCharge()
if f, err := cr.ChargedEnergy(); f != 3 || err != nil {
t.Errorf("energy: %.1f %v", f, err)
}
} | explode_data.jsonl/46521 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 319
} | [
2830,
3393,
2753,
68224,
1155,
353,
8840,
836,
8,
341,
91492,
1669,
1532,
55363,
49,
962,
67811,
7121,
7395,
445,
7975,
3975,
2092,
340,
39407,
377,
1669,
8866,
7121,
11571,
741,
91492,
6751,
377,
284,
1185,
377,
271,
91492,
12101,
5536... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestReadingJsField(t *testing.T) {
a := StructWithJsField1{Object: js.Global.Get("Array").New(42)}
b := &StructWithJsField2{object: js.Global.Get("Array").New(42)}
wa := Wrapper1{StructWithJsField1: a}
wb := Wrapper2{innerStruct: b}
if a.Length != 42 || b.Length != 42 || wa.Length != 42 || wa.WrapperLength != 42 || wb.WrapperLength != 42 {
t.Fail()
}
} | explode_data.jsonl/56781 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 141
} | [
2830,
3393,
31899,
30480,
1877,
1155,
353,
8840,
836,
8,
341,
11323,
1669,
16139,
2354,
30480,
1877,
16,
90,
1190,
25,
6994,
27381,
2234,
445,
1857,
1827,
3564,
7,
19,
17,
10569,
2233,
1669,
609,
9422,
2354,
30480,
1877,
17,
90,
1700,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestGetPodVolumesFromDisk(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
plug := &volume.FakeVolumePlugin{PluginName: "fake", Host: nil}
kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plug}, &volumeHost{kubelet})
volsOnDisk := []struct {
podUID types.UID
volName string
}{
{"pod1", "vol1"},
{"pod1", "vol2"},
{"pod2", "vol1"},
}
expectedPaths := []string{}
for i := range volsOnDisk {
fv := volume.FakeVolume{PodUID: volsOnDisk[i].podUID, VolName: volsOnDisk[i].volName, Plugin: plug}
fv.SetUp(nil)
expectedPaths = append(expectedPaths, fv.GetPath())
}
volumesFound := kubelet.getPodVolumesFromDisk()
if len(volumesFound) != len(expectedPaths) {
t.Errorf("Expected to find %d cleaners, got %d", len(expectedPaths), len(volumesFound))
}
for _, ep := range expectedPaths {
found := false
for _, cl := range volumesFound {
if ep == cl.Cleaner.GetPath() {
found = true
break
}
}
if !found {
t.Errorf("Could not find a volume with path %s", ep)
}
}
if plug.NewDetacherCallCount != len(volsOnDisk) {
t.Errorf("Expected plugin NewDetacher to be called %d times but got %d", len(volsOnDisk), plug.NewDetacherCallCount)
}
} | explode_data.jsonl/43299 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 493
} | [
2830,
3393,
1949,
23527,
96325,
3830,
47583,
1155,
353,
8840,
836,
8,
341,
18185,
42,
3760,
1149,
1669,
501,
2271,
42,
3760,
1149,
1155,
340,
16463,
3760,
1149,
1669,
1273,
42,
3760,
1149,
5202,
3760,
1149,
198,
197,
47474,
1669,
609,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestWriteFileToUserHomeDir(t *testing.T) {
hds := NewHomedirService()
content := []byte(`t`)
pathToFile := "testfile"
user, _ := user.Current()
err := hds.WriteFileToUserHomeDir(content, pathToFile)
assert.FileExists(t, filepath.Join(user.HomeDir, pathToFile))
assert.Nil(t, err)
os.RemoveAll(filepath.Join(user.HomeDir, pathToFile))
} | explode_data.jsonl/35861 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 136
} | [
2830,
3393,
7985,
1703,
1249,
1474,
7623,
6184,
1155,
353,
8840,
836,
8,
341,
9598,
5356,
1669,
1532,
39,
24139,
404,
1860,
741,
27751,
1669,
3056,
3782,
5809,
83,
24183,
26781,
41550,
1669,
330,
1944,
1192,
698,
19060,
11,
716,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRenamenx(t *testing.T) {
s, err := Run()
ok(t, err)
defer s.Close()
c, err := redis.Dial("tcp", s.Addr())
ok(t, err)
// Non-existing key
{
_, err := redis.Int(c.Do("RENAMENX", "nosuch", "to"))
assert(t, err != nil, "do RENAMENX error")
}
// Same key
{
_, err := redis.Int(c.Do("RENAMENX", "from", "from"))
assert(t, err != nil, "do RENAMENX error")
}
// Move a string key
{
s.Set("from", "value")
n, err := redis.Int(c.Do("RENAMENX", "from", "to"))
ok(t, err)
equals(t, 1, n)
equals(t, false, s.Exists("from"))
equals(t, true, s.Exists("to"))
s.CheckGet(t, "to", "value")
}
// Move over something which exists
{
s.Set("from", "string value")
s.Set("to", "value")
n, err := redis.Int(c.Do("RENAMENX", "from", "to"))
ok(t, err)
equals(t, 0, n)
equals(t, true, s.Exists("from"))
equals(t, true, s.Exists("to"))
s.CheckGet(t, "from", "string value")
s.CheckGet(t, "to", "value")
}
// Wrong usage
{
_, err := redis.Int(c.Do("RENAMENX"))
assert(t, err != nil, "do RENAMENX error")
_, err = redis.Int(c.Do("RENAMENX", "too few"))
assert(t, err != nil, "do RENAMENX error")
_, err = redis.Int(c.Do("RENAMENX", "some", "spurious", "arguments"))
assert(t, err != nil, "do RENAMENX error")
}
} | explode_data.jsonl/23154 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 618
} | [
2830,
3393,
34625,
24774,
87,
1155,
353,
8840,
836,
8,
341,
1903,
11,
1848,
1669,
6452,
741,
59268,
1155,
11,
1848,
340,
16867,
274,
10421,
741,
1444,
11,
1848,
1669,
20870,
98462,
445,
27161,
497,
274,
93626,
2398,
59268,
1155,
11,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRename(t *testing.T) {
s, err := Run()
ok(t, err)
defer s.Close()
c, err := redis.Dial("tcp", s.Addr())
ok(t, err)
// Non-existing key
{
_, err := redis.Int(c.Do("RENAME", "nosuch", "to"))
assert(t, err != nil, "do RENAME error")
}
// Same key
{
_, err := redis.Int(c.Do("RENAME", "from", "from"))
assert(t, err != nil, "do RENAME error")
}
// Move a string key
{
s.Set("from", "value")
str, err := redis.String(c.Do("RENAME", "from", "to"))
ok(t, err)
equals(t, "OK", str)
equals(t, false, s.Exists("from"))
equals(t, true, s.Exists("to"))
s.CheckGet(t, "to", "value")
}
// Move a hash key
{
s.HSet("from", "key", "value")
str, err := redis.String(c.Do("RENAME", "from", "to"))
ok(t, err)
equals(t, "OK", str)
equals(t, false, s.Exists("from"))
equals(t, true, s.Exists("to"))
equals(t, "value", s.HGet("to", "key"))
}
// Move over something which exists
{
s.Set("from", "string value")
s.HSet("to", "key", "value")
s.SetExpire("from", 999999)
str, err := redis.String(c.Do("RENAME", "from", "to"))
ok(t, err)
equals(t, "OK", str)
equals(t, false, s.Exists("from"))
equals(t, true, s.Exists("to"))
s.CheckGet(t, "to", "string value")
equals(t, 0, s.Expire("from"))
equals(t, 999999, s.Expire("to"))
}
// Wrong usage
{
_, err := redis.Int(c.Do("RENAME"))
assert(t, err != nil, "do RENAME error")
_, err = redis.Int(c.Do("RENAME", "too few"))
assert(t, err != nil, "do RENAME error")
_, err = redis.Int(c.Do("RENAME", "some", "spurious", "arguments"))
assert(t, err != nil, "do RENAME error")
}
} | explode_data.jsonl/23152 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 764
} | [
2830,
3393,
88757,
1155,
353,
8840,
836,
8,
341,
1903,
11,
1848,
1669,
6452,
741,
59268,
1155,
11,
1848,
340,
16867,
274,
10421,
741,
1444,
11,
1848,
1669,
20870,
98462,
445,
27161,
497,
274,
93626,
2398,
59268,
1155,
11,
1848,
692,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestHandleResourceExhausted(t *testing.T) {
serviceName := "test-service"
procedureName := "test-procedure"
port := uint16(8000)
resourceExhaustedHandler := &types.UnaryHandler{
Handler: api.UnaryHandlerFunc(func(context.Context, *transport.Request, transport.ResponseWriter) error {
// eg: simulating a rate limiter that's reached its limit
return yarpcerrors.Newf(yarpcerrors.CodeResourceExhausted, "resource exhausted: rate limit exceeded")
})}
service := yarpctest.TChannelService(
yarpctest.Name(serviceName),
yarpctest.Port(port),
yarpctest.Proc(yarpctest.Name(procedureName), resourceExhaustedHandler),
)
require.NoError(t, service.Start(t))
defer func() { require.NoError(t, service.Stop(t)) }()
requests := yarpctest.ConcurrentAction(
yarpctest.TChannelRequest(
yarpctest.Service(serviceName),
yarpctest.Port(port),
yarpctest.Procedure(procedureName),
yarpctest.GiveTimeout(time.Millisecond*100),
// all TChannel requests should timeout and never actually receive
// the resource exhausted error
yarpctest.WantError("timeout"),
),
10,
)
requests.Run(t)
} | explode_data.jsonl/77435 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 409
} | [
2830,
3393,
6999,
4783,
840,
15074,
291,
1155,
353,
8840,
836,
8,
341,
52934,
675,
1669,
330,
1944,
23461,
698,
197,
21141,
675,
1669,
330,
1944,
9838,
13196,
698,
52257,
1669,
2622,
16,
21,
7,
23,
15,
15,
15,
692,
50346,
840,
15074... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_getIssueFromRef(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
repo := AssertExistsAndLoadBean(t, &Repository{ID: 1}).(*Repository)
for _, test := range []struct {
Ref string
ExpectedIssueID int64
}{
{"#2", 2},
{"reopen #2", 2},
{"user2/repo2#1", 4},
{"fixes user2/repo2#1", 4},
} {
issue, err := getIssueFromRef(repo, test.Ref)
assert.NoError(t, err)
if assert.NotNil(t, issue) {
assert.EqualValues(t, test.ExpectedIssueID, issue.ID)
}
}
for _, badRef := range []string{
"doesnotexist/doesnotexist#1",
fmt.Sprintf("#%d", NonexistentID),
} {
issue, err := getIssueFromRef(repo, badRef)
assert.NoError(t, err)
assert.Nil(t, issue)
}
} | explode_data.jsonl/74209 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 323
} | [
2830,
3393,
3062,
42006,
3830,
3945,
1155,
353,
8840,
836,
8,
341,
6948,
35699,
1155,
11,
31166,
2271,
5988,
2398,
17200,
5368,
1669,
5319,
15575,
3036,
5879,
10437,
1155,
11,
609,
4624,
90,
915,
25,
220,
16,
16630,
4071,
4624,
340,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestDisconnect(t *testing.T) {
assert := assert.New(t)
//
testScheduler := &KubernetesScheduler{
offers: offers.CreateRegistry(offers.RegistryConfig{
Compat: func(o *mesos.Offer) bool {
return true
},
// remember expired offers so that we can tell if a previously scheduler offer relies on one
LingerTTL: schedcfg.DefaultOfferLingerTTL,
TTL: schedcfg.DefaultOfferTTL,
ListenerDelay: schedcfg.DefaultListenerDelay,
}),
slaves: newSlaveStorage(),
}
hostname := "h1"
offer1 := &mesos.Offer{Id: util.NewOfferID("test1"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
offers1 := []*mesos.Offer{offer1}
testScheduler.ResourceOffers(nil, offers1)
offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
offers2 := []*mesos.Offer{offer2}
testScheduler.ResourceOffers(nil, offers2)
//add another offer from different slaveID
hostname2 := "h2"
offer3 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
offers3 := []*mesos.Offer{offer3}
testScheduler.ResourceOffers(nil, offers3)
//disconnect
testScheduler.Disconnected(nil)
//all offers should be removed
assert.Equal(0, getNumberOffers(testScheduler.offers))
//slave hostnames should still be all present
assert.Equal(2, len(testScheduler.slaves.getSlaveIds()))
} | explode_data.jsonl/63775 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 545
} | [
2830,
3393,
60651,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
197,
2289,
18185,
38878,
1669,
609,
42,
29827,
38878,
515,
197,
197,
71879,
25,
6081,
7251,
15603,
7,
71879,
89142,
2648,
515,
298,
197,
8712,
25,
291... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCachedConnQueryRow(t *testing.T) {
r, clean, err := redistest.CreateRedis()
assert.Nil(t, err)
defer clean()
const (
key = "user"
value = "any"
)
var conn trackedConn
var user string
var ran bool
c := NewNodeConn(&conn, r, cache.WithExpiry(time.Second*30))
err = c.QueryRow(&user, key, func(conn sqlx.SqlConn, v interface{}) error {
ran = true
user = value
return nil
})
assert.Nil(t, err)
actualValue, err := r.Get(key)
assert.Nil(t, err)
var actual string
assert.Nil(t, json.Unmarshal([]byte(actualValue), &actual))
assert.Equal(t, value, actual)
assert.Equal(t, value, user)
assert.True(t, ran)
} | explode_data.jsonl/64129 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 265
} | [
2830,
3393,
70293,
9701,
2859,
3102,
1155,
353,
8840,
836,
8,
341,
7000,
11,
4240,
11,
1848,
1669,
2518,
380,
477,
7251,
48137,
741,
6948,
59678,
1155,
11,
1848,
340,
16867,
4240,
2822,
4777,
2399,
197,
23634,
256,
284,
330,
872,
698,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetChannelStateNoOperationsOnThisChannelYet(t *testing.T) {
channelData := stateServiceTest.defaultChannelData
channelData.AuthorizedAmount = nil
channelData.Signature = nil
stateServiceTest.channelServiceMock.Put(
stateServiceTest.defaultChannelKey,
channelData,
)
defer stateServiceTest.channelServiceMock.Clear()
reply, err := stateServiceTest.service.GetChannelState(
nil,
stateServiceTest.defaultRequest,
)
assert.Nil(t, err)
expectedReply := stateServiceTest.defaultReply
expectedReply.CurrentSignedAmount = nil
expectedReply.CurrentSignature = nil
expectedReply.OldNonceSignature = nil
expectedReply.OldNonceSignedAmount = nil
assert.Equal(t, expectedReply, reply)
} | explode_data.jsonl/6077 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 217
} | [
2830,
3393,
1949,
9629,
1397,
2753,
35120,
1925,
1986,
9629,
28074,
1155,
353,
8840,
836,
8,
341,
71550,
1043,
1669,
1584,
1860,
2271,
8764,
9629,
1043,
198,
71550,
1043,
33858,
1506,
10093,
284,
2092,
198,
71550,
1043,
41152,
1568,
284,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNew(t *testing.T) {
type args struct {
opts []Option
}
type want struct {
want Handler
}
type test struct {
name string
args args
want want
checkFunc func(want, Handler) error
beforeFunc func(args)
afterFunc func(args)
}
defaultCheckFunc := func(w want, got Handler) error {
if !reflect.DeepEqual(got, w.want) {
return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want)
}
return nil
}
tests := []test{
// TODO test cases
/*
{
name: "test_case_1",
args: args {
opts: nil,
},
want: want{},
checkFunc: defaultCheckFunc,
},
*/
// TODO test cases
/*
func() test {
return test {
name: "test_case_2",
args: args {
opts: nil,
},
want: want{},
checkFunc: defaultCheckFunc,
}
}(),
*/
}
for _, tc := range tests {
test := tc
t.Run(test.name, func(tt *testing.T) {
tt.Parallel()
defer goleak.VerifyNone(tt, goleak.IgnoreCurrent())
if test.beforeFunc != nil {
test.beforeFunc(test.args)
}
if test.afterFunc != nil {
defer test.afterFunc(test.args)
}
if test.checkFunc == nil {
test.checkFunc = defaultCheckFunc
}
got := New(test.args.opts...)
if err := test.checkFunc(test.want, got); err != nil {
tt.Errorf("error = %v", err)
}
})
}
} | explode_data.jsonl/40268 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 739
} | [
2830,
3393,
3564,
1155,
353,
8840,
836,
8,
341,
13158,
2827,
2036,
341,
197,
64734,
3056,
5341,
198,
197,
532,
13158,
1366,
2036,
341,
197,
50780,
19954,
198,
197,
532,
13158,
1273,
2036,
341,
197,
11609,
981,
914,
198,
197,
31215,
98... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestEvTimeUpdate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
ch1 := make(chan *cdcpb.ChangeDataEvent, 10)
srv1 := newMockChangeDataService(t, ch1)
server1, addr1 := newMockService(ctx, t, srv1, wg)
defer func() {
close(ch1)
server1.Stop()
wg.Wait()
}()
rpcClient, cluster, pdClient, err := testutils.NewMockTiKV("", mockcopr.NewCoprRPCHandler())
require.Nil(t, err)
pdClient = &mockPDClient{Client: pdClient, versionGen: defaultVersionGen}
kvStorage, err := tikv.NewTestTiKVStore(rpcClient, pdClient, nil, nil, 0)
require.Nil(t, err)
defer kvStorage.Close() //nolint:errcheck
cluster.AddStore(1, addr1)
cluster.Bootstrap(3, []uint64{1}, []uint64{4}, 4)
originalReconnectInterval := reconnectInterval
reconnectInterval = 1500 * time.Millisecond
err = failpoint.Enable("github.com/pingcap/tiflow/cdc/kv/kvClientCheckUnInitRegionInterval", "return(2)")
require.Nil(t, err)
defer func() {
_ = failpoint.Disable("github.com/pingcap/tiflow/cdc/kv/kvClientCheckUnInitRegionInterval")
reconnectInterval = originalReconnectInterval
}()
baseAllocatedID := currentRequestID()
lockResolver := txnutil.NewLockerResolver(kvStorage,
model.DefaultChangeFeedID("changefeed-test"),
util.RoleTester)
isPullInit := &mockPullerInit{}
grpcPool := NewGrpcPoolImpl(ctx, &security.Credential{})
defer grpcPool.Close()
regionCache := tikv.NewRegionCache(pdClient)
defer regionCache.Close()
cdcClient := NewCDCClient(
ctx, pdClient, grpcPool, regionCache, pdutil.NewClock4Test(),
model.DefaultChangeFeedID(""),
config.GetDefaultServerConfig().KVClient)
eventCh := make(chan model.RegionFeedEvent, 50)
wg.Add(1)
go func() {
defer wg.Done()
err := cdcClient.EventFeed(ctx,
regionspan.ComparableSpan{Start: []byte("a"), End: []byte("b")},
100, lockResolver, isPullInit, eventCh)
require.Equal(t, context.Canceled, errors.Cause(err))
}()
// wait request id allocated with: new session, new request
waitRequestID(t, baseAllocatedID+1)
eventCount := 20
for i := 0; i < eventCount; i++ {
events := &cdcpb.ChangeDataEvent{Events: []*cdcpb.Event{
{
RegionId: 3,
RequestId: currentRequestID(),
Event: &cdcpb.Event_Entries_{
Entries: &cdcpb.Event_Entries{
Entries: []*cdcpb.Event_Row{{
Type: cdcpb.Event_COMMITTED,
OpType: cdcpb.Event_Row_PUT,
Key: []byte("aaaa"),
Value: []byte("committed put event before init"),
StartTs: 105,
CommitTs: 115,
}},
},
},
},
}}
ch1 <- events
time.Sleep(time.Millisecond * 100)
}
expected := []model.RegionFeedEvent{
{
Resolved: &model.ResolvedSpan{
Span: regionspan.ComparableSpan{Start: []byte("a"), End: []byte("b")},
ResolvedTs: 100,
},
RegionID: 3,
},
{
Val: &model.RawKVEntry{
OpType: model.OpTypePut,
Key: []byte("aaaa"),
Value: []byte("committed put event before init"),
StartTs: 105,
CRTs: 115,
RegionID: 3,
},
RegionID: 3,
},
}
for i := 0; i < eventCount+1; i++ {
select {
case event := <-eventCh:
if i == 0 {
require.Equal(t, expected[0], event)
} else {
require.Equal(t, expected[1], event)
}
case <-time.After(time.Second):
require.Fail(t, fmt.Sprintf("expected event not received, %d received", i))
}
}
cancel()
} | explode_data.jsonl/32886 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1457
} | [
2830,
3393,
34112,
1462,
4289,
1155,
353,
8840,
836,
8,
341,
20985,
11,
9121,
1669,
2266,
26124,
9269,
5378,
19047,
2398,
72079,
1669,
609,
12996,
28384,
2808,
31483,
23049,
16,
1669,
1281,
35190,
353,
4385,
4672,
65,
39348,
1043,
1556,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_configDirectory_StripsOnlyTheLastOccurrenceOfApp(t *testing.T) {
monkey.Patch(os.Getwd, func() (string, error) { return "/app/something/app/ab/app/token", nil })
defer monkey.UnpatchAll()
dir := configDirectory()
assertlib.Equal(t, "/app/something/app/ab/conf", dir)
} | explode_data.jsonl/61951 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 101
} | [
2830,
3393,
5332,
9310,
46171,
3077,
7308,
785,
5842,
21764,
20559,
2124,
2164,
1155,
353,
8840,
836,
8,
341,
197,
96016,
1069,
754,
9638,
2234,
6377,
11,
2915,
368,
320,
917,
11,
1465,
8,
314,
470,
3521,
676,
2687,
11532,
10640,
14,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestUserEntityGetAllSUCCESS(t *testing.T) {
resp := sendGet("http://localhost:8080/Search/UsersAll")
response := responseToString(resp)
compareResults(t, response, ResponseUserEntityGetAll)
} | explode_data.jsonl/59350 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 64
} | [
2830,
3393,
1474,
3030,
1949,
2403,
39308,
1155,
353,
8840,
836,
8,
341,
34653,
1669,
3624,
1949,
445,
1254,
1110,
8301,
25,
23,
15,
23,
15,
78893,
97821,
2403,
1138,
21735,
1669,
2033,
5870,
20267,
340,
197,
18948,
9801,
1155,
11,
20... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestGetPortfolioSummary(t *testing.T) {
newbase := Base{}
// Personal holdings
newbase.AddAddress("someaddress", "LTC", PortfolioAddressPersonal, 1)
newbase.AddAddress("someaddress2", "LTC", PortfolioAddressPersonal, 2)
newbase.AddAddress("someaddress3", "BTC", PortfolioAddressPersonal, 100)
newbase.AddAddress("0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae", "ETH",
PortfolioAddressPersonal, 865346880000000000)
newbase.AddAddress("0x9edc81c813b26165f607a8d1b8db87a02f34307f", "ETH",
PortfolioAddressPersonal, 165346880000000000)
// Exchange holdings
newbase.AddExchangeAddress("Bitfinex", "LTC", 20)
newbase.AddExchangeAddress("Bitfinex", "BTC", 100)
newbase.AddExchangeAddress("ANX", "ETH", 42)
portfolio := GetPortfolio()
portfolio.SeedPortfolio(newbase)
value := portfolio.GetPortfolioSummary()
getTotalsVal := func(s string) Coin {
for x := range value.Totals {
if value.Totals[x].Coin == s {
return value.Totals[x]
}
}
return Coin{}
}
if getTotalsVal("LTC").Coin != "LTC" {
t.Error("Test Failed - portfolio_test.go - TestGetPortfolioSummary error")
}
if getTotalsVal("ETH").Coin != "ETH" {
t.Error("Test Failed - portfolio_test.go - TestGetPortfolioSummary error")
}
if getTotalsVal("LTC").Balance != 23 {
t.Error("Test Failed - portfolio_test.go - TestGetPortfolioSummary error")
}
if getTotalsVal("BTC").Balance != 200 {
t.Error("Test Failed - portfolio_test.go - TestGetPortfolioSummary error")
}
} | explode_data.jsonl/33746 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 548
} | [
2830,
3393,
1949,
66675,
19237,
1155,
353,
8840,
836,
8,
341,
8638,
3152,
1669,
5351,
16094,
197,
322,
19207,
57248,
198,
8638,
3152,
1904,
4286,
445,
14689,
4995,
497,
330,
43,
7749,
497,
46192,
4286,
34027,
11,
220,
16,
340,
8638,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestMultiLevelFields(t *testing.T) {
data := `{ "Level1Field1":3,
"Level1Field4":4,
"Level1Field2":{ "Level2Field1":[ "value1", "value2" ],
"Level2Field2":{ "Level3Field1":[ { "key1":"value1" } ] } } }`
paths := []string{"Level1Field1", "Level1Field2.Level2Field1",
"Level1Field2.Level2Field2.Level3Field1", "Level1Field4"}
expected := []string{"3", `[value1 value2]`,
`[map[key1:value1]]`, "4"}
t.Run("SingleMany", func(t *testing.T) {
testMany(t, data, paths,
expected)
})
} | explode_data.jsonl/62352 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 289
} | [
2830,
3393,
20358,
4449,
8941,
1155,
353,
8840,
836,
8,
341,
262,
821,
1669,
53692,
330,
4449,
16,
1877,
16,
788,
18,
11,
715,
2290,
330,
4449,
16,
1877,
19,
788,
19,
11,
715,
2290,
330,
4449,
16,
1877,
17,
788,
90,
330,
4449,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetVirtualProjectNoMatch(t *testing.T) {
fixture.EnsureCleanState(t)
err := createAndConfigGlobalProject()
assert.NoError(t, err)
//Create project which does not match global project settings
projectName := "proj-" + fixture.Name()
_, err = fixture.RunCli("proj", "create", projectName,
"--description", "Test description",
"-d", fmt.Sprintf("%s,*", common.KubernetesInternalAPIServerAddr),
"-s", "*",
"--orphaned-resources")
assert.NoError(t, err)
proj, err := fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.ArgoCDNamespace).Get(context.Background(), projectName, metav1.GetOptions{})
assert.NoError(t, err)
//Create an app belongs to proj project
_, err = fixture.RunCli("app", "create", fixture.Name(), "--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
"--path", guestbookPath, "--project", proj.Name, "--dest-server", common.KubernetesInternalAPIServerAddr, "--dest-namespace", fixture.DeploymentNamespace())
assert.NoError(t, err)
//App trying to sync a resource which is not blacked listed anywhere
_, err = fixture.RunCli("app", "sync", fixture.Name(), "--resource", "apps:Deployment:guestbook-ui", "--timeout", fmt.Sprintf("%v", 10))
assert.NoError(t, err)
//app trying to sync a resource which is black listed by global project
_, err = fixture.RunCli("app", "sync", fixture.Name(), "--resource", ":Service:guestbook-ui", "--timeout", fmt.Sprintf("%v", 10))
assert.NoError(t, err)
} | explode_data.jsonl/58448 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 502
} | [
2830,
3393,
1949,
33026,
7849,
2753,
8331,
1155,
353,
8840,
836,
8,
341,
1166,
12735,
22834,
19098,
27529,
1397,
1155,
340,
9859,
1669,
1855,
3036,
2648,
11646,
7849,
741,
6948,
35699,
1155,
11,
1848,
692,
197,
322,
4021,
2390,
892,
155... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCreatePassiveSellOfferValidateAmount(t *testing.T) {
kp0 := newKeypair0()
kp1 := newKeypair1()
sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761100))
createPassiveOffer := CreatePassiveSellOffer{
Selling: CreditAsset{"ABCD", kp0.Address()},
Buying: NativeAsset{},
Amount: "-3",
Price: "1.0",
}
_, err := NewTransaction(
TransactionParams{
SourceAccount: &sourceAccount,
IncrementSequenceNum: false,
Operations: []Operation{&createPassiveOffer},
BaseFee: MinBaseFee,
Timebounds: NewInfiniteTimeout(),
},
)
if assert.Error(t, err) {
expected := `validation failed for *txnbuild.CreatePassiveSellOffer operation: Field: Amount, Error: amount can not be negative`
assert.Contains(t, err.Error(), expected)
}
} | explode_data.jsonl/29368 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 334
} | [
2830,
3393,
4021,
12187,
533,
68533,
39462,
17926,
10093,
1155,
353,
8840,
836,
8,
341,
16463,
79,
15,
1669,
501,
6608,
1082,
1310,
15,
741,
16463,
79,
16,
1669,
501,
6608,
1082,
1310,
16,
741,
47418,
7365,
1669,
1532,
16374,
7365,
59... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGrpc_GetBalance(t *testing.T) {
qapi.On("StoreGet", mock.Anything).Return(nil, types.ErrInvalidParam)
_, err := g.GetBalance(getOkCtx(), &types.ReqBalance{})
assert.Equal(t, err, types.ErrInvalidParam)
} | explode_data.jsonl/328 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 84
} | [
2830,
3393,
6464,
3992,
13614,
21190,
1155,
353,
8840,
836,
8,
341,
18534,
2068,
8071,
445,
6093,
1949,
497,
7860,
13311,
1596,
568,
5598,
27907,
11,
4494,
27862,
7928,
2001,
340,
197,
6878,
1848,
1669,
342,
2234,
21190,
5433,
11578,
23... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestParseZpool(t *testing.T) {
t.Parallel()
line := "big 3.62T 2.69T 959G - - 2% 74% 1.00x ONLINE -"
require.Equal(
t,
Zpool{
name: "big",
props: map[string]interface{}{
"size": 3.98023209254912e+12,
"alloc": 2.95768627871744e+12,
"free": 1.029718409216e+12,
"frag": 2,
"cap": 74,
"dedup": 1.0,
"health": 0,
},
},
parseZpool(line, header),
)
} | explode_data.jsonl/18988 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 265
} | [
2830,
3393,
14463,
57,
10285,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
27109,
1669,
330,
16154,
262,
220,
18,
13,
21,
17,
51,
220,
220,
17,
13,
21,
24,
51,
256,
220,
24,
20,
24,
38,
286,
481,
260,
481,
257,
220,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBaseInitBase(t *testing.T) {
t.Skip() // This still need sudo
err := InitBase()
if err != nil {
log.Println(err.Error())
assert.Equal(t, true, false, "Should not reach here")
}
cmdStr := "du -s /var/cache/pbuilder/base.tgz | "
cmdStr += "cut -d '/' -f1 | head -n 1 | sed 's/ //g' | "
cmdStr += "tr -d '\n' | tr -d '\t' "
cmd := exec.Command("bash", "-c", cmdStr)
out, _ := cmd.CombinedOutput()
cmd.Run()
size, err := strconv.Atoi(string(out))
if err != nil {
log.Println(err.Error())
assert.Equal(t, true, false, "Should not reach here")
}
assert.NotEqual(t, size, int(0))
} | explode_data.jsonl/45424 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 258
} | [
2830,
3393,
3978,
3803,
3978,
1155,
353,
8840,
836,
8,
341,
3244,
57776,
368,
442,
1096,
2058,
1184,
25408,
198,
9859,
1669,
15690,
3978,
741,
743,
1848,
961,
2092,
341,
197,
6725,
12419,
3964,
6141,
2398,
197,
6948,
12808,
1155,
11,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestNilWrapper(t *testing.T) {
assert := New(new(testing.T))
if !assert.Nil(nil) {
t.Error("Nil should return true: object is nil")
}
if assert.Nil(new(AssertionTesterConformingObject)) {
t.Error("Nil should return false: object is not nil")
}
} | explode_data.jsonl/54967 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 102
} | [
2830,
3393,
19064,
11542,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
1532,
1755,
8623,
287,
836,
4390,
743,
753,
2207,
59678,
27907,
8,
341,
197,
3244,
6141,
445,
19064,
1265,
470,
830,
25,
1633,
374,
2092,
1138,
197,
532,
743,
2060,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestWriteContents(t *testing.T) {
t.Run("mismatched size", func(t *testing.T) {
partition := Partition{
Start: 2048,
End: 3048,
Size: 500,
Name: "EFI System",
GUID: "5CA3360B-5DE6-4FCF-B4CE-419CEE433B51",
Attributes: 0,
Type: EFISystemPartition,
}
var b bytes.Buffer
reader := bufio.NewReader(&b)
expected := "Cannot reconcile partition size"
f := &testhelper.FileImpl{}
written, err := partition.WriteContents(f, reader)
if written != 0 {
t.Errorf("Returned %d bytes written instead of 0", written)
}
if err == nil {
t.Errorf("Returned nil error instead of actual errors")
}
if !strings.HasPrefix(err.Error(), expected) {
t.Errorf("Error type %s instead of expected %s", err.Error(), expected)
}
})
t.Run("error writing file", func(t *testing.T) {
size := 512000
partition := Partition{
Start: 2048,
End: 3047,
Size: uint64(size),
Name: "EFI System",
GUID: "5CA3360B-5DE6-4FCF-B4CE-419CEE433B51",
Attributes: 0,
Type: EFISystemPartition,
}
b := make([]byte, size, size)
rand.Read(b)
reader := bytes.NewReader(b)
expected := "Error writing to file"
f := &testhelper.FileImpl{
Writer: func(b []byte, offset int64) (int, error) {
return 0, fmt.Errorf(expected)
},
}
written, err := partition.WriteContents(f, reader)
if written != 0 {
t.Errorf("Returned %d bytes written instead of 0", written)
}
if err == nil {
t.Errorf("Returned nil error instead of actual errors")
return
}
if !strings.HasPrefix(err.Error(), expected) {
t.Errorf("Error type %s instead of expected %s", err.Error(), expected)
}
})
t.Run("too large for partition", func(t *testing.T) {
partition := Partition{
Start: 2048,
End: 2048,
Size: uint64(1),
Name: "EFI System",
GUID: "5CA3360B-5DE6-4FCF-B4CE-419CEE433B51",
Attributes: 0,
Type: EFISystemPartition,
}
// make a byte array that is too big
b := make([]byte, 2*512, 2*512)
rand.Read(b)
reader := bytes.NewReader(b)
expected := "Requested to write at least"
f := &testhelper.FileImpl{
Writer: func(b []byte, offset int64) (int, error) {
return len(b), nil
},
}
read, err := partition.WriteContents(f, reader)
if read != 0 {
t.Errorf("Returned %d bytes read instead of 0", read)
}
if err == nil {
t.Errorf("Returned nil error instead of actual errors")
return
}
if !strings.HasPrefix(err.Error(), expected) {
t.Errorf("Error type %s instead of expected %s", err.Error(), expected)
}
})
t.Run("successful write", func(t *testing.T) {
size := 512000
partition := Partition{
Start: 2048,
End: 3047,
Size: uint64(size),
Name: "EFI System",
GUID: "5CA3360B-5DE6-4FCF-B4CE-419CEE433B51",
Attributes: 0,
Type: EFISystemPartition,
}
b := make([]byte, size, size)
rand.Read(b)
b2 := make([]byte, 0, size)
reader := bytes.NewReader(b)
f := &testhelper.FileImpl{
Writer: func(b []byte, offset int64) (int, error) {
b2 = append(b2, b...)
return len(b), nil
},
}
written, err := partition.WriteContents(f, reader)
if written != uint64(size) {
t.Errorf("Returned %d bytes written instead of %d", written, size)
}
if err != nil {
t.Errorf("Returned error instead of nil")
return
}
if bytes.Compare(b2, b) != 0 {
t.Errorf("Bytes mismatch")
t.Log(b)
t.Log(b2)
}
})
} | explode_data.jsonl/39146 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1598
} | [
2830,
3393,
7985,
14803,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
76,
24976,
291,
1379,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
72872,
680,
1669,
54626,
515,
298,
65999,
25,
414,
220,
17,
15,
19,
23,
345,
298,
38407,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestContextCookie(t *testing.T) {
e := New()
req := test.NewRequest(GET, "/", nil)
theme := "theme=light"
user := "user=Jon Snow"
req.Header().Add(HeaderCookie, theme)
req.Header().Add(HeaderCookie, user)
rec := test.NewResponseRecorder()
c := e.NewContext(req, rec).(*echoContext)
// Read single
cookie, err := c.Cookie("theme")
if assert.NoError(t, err) {
assert.Equal(t, "theme", cookie.Name())
assert.Equal(t, "light", cookie.Value())
}
// Read multiple
for _, cookie := range c.Cookies() {
switch cookie.Name() {
case "theme":
assert.Equal(t, "light", cookie.Value())
case "user":
assert.Equal(t, "Jon Snow", cookie.Value())
}
}
// Write
cookie = &test.Cookie{Cookie: &http.Cookie{
Name: "SSID",
Value: "Ap4PGTEq",
Domain: "labstack.com",
Path: "/",
Expires: time.Now(),
Secure: true,
HttpOnly: true,
}}
c.SetCookie(cookie)
assert.Contains(t, rec.Header().Get(HeaderSetCookie), "SSID")
assert.Contains(t, rec.Header().Get(HeaderSetCookie), "Ap4PGTEq")
assert.Contains(t, rec.Header().Get(HeaderSetCookie), "labstack.com")
assert.Contains(t, rec.Header().Get(HeaderSetCookie), "Secure")
assert.Contains(t, rec.Header().Get(HeaderSetCookie), "HttpOnly")
} | explode_data.jsonl/35870 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 505
} | [
2830,
3393,
1972,
20616,
1155,
353,
8840,
836,
8,
341,
7727,
1669,
1532,
741,
24395,
1669,
1273,
75274,
62918,
11,
64657,
2092,
340,
197,
9047,
1669,
330,
9047,
28,
4145,
698,
19060,
1669,
330,
872,
28,
37152,
18901,
698,
24395,
15753,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func Test(t *testing.T) {
reexec.Init() // This is required for external graphdriver tests
if !isLocalDaemon {
fmt.Println("INFO: Testing against a remote daemon")
} else {
fmt.Println("INFO: Testing against a local daemon")
}
check.TestingT(t)
} | explode_data.jsonl/73730 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 89
} | [
2830,
3393,
1155,
353,
8840,
836,
8,
341,
17200,
11748,
26849,
368,
442,
1096,
374,
2567,
369,
9250,
4771,
12521,
7032,
271,
743,
753,
285,
7319,
89177,
341,
197,
11009,
12419,
445,
6637,
25,
26768,
2348,
264,
8699,
39293,
1138,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestClient_IndexJobSpecs(t *testing.T) {
t.Parallel()
app, cleanup := cltest.NewApplication(t, cltest.EthMockRegisterChainID)
defer cleanup()
require.NoError(t, app.Start())
j1 := cltest.NewJob()
app.Store.CreateJob(&j1)
j2 := cltest.NewJob()
app.Store.CreateJob(&j2)
client, r := app.NewClientAndRenderer()
require.Nil(t, client.IndexJobSpecs(cltest.EmptyCLIContext()))
jobs := *r.Renders[0].(*[]models.JobSpec)
assert.Equal(t, 2, len(jobs))
assert.Equal(t, j1.ID, jobs[0].ID)
} | explode_data.jsonl/78832 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 209
} | [
2830,
3393,
2959,
50361,
12245,
8327,
82,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
28236,
11,
21290,
1669,
1185,
1944,
7121,
4988,
1155,
11,
1185,
1944,
5142,
339,
11571,
8690,
18837,
915,
340,
16867,
21290,
741,
17957,
35... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestWaitLockKill(t *testing.T) {
// Test kill command works on waiting pessimistic lock.
store, clean := createMockStoreAndSetup(t)
defer clean()
tk := testkit.NewTestKit(t, store)
tk2 := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk2.MustExec("use test")
tk.MustExec("drop table if exists test_kill")
tk.MustExec("create table test_kill (id int primary key, c int)")
tk.MustExec("insert test_kill values (1, 1)")
tk.MustExec("begin pessimistic")
tk2.MustExec("set innodb_lock_wait_timeout = 50")
tk2.MustExec("begin pessimistic")
tk.MustQuery("select * from test_kill where id = 1 for update")
var wg sync.WaitGroup
wg.Add(1)
go func() {
time.Sleep(500 * time.Millisecond)
sessVars := tk2.Session().GetSessionVars()
succ := atomic.CompareAndSwapUint32(&sessVars.Killed, 0, 1)
require.True(t, succ)
wg.Wait()
}()
_, err := tk2.Exec("update test_kill set c = c + 1 where id = 1")
wg.Done()
require.Error(t, err)
require.True(t, terror.ErrorEqual(err, storeerr.ErrQueryInterrupted))
tk.MustExec("rollback")
} | explode_data.jsonl/12464 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 405
} | [
2830,
3393,
14190,
11989,
53734,
1155,
353,
8840,
836,
8,
341,
197,
322,
3393,
5505,
3210,
4278,
389,
8580,
72523,
4532,
5296,
624,
57279,
11,
4240,
1669,
1855,
11571,
6093,
3036,
21821,
1155,
340,
16867,
4240,
2822,
3244,
74,
1669,
127... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMainCreateRuntimeAppInvalidSubCommand(t *testing.T) {
assert := assert.New(t)
exitStatus := 0
savedBefore := runtimeBeforeSubcommands
savedExitFunc := exitFunc
exitFunc = func(status int) { exitStatus = status }
// disable
runtimeBeforeSubcommands = nil
defer func() {
runtimeBeforeSubcommands = savedBefore
exitFunc = savedExitFunc
}()
// calls fatal() so no return
_ = createRuntimeApp(context.Background(), []string{name, "i-am-an-invalid-sub-command"})
assert.NotEqual(exitStatus, 0)
} | explode_data.jsonl/52203 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 174
} | [
2830,
3393,
6202,
4021,
15123,
2164,
7928,
3136,
4062,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
14519,
2522,
1669,
220,
15,
271,
1903,
4141,
10227,
1669,
15592,
10227,
3136,
24270,
198,
1903,
4141,
15339,
9626,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestResultOutputs_GetMetadata(t *testing.T) {
metadataKey := "so-meta"
outputName := "test1"
t.Run("output has metadata", func(t *testing.T) {
value := "that's so meta"
outputs := OutputMetadata{}
err := outputs.SetMetadata(outputName, metadataKey, value)
require.NoError(t, err, "SetMetadata failed")
gotValue, ok := outputs.GetMetadata(outputName, metadataKey)
require.True(t, ok, "GetMetadata should find the value")
assert.Equal(t, value, gotValue, "GetMetadata should return the value that we set")
})
t.Run("output not found", func(t *testing.T) {
outputs := OutputMetadata{}
gotValue, ok := outputs.GetMetadata(outputName, metadataKey)
require.False(t, ok, "GetMetadata should report that it did not find the value")
assert.Empty(t, gotValue, "GetMetadata should return an empty value when one isn't found")
})
t.Run("output has no metadata", func(t *testing.T) {
outputs := OutputMetadata{
outputName: map[string]string{
"other": "stuff",
},
}
gotValue, ok := outputs.GetMetadata(outputName, metadataKey)
require.False(t, ok, "GetMetadata should report that it did not find the value")
assert.Empty(t, gotValue, "GetMetadata should return an empty value when one isn't found")
})
t.Run("output has different structure", func(t *testing.T) {
outputs := OutputMetadata{
outputName: map[string]interface{}{
"other": struct{}{},
},
}
gotValue, ok := outputs.GetMetadata(outputName, metadataKey)
require.False(t, ok, "GetMetadata should report that it did not find the value")
assert.Empty(t, gotValue, "GetMetadata should return an empty value when one isn't found")
})
} | explode_data.jsonl/70425 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 570
} | [
2830,
3393,
2077,
61438,
13614,
14610,
1155,
353,
8840,
836,
8,
341,
2109,
7603,
1592,
1669,
330,
704,
54017,
698,
21170,
675,
1669,
330,
1944,
16,
698,
3244,
16708,
445,
3006,
702,
11160,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestClientCertificate(t *testing.T) {
// Check for leaking routines
report := test.CheckRoutines(t)
defer report()
srvCert, err := selfsign.GenerateSelfSigned()
if err != nil {
t.Fatal(err)
}
srvCAPool := x509.NewCertPool()
srvCertificate, err := x509.ParseCertificate(srvCert.Certificate[0])
if err != nil {
t.Fatal(err)
}
srvCAPool.AddCert(srvCertificate)
cert, err := selfsign.GenerateSelfSigned()
if err != nil {
t.Fatal(err)
}
certificate, err := x509.ParseCertificate(cert.Certificate[0])
if err != nil {
t.Fatal(err)
}
caPool := x509.NewCertPool()
caPool.AddCert(certificate)
t.Run("parallel", func(t *testing.T) { // sync routines to check routine leak
tests := map[string]struct {
clientCfg *Config
serverCfg *Config
wantErr bool
}{
"NoClientCert": {
clientCfg: &Config{RootCAs: srvCAPool},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: NoClientCert,
ClientCAs: caPool,
},
},
"NoClientCert_cert": {
clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: RequireAnyClientCert,
},
},
"RequestClientCert_cert": {
clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: RequestClientCert,
},
},
"RequestClientCert_no_cert": {
clientCfg: &Config{RootCAs: srvCAPool},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: RequestClientCert,
ClientCAs: caPool,
},
},
"RequireAnyClientCert": {
clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: RequireAnyClientCert,
},
},
"RequireAnyClientCert_error": {
clientCfg: &Config{RootCAs: srvCAPool},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: RequireAnyClientCert,
},
wantErr: true,
},
"VerifyClientCertIfGiven_no_cert": {
clientCfg: &Config{RootCAs: srvCAPool},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: VerifyClientCertIfGiven,
ClientCAs: caPool,
},
},
"VerifyClientCertIfGiven_cert": {
clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: VerifyClientCertIfGiven,
ClientCAs: caPool,
},
},
"VerifyClientCertIfGiven_error": {
clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: VerifyClientCertIfGiven,
},
wantErr: true,
},
"RequireAndVerifyClientCert": {
clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}},
serverCfg: &Config{
Certificates: []tls.Certificate{srvCert},
ClientAuth: RequireAndVerifyClientCert,
ClientCAs: caPool,
},
},
}
for name, tt := range tests {
tt := tt
t.Run(name, func(t *testing.T) {
t.Parallel()
ca, cb := dpipe.Pipe()
type result struct {
c *Conn
err error
}
c := make(chan result)
go func() {
client, err := Client(ca, tt.clientCfg)
c <- result{client, err}
}()
server, err := Server(cb, tt.serverCfg)
res := <-c
defer func() {
if err == nil {
_ = server.Close()
}
if res.err == nil {
_ = res.c.Close()
}
}()
if tt.wantErr {
if err != nil {
// Error expected, test succeeded
return
}
t.Error("Error expected")
}
if err != nil {
t.Errorf("Server failed(%v)", err)
}
if res.err != nil {
t.Errorf("Client failed(%v)", res.err)
}
actualClientCert := server.ConnectionState().PeerCertificates
if tt.serverCfg.ClientAuth == RequireAnyClientCert || tt.serverCfg.ClientAuth == RequireAndVerifyClientCert {
if actualClientCert == nil {
t.Errorf("Client did not provide a certificate")
}
if len(actualClientCert) != len(tt.clientCfg.Certificates[0].Certificate) || !bytes.Equal(tt.clientCfg.Certificates[0].Certificate[0], actualClientCert[0]) {
t.Errorf("Client certificate was not communicated correctly")
}
}
if tt.serverCfg.ClientAuth == NoClientCert {
if actualClientCert != nil {
t.Errorf("Client certificate wasn't expected")
}
}
actualServerCert := res.c.ConnectionState().PeerCertificates
if actualServerCert == nil {
t.Errorf("Server did not provide a certificate")
}
if len(actualServerCert) != len(tt.serverCfg.Certificates[0].Certificate) || !bytes.Equal(tt.serverCfg.Certificates[0].Certificate[0], actualServerCert[0]) {
t.Errorf("Server certificate was not communicated correctly")
}
})
}
})
} | explode_data.jsonl/40936 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2291
} | [
2830,
3393,
2959,
33202,
1155,
353,
8840,
836,
8,
341,
197,
322,
4248,
369,
51829,
29497,
198,
69931,
1669,
1273,
10600,
49,
28628,
1155,
340,
16867,
1895,
2822,
1903,
10553,
36934,
11,
1848,
1669,
656,
7752,
57582,
12092,
49312,
741,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetFundingRates(t *testing.T) {
t.Parallel()
// optional params
_, err := f.GetFundingRates(context.Background(), time.Time{}, time.Time{}, "")
if err != nil {
t.Error(err)
}
_, err = f.GetFundingRates(context.Background(),
time.Now().Add(-time.Hour), time.Now(), "BTC-PERP")
if err != nil {
t.Error(err)
}
} | explode_data.jsonl/15163 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 138
} | [
2830,
3393,
1949,
37,
37189,
82623,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
197,
322,
10101,
3628,
198,
197,
6878,
1848,
1669,
282,
2234,
37,
37189,
82623,
5378,
19047,
1507,
882,
16299,
22655,
882,
16299,
22655,
14676,
74... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.