text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestIsVirtual(t *testing.T) {
var info os.FileInfo
info = createPlaceholderFileInfo("foo", true)
if !isVirtual(info) {
t.Error()
}
info = createPlaceholderFileInfoNoCase("foo", true)
if !isVirtual(info) {
t.Error()
}
baseInfo := dummyFileInfo{name: "foo"}
if isVirtual(baseInfo) {
t.Error()
}
info = createRenamedFileInfo("bar", baseInfo)
if !isVirtual(info) {
t.Error()
}
info = createRenamedFileInfoNoCase("bar", baseInfo)
if !isVirtual(info) {
t.Error()
}
} | explode_data.jsonl/39140 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 198
} | [
2830,
3393,
3872,
33026,
1155,
353,
8840,
836,
8,
341,
2405,
3546,
2643,
8576,
1731,
271,
27043,
284,
1855,
48305,
45430,
445,
7975,
497,
830,
340,
743,
753,
285,
33026,
14208,
8,
341,
197,
3244,
6141,
741,
197,
532,
27043,
284,
1855,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestHTTPS(t *testing.T) {
reset, err := GetHTTPS()
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
if reset == "" {
OffHTTPS()
} else {
OnHTTPS(reset)
}
})
address := "127.0.0.1:1080"
err = OnHTTPS(address)
if err != nil {
t.Fatal(err)
}
got, err := GetHTTPS()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(address, got) {
t.Fatalf("want %q, got %q", address, got)
}
err = OffHTTPS()
if err != nil {
t.Fatal(err)
}
ori, err := GetHTTPS()
if err != nil {
t.Fatal(err)
}
if ori != "" {
t.Fatalf("want empty, got %q", ori)
}
} | explode_data.jsonl/45937 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 291
} | [
2830,
3393,
82354,
1155,
353,
8840,
836,
8,
341,
70343,
11,
1848,
1669,
2126,
82354,
741,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
532,
3244,
727,
60639,
18552,
368,
341,
197,
743,
7585,
621,
1591,
341,
298,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestPanicLog(t *testing.T) {
// Creating a temp file to collect logs
tmpfile, err := ioutil.TempFile("", "mlog")
if err != nil {
require.NoError(t, err)
}
defer func() {
require.NoError(t, tmpfile.Close())
require.NoError(t, os.Remove(tmpfile.Name()))
}()
// This test requires Zap file target for now.
mlog.EnableZap()
defer mlog.DisableZap()
// Creating logger to log to console and temp file
logger := mlog.NewLogger(&mlog.LoggerConfiguration{
EnableConsole: true,
ConsoleJson: true,
EnableFile: true,
FileLocation: tmpfile.Name(),
FileLevel: mlog.LevelInfo,
})
// Creating a server with logger
s, err := NewServer(SetLogger(logger))
require.NoError(t, err)
// Route for just panicing
s.Router.HandleFunc("/panic", func(writer http.ResponseWriter, request *http.Request) {
s.Log.Info("inside panic handler")
panic("log this panic")
})
testDir, _ := fileutils.FindDir("tests")
s.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.ListenAddress = ":0"
*cfg.ServiceSettings.ConnectionSecurity = "TLS"
*cfg.ServiceSettings.TLSKeyFile = path.Join(testDir, "tls_test_key.pem")
*cfg.ServiceSettings.TLSCertFile = path.Join(testDir, "tls_test_cert.pem")
})
serverErr := s.Start()
require.NoError(t, serverErr)
// Calling panic route
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
client.Get("https://localhost:" + strconv.Itoa(s.ListenAddr.Port) + "/panic")
err = s.Shutdown()
require.NoError(t, err)
// Checking whether panic was logged
var panicLogged = false
var infoLogged = false
_, err = tmpfile.Seek(0, 0)
require.NoError(t, err)
scanner := bufio.NewScanner(tmpfile)
for scanner.Scan() {
if !infoLogged && strings.Contains(scanner.Text(), "inside panic handler") {
infoLogged = true
}
if strings.Contains(scanner.Text(), "log this panic") {
panicLogged = true
break
}
}
if !infoLogged {
t.Error("Info log line was supposed to be logged")
}
if !panicLogged {
t.Error("Panic was supposed to be logged")
}
} | explode_data.jsonl/47832 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 796
} | [
2830,
3393,
47,
31270,
2201,
1155,
353,
8840,
836,
8,
341,
197,
322,
31306,
264,
2730,
1034,
311,
6530,
18422,
198,
20082,
1192,
11,
1848,
1669,
43144,
65009,
1703,
19814,
330,
76,
839,
1138,
743,
1848,
961,
2092,
341,
197,
17957,
356... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestConcurrencySetGet(t *testing.T) {
config := safeConfig{
Viper: viper.New(),
}
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
for n := 0; n <= 1000; n++ {
config.GetString("foo")
}
}()
go func() {
defer wg.Done()
for n := 0; n <= 1000; n++ {
config.Set("foo", "bar")
}
}()
wg.Wait()
assert.Equal(t, config.GetString("foo"), "bar")
} | explode_data.jsonl/609 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 183
} | [
2830,
3393,
79611,
1649,
1949,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
6092,
2648,
515,
197,
17446,
12858,
25,
95132,
7121,
3148,
197,
630,
2405,
63581,
12811,
28384,
2808,
271,
72079,
1904,
7,
17,
340,
30680,
2915,
368,
341,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestStringsSimilarity(t *testing.T) {
var tests = []struct {
s1 string
s2 string
expected float64
}{
{
s1: "word",
s2: "two words",
expected: 0.363636,
},
{
s1: "1600 Pennsylvania Ave",
s2: "1600 Penna Avenue",
expected: 0.428571,
},
}
for _, tt := range tests {
t.Run(tt.s1, func(t *testing.T) {
similarity := StringsSimilarity(tt.s1, tt.s2)
assert.InDelta(t, tt.expected, similarity, .0001)
})
}
} | explode_data.jsonl/30743 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 252
} | [
2830,
3393,
20859,
34402,
487,
1155,
353,
8840,
836,
8,
1476,
2405,
7032,
284,
3056,
1235,
341,
197,
1903,
16,
981,
914,
198,
197,
1903,
17,
981,
914,
198,
197,
42400,
2224,
21,
19,
198,
197,
59403,
197,
197,
515,
298,
1903,
16,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAltKeyring_List(t *testing.T) {
dir := t.TempDir()
keyring, err := New(t.Name(), BackendTest, dir, nil)
require.NoError(t, err)
list, err := keyring.List()
require.NoError(t, err)
require.Empty(t, list)
// Fails on creating unsupported pubKeyType
_, _, err = keyring.NewMnemonic("failing", English, sdk.FullFundraiserPath, notSupportedAlgo{})
require.EqualError(t, err, ErrUnsupportedSigningAlgo.Error())
// Create 3 keys
uid1, uid2, uid3 := "Zkey", "Bkey", "Rkey"
_, _, err = keyring.NewMnemonic(uid1, English, sdk.FullFundraiserPath, hd.Secp256k1)
require.NoError(t, err)
_, _, err = keyring.NewMnemonic(uid2, English, sdk.FullFundraiserPath, hd.Secp256k1)
require.NoError(t, err)
_, _, err = keyring.NewMnemonic(uid3, English, sdk.FullFundraiserPath, hd.Secp256k1)
require.NoError(t, err)
list, err = keyring.List()
require.NoError(t, err)
require.Len(t, list, 3)
// Check they are in alphabetical order
require.Equal(t, uid2, list[0].GetName())
require.Equal(t, uid3, list[1].GetName())
require.Equal(t, uid1, list[2].GetName())
} | explode_data.jsonl/73452 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 444
} | [
2830,
3393,
26017,
1592,
12640,
27104,
1155,
353,
8840,
836,
8,
341,
48532,
1669,
259,
65009,
6184,
2822,
23634,
12640,
11,
1848,
1669,
1532,
1155,
2967,
1507,
55260,
2271,
11,
5419,
11,
2092,
340,
17957,
35699,
1155,
11,
1848,
692,
144... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestClusterAdminListAcls(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
defer seedBroker.Close()
seedBroker.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetController(seedBroker.BrokerID()).
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()),
"DescribeAclsRequest": NewMockListAclsResponse(t),
"CreateAclsRequest": NewMockCreateAclsResponse(t),
})
config := NewTestConfig()
config.Version = V1_0_0_0
admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
r := Resource{ResourceType: AclResourceTopic, ResourceName: "my_topic"}
a := Acl{Host: "localhost", Operation: AclOperationAlter, PermissionType: AclPermissionAny}
err = admin.CreateACL(r, a)
if err != nil {
t.Fatal(err)
}
resourceName := "my_topic"
filter := AclFilter{
ResourceType: AclResourceTopic,
Operation: AclOperationRead,
ResourceName: &resourceName,
}
rAcls, err := admin.ListAcls(filter)
if err != nil {
t.Fatal(err)
}
if len(rAcls) <= 0 {
t.Fatal("no acls present")
}
err = admin.Close()
if err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/40800 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 472
} | [
2830,
3393,
28678,
7210,
852,
32,
18074,
1155,
353,
8840,
836,
8,
341,
197,
22602,
65545,
1669,
1532,
11571,
65545,
1155,
11,
220,
16,
340,
16867,
10320,
65545,
10421,
2822,
197,
22602,
65545,
4202,
3050,
1359,
2227,
9147,
14032,
60,
11... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestFlatfsConfig(t *testing.T) {
config := new(config.Datastore)
err := json.Unmarshal(defaultConfig, config)
if err != nil {
t.Fatal(err)
}
dir, err := ioutil.TempDir("", "ipfs-datastore-config-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir) // clean up
spec := make(map[string]interface{})
err = json.Unmarshal(flatfsConfig, &spec)
if err != nil {
t.Fatal(err)
}
dsc, err := fsrepo.AnyDatastoreConfig(spec)
if err != nil {
t.Fatal(err)
}
expected := `{"path":"blocks","shardFunc":"/repo/flatfs/shard/v1/next-to-last/2","type":"flatfs"}`
if dsc.DiskSpec().String() != expected {
t.Errorf("expected '%s' got '%s' as DiskId", expected, dsc.DiskSpec().String())
}
ds, err := dsc.Create(dir)
if err != nil {
t.Fatal(err)
}
if typ := reflect.TypeOf(ds).String(); typ != "*flatfs.Datastore" {
t.Errorf("expected '*flatfs.Datastore' got '%s'", typ)
}
} | explode_data.jsonl/27797 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 392
} | [
2830,
3393,
31019,
3848,
2648,
1155,
353,
8840,
836,
8,
341,
25873,
1669,
501,
8754,
3336,
4314,
340,
9859,
1669,
2951,
38097,
18978,
2648,
11,
2193,
340,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
532,
48532,
11,
18... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func TestTLSCodec_1(t *testing.T) {
t.Parallel()
l, p, _ := RandomListen("")
l0 := NewEventListener(l)
l0.AddCodecFactory(func(ctx Context) Codec {
c0 := NewTLSServerCodec()
// server certificates
c0.AddCertificate(testCert, testKEY)
c0.AddCertificate(demoCert, demoKEY)
// client ca
c0.RequireClientAuth(true)
c0.AddClientCa(demoCert)
c0.AddClientCa(testCert)
c0.AddClientCa(helloCert)
return c0
})
l0.OnAccept(func(ctx Context, c net.Conn) {
_, err := c.Write([]byte("hello"))
assert.NoError(t, err)
})
l0.OnAcceptError(func(ctx Context, err error) {
t.Log(err)
})
l0.Start()
time.Sleep(time.Second)
conn, _ := net.Dial("tcp", "127.0.0.1:"+p)
conn0 := NewConn(conn)
c1 := NewTLSClientCodec()
c1.AddCertificate(helloCert, helloKEY)
c1.SetServerName("demo.com")
c1.AddServerCa(demoCert)
c1.SkipVerify(false)
conn0.AddCodec(c1)
d, err := Read(conn0, 5)
time.Sleep(time.Second)
assert.NoError(t, err)
assert.Equal(t, "hello", d)
} | explode_data.jsonl/34689 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 442
} | [
2830,
3393,
45439,
36913,
62,
16,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
8810,
11,
281,
11,
716,
1669,
10612,
38714,
31764,
8810,
15,
1669,
1532,
7765,
2333,
340,
8810,
15,
1904,
36913,
4153,
18552,
7502,
9608,
8,
67077... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) {
tests := []*validateTest{
{
name: "valid",
},
{
name: "name invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].Name = "invalid"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['invalid'].name: The provided worker name 'invalid' is invalid.",
},
{
name: "vmSize invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].VMSize = "invalid"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.",
},
{
name: "vmSize too small (prod)",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.",
},
{
name: "vmSize too big (dev)",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3"
},
deploymentMode: deployment.Development,
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D4s_v3' is invalid.",
},
{
name: "disk too small",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].DiskSizeGB = 127
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].diskSizeGB: The provided worker disk size '127' is invalid.",
},
{
name: "subnetId invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].SubnetID = "invalid"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].subnetId: The provided worker VM subnet 'invalid' is invalid.",
},
{
name: "master and worker subnets not in same vnet",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].SubnetID = fmt.Sprintf("/subscriptions/%s/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/different-vnet/subnets/worker", subscriptionID)
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].subnetId: The provided worker VM subnet '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/different-vnet/subnets/worker' is invalid: must be in the same vnet as master VM subnet '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master'.",
},
{
name: "master and worker subnets not different",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].SubnetID = oc.Properties.MasterProfile.SubnetID
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].subnetId: The provided worker VM subnet '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master' is invalid: must be different to master VM subnet '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master'.",
},
{
name: "count too small",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].Count = 2
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].count: The provided worker count '2' is invalid.",
},
{
name: "count too big",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].Count = 21
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].count: The provided worker count '21' is invalid.",
},
}
// We do not perform this validation on update
runTests(t, testModeCreate, tests)
} | explode_data.jsonl/39084 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1401
} | [
2830,
3393,
5002,
24841,
28678,
11690,
17926,
21936,
8526,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
29838,
7067,
2271,
515,
197,
197,
515,
298,
11609,
25,
330,
1891,
756,
197,
197,
1583,
197,
197,
515,
298,
11609,
25,
330,
606,
8318... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestThirdEmployees (t *testing.T) {
hc, cfg := newHouseCall (t)
ctx, cancel := context.WithTimeout (context.Background(), time.Minute) // this should take < 1 minute
defer cancel()
// get our list of employees
employees, err := hc.ListEmployees (ctx, cfg.Token)
if err != nil { t.Fatal (err) }
assert.Equal (t, true, len(employees) > 0, "expecting at least 1 employee")
assert.NotEqual (t, "", employees[0].Id, "not filled in")
assert.NotEqual (t, "", employees[0].FirstName, "not filled in")
assert.NotEqual (t, "", employees[0].LastName, "not filled in")
assert.NotEqual (t, "", employees[0].Email, "not filled in")
assert.NotEqual (t, "", employees[0].Mobile, "not filled in")
assert.NotEqual (t, "", employees[0].Color, "not filled in")
/*
for _, e := range employees {
t.Logf ("%+v\n", e)
}
*/
} | explode_data.jsonl/76441 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 310
} | [
2830,
3393,
36975,
54252,
320,
83,
353,
8840,
836,
8,
341,
9598,
66,
11,
13286,
1669,
501,
28607,
7220,
320,
83,
692,
20985,
11,
9121,
1669,
2266,
26124,
7636,
320,
2147,
19047,
1507,
882,
75770,
8,
442,
419,
1265,
1896,
366,
220,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestTemplateVars(t *testing.T) {
plugin := Plugin{
Repo: Repo{
Name: "go-hello",
Namespace: "appleboy",
},
Commit: Commit{
Sha: "e7c4f0a63ceeb42a39ac7806f7b51f3f0d204fd2",
Author: "Bo-Yi Wu",
Branch: "master",
Message: "This is a test commit msg",
},
Build: Build{
Number: 101,
Status: "success",
Link: "https://github.com/appleboy/go-hello",
},
Config: Config{
Token: os.Getenv("TELEGRAM_TOKEN"),
To: []string{os.Getenv("TELEGRAM_TO")},
Format: formatMarkdown,
MessageFile: "tests/message_template.txt",
TemplateVars: `{"env":"testing","version":"1.2.0-SNAPSHOT"}`,
},
}
err := plugin.Exec()
assert.Nil(t, err)
} | explode_data.jsonl/17940 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 355
} | [
2830,
3393,
7275,
28305,
1155,
353,
8840,
836,
8,
341,
197,
9138,
1669,
21245,
515,
197,
197,
25243,
25,
71509,
515,
298,
21297,
25,
414,
330,
3346,
2832,
4791,
756,
298,
90823,
25,
330,
22377,
17184,
756,
197,
197,
1583,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFromCLIContext_SetsProviderConfig(t *testing.T) {
i := fmt.Sprintf("%v", rand.Int())
os.Setenv("TRAVIS_WORKER_FAKE_FOO", i)
runAppTest(t, []string{
"--provider-name=fake",
}, func(c *cli.Context) error {
cfg := FromCLIContext(c)
assert.NotNil(t, cfg.ProviderConfig)
assert.Equal(t, i, cfg.ProviderConfig.Get("FOO"))
return nil
})
} | explode_data.jsonl/6897 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 157
} | [
2830,
3393,
3830,
63959,
1972,
1098,
1415,
5179,
2648,
1155,
353,
8840,
836,
8,
341,
8230,
1669,
8879,
17305,
4430,
85,
497,
10382,
7371,
2398,
25078,
4202,
3160,
445,
2378,
98716,
34044,
640,
19058,
3390,
1400,
19499,
497,
600,
692,
56... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBulkInsertionQueryBuild(t *testing.T) {
var mockDB dal.DBClient
testMockDB := MockDBClient{}
// let's do some plumbing here before we send away our mock
testMockDB.ExpectedBulkQuery = insertStatments
testMockDB.TestObject = t
mockDB = &testMockDB
var rm dal.RequestModel
rm.IntializeDBSession(&mockDB)
rm.CreateRequestStats(requests)
} | explode_data.jsonl/48652 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 128
} | [
2830,
3393,
88194,
13780,
290,
2859,
11066,
1155,
353,
8840,
836,
8,
341,
2405,
7860,
3506,
24738,
22537,
2959,
271,
18185,
11571,
3506,
1669,
14563,
3506,
2959,
16094,
197,
322,
1077,
594,
653,
1045,
43188,
1588,
1573,
582,
3624,
3123,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDownloadBatchDownloadCallbackFailed(t *testing.T) {
dm, cancel := newTestDownloadManager(t)
defer cancel()
reader := ioutil.NopCloser(strings.NewReader("some batch data"))
mss := dm.sharedstorage.(*sharedstoragemocks.Plugin)
mss.On("DownloadData", mock.Anything, "ref1").Return(reader, nil)
mci := dm.callbacks.(*shareddownloadmocks.Callbacks)
mci.On("SharedStorageBatchDownloaded", "ns1", "ref1", []byte("some batch data")).Return(nil, fmt.Errorf("pop"))
_, _, err := dm.downloadBatch(dm.ctx, downloadBatchData{
Namespace: "ns1",
PayloadRef: "ref1",
})
assert.Regexp(t, "pop", err)
mss.AssertExpectations(t)
mci.AssertExpectations(t)
} | explode_data.jsonl/45629 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 257
} | [
2830,
3393,
11377,
21074,
11377,
7494,
9408,
1155,
353,
8840,
836,
8,
1476,
2698,
76,
11,
9121,
1669,
501,
2271,
11377,
2043,
1155,
340,
16867,
9121,
2822,
61477,
1669,
43144,
2067,
453,
51236,
799,
51442,
68587,
445,
14689,
7162,
821,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestExecuteCommandsNoFailures(t *testing.T) {
n := newNetwork(4, 1)
cmd := command.NewTestingCommand("a")
n.duobftPeers[0].onRequest(cmd)
if !n.waitExecuteInstance(cmd, false /* quorum */) {
t.Fatalf("command execution failed, cmd %+v never committed", cmd)
}
} | explode_data.jsonl/49439 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 103
} | [
2830,
3393,
17174,
30479,
2753,
19524,
1413,
1155,
353,
8840,
836,
8,
341,
9038,
1669,
501,
12320,
7,
19,
11,
220,
16,
692,
25920,
1669,
3210,
7121,
16451,
4062,
445,
64,
1138,
9038,
950,
84,
674,
723,
10197,
388,
58,
15,
936,
263,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestManyCommandsNoFailuresFast(t *testing.T) {
count := 13
maxFailures := 4
n := newNetwork(count, maxFailures)
cmds := make([]*commandpb.Command, count*5)
for i := range cmds {
cmd := command.NewTestingCommand(fmt.Sprintf("e-%d", i))
cmd.Meta = []byte{1}
cmds[i] = cmd
n.duobftPeers[0].onRequest(cmd)
}
for _, cmd := range cmds {
if !n.waitExecuteInstance(cmd, false /* quorum */) {
t.Fatalf("command execution failed, instance %+v never installed", cmd)
}
}
} | explode_data.jsonl/49442 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 199
} | [
2830,
3393,
8441,
30479,
2753,
19524,
1413,
32174,
1155,
353,
8840,
836,
8,
341,
18032,
1669,
220,
16,
18,
198,
22543,
19524,
1413,
1669,
220,
19,
271,
9038,
1669,
501,
12320,
11512,
11,
1932,
19524,
1413,
340,
25920,
82,
1669,
1281,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestListAccounts(t *testing.T) {
tableConn := testNewConnection(t, table.NewWriter())
JSONConn := testNewConnection(t, json.NewWriter())
tests := []struct {
name string
conn Connection
exp []byte
}{
{
name: "should write table output correctly",
conn: tableConn,
exp: []byte(testListAccountsTable),
},
{
name: "should write json output correctly",
conn: JSONConn,
exp: []byte(testListAccountsJSON),
},
}
var buf bytes.Buffer
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
tc.conn.writer.SetOutputMirror(&buf)
if err := tc.conn.ListAccounts(&cli.Context{}); err != nil {
t.Errorf("error running test: %v", err)
}
got := buf.Bytes()
if bytes.Compare(got, tc.exp) != 0 {
t.Errorf("unexpected bytes: got %s, exp %s", got, tc.exp)
}
})
buf.Reset()
}
} | explode_data.jsonl/64646 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 347
} | [
2830,
3393,
852,
41369,
1155,
353,
8840,
836,
8,
341,
26481,
9701,
1669,
1273,
3564,
4526,
1155,
11,
1965,
7121,
6492,
2398,
197,
5370,
9701,
1669,
1273,
3564,
4526,
1155,
11,
2951,
7121,
6492,
12367,
78216,
1669,
3056,
1235,
341,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestDockerKeyringLookup(t *testing.T) {
empty := docker.AuthConfiguration{}
ada := docker.AuthConfiguration{
Username: "ada",
Password: "smash",
Email: "ada@example.com",
}
grace := docker.AuthConfiguration{
Username: "grace",
Password: "squash",
Email: "grace@example.com",
}
dk := newDockerKeyring()
dk.add("bar.example.com/pong", grace)
dk.add("bar.example.com", ada)
tests := []struct {
image string
match docker.AuthConfiguration
ok bool
}{
// direct match
{"bar.example.com", ada, true},
// direct match deeper than other possible matches
{"bar.example.com/pong", grace, true},
// no direct match, deeper path ignored
{"bar.example.com/ping", ada, true},
// match first part of path token
{"bar.example.com/pongz", grace, true},
// match regardless of sub-path
{"bar.example.com/pong/pang", grace, true},
// no host match
{"example.com", empty, false},
{"foo.example.com", empty, false},
}
for i, tt := range tests {
match, ok := dk.lookup(tt.image)
if tt.ok != ok {
t.Errorf("case %d: expected ok=%t, got %t", i, tt.ok, ok)
}
if !reflect.DeepEqual(tt.match, match) {
t.Errorf("case %d: expected match=%#v, got %#v", i, tt.match, match)
}
}
} | explode_data.jsonl/23977 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 499
} | [
2830,
3393,
35,
13659,
1592,
12640,
34247,
1155,
353,
8840,
836,
8,
341,
197,
3194,
1669,
26588,
25233,
7688,
31483,
197,
2584,
1669,
26588,
25233,
7688,
515,
197,
197,
11115,
25,
330,
2584,
756,
197,
197,
4876,
25,
330,
3563,
988,
75... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestLegacyReplicaCalcEmptyMetrics(t *testing.T) {
tc := legacyReplicaCalcTestCase{
currentReplicas: 4,
expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"),
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: makePodMetricLevels(),
targetUtilization: 100,
},
}
tc.runTest(t)
} | explode_data.jsonl/26690 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 181
} | [
2830,
3393,
77415,
18327,
15317,
47168,
3522,
27328,
1155,
353,
8840,
836,
8,
341,
78255,
1669,
19588,
18327,
15317,
47168,
16458,
515,
197,
20121,
18327,
52210,
25,
220,
19,
345,
197,
42400,
1454,
25,
256,
8879,
13080,
445,
45928,
311,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestZCashEncodeToBytes(t *testing.T) {
tests := []TestcaseEncode{
{
name: "Normal",
input: "t1RygJmrLdNGgi98gUgEJDTVaELTAYWoMBy",
output: "76a91458e71790e51ab7558c05a6067cfc4926aa8c44dd88ac",
},
{
name: "Normal2",
input: "t1TWk2mmvESDnE4dmCfT7MQ97ij6ZqLpNVU",
output: "76a91469bf38acef973293c07f05c778eb1209748e8d5288ac",
},
{
name: "Normal3",
input: "t3RD6RFKhWSotNbPEY4Vw7Ku9QCfKkzrbBL",
output: "a91448e71790e51ab7558c05a6067cfc4926aa8c44dd87",
},
{
name: "Normal4",
input: "t1Wg9uPPAfwhBWeRjtDPa5ZHNzyBx9rJVKY",
output: "76a9148c6f453157897ce2e6de413f329d995fe0d8f90288ac",
},
{
name: "Normal5",
input: "t1gaySCXCYtXE3ygP38YuWtVZczsEbdjG49",
output: "76a914f925b59e1dc043ad7f0b7e85ea05b06dfe83413888ac",
},
{
name: "Invalid Base58",
input: "t1RygJmrLdNGgi98+UgEJDTVaELTAYWoMBy",
err: errors.New("Bad Base58 string"),
},
{
name: "Too short",
input: "t1RygJmrLdNGgi98gUgEJDTVaELTAYW",
err: errors.New("Bad Base58 checksum"),
},
{
name: "Correct length, but bad checksum",
input: "t1RygJmrLdNGgi98gUgEJDTVaELTAYWoMBz",
err: errors.New("Bad Base58 checksum"),
},
{
name: "Valid base58 but too short",
input: "TJRyWwFs9wTFGZg3JbrVriFbNfCug5tDeC",
err: errors.New("Invalid decoded length"),
},
{
name: "Valid base 58 and checksum, but prefix is bad",
input: "2NRbuP5YfzRNEa1RibT5kXay1VgvQHnydZY1",
err: errors.New("Invalid prefix"),
},
}
RunTestsEncode(t, slip44.ZCASH, tests)
} | explode_data.jsonl/9964 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 878
} | [
2830,
3393,
57,
47441,
32535,
1249,
7078,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
2271,
5638,
32535,
515,
197,
197,
515,
298,
11609,
25,
256,
330,
12206,
756,
298,
22427,
25,
220,
330,
83,
16,
49,
48765,
41,
20946,
43,
67,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestReadInputDefaultValue verifies that readInput falls back to the
// supplied default when the scanner yields only whitespace.
func TestReadInputDefaultValue(t *testing.T) {
	// Stub the package-level scanner so the prompt "reads" a blank answer.
	scan = func() string { return " " }
	const fallback = "default"
	got := readInput("text", fallback, false)
	assert.Equal(t, fallback, got)
} | explode_data.jsonl/67956 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 69
} | [
2830,
3393,
4418,
2505,
41533,
1155,
353,
8840,
836,
8,
341,
1903,
4814,
284,
2915,
368,
914,
341,
197,
853,
330,
6228,
197,
532,
11940,
1130,
1669,
330,
2258,
698,
16309,
1669,
1349,
2505,
445,
1318,
497,
20163,
11,
895,
692,
6948,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestLastIndexOf checks String.prototype.lastIndexOf with an explicit
// fromIndex: searching "abcabab" backwards from index 3 yields 3.
func TestLastIndexOf(t *testing.T) {
	testScript1(`
	"abcabab".lastIndexOf("ab", 3)
	`, intToValue(3), t)
} | explode_data.jsonl/10452 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 57
} | [
2830,
3393,
5842,
27376,
1155,
353,
8840,
836,
8,
341,
4777,
53679,
284,
1565,
271,
197,
1,
13683,
370,
370,
3263,
4259,
27376,
445,
370,
497,
220,
18,
340,
197,
19324,
18185,
5910,
16,
7,
24787,
11,
526,
1249,
1130,
7,
18,
701,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
// TestJSONMarshal verifies that JSONMarshal and JSONMarshalIndent render an
// empty ContainerService with the expected (compact and indented) layouts.
func TestJSONMarshal(t *testing.T) {
	input := &ContainerService{}
	result, err := JSONMarshal(input, false)
	if err != nil {
		// Previously discarded with _; a marshal failure should fail the test.
		t.Fatalf("JSONMarshal returned an unexpected error: %v", err)
	}
	expected := "{\"id\":\"\",\"location\":\"\",\"name\":\"\"}\n"
	if string(result) != expected {
		t.Fatalf("JSONMarshal returned unexpected result: expected %s but got %s", expected, string(result))
	}
	result, err = JSONMarshalIndent(input, "", "", false)
	if err != nil {
		t.Fatalf("JSONMarshalIndent returned an unexpected error: %v", err)
	}
	expected = "{\n\"id\": \"\",\n\"location\": \"\",\n\"name\": \"\"\n}\n"
	if string(result) != expected {
		// Fixed message: it previously said "JSONMarshal" for the Indent case
		// and was missing the space in "%sbut got".
		t.Fatalf("JSONMarshalIndent returned unexpected result: expected\n%s\nbut got\n%s", expected, string(result))
	}
} | explode_data.jsonl/7023 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 198
} | [
2830,
3393,
5370,
55438,
1155,
353,
8840,
836,
8,
341,
22427,
1669,
609,
4502,
1860,
16094,
9559,
11,
716,
1669,
4718,
55438,
5384,
11,
895,
340,
42400,
1669,
54734,
307,
23488,
34333,
2527,
23488,
34333,
606,
23488,
2105,
11035,
77,
69... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestInstance_ForEach_False verifies that ForEach stops iterating as soon
// as the visitor callback returns false.
func TestInstance_ForEach_False(t *testing.T) {
	g := NewGomegaWithT(t)
	coll := collection.New(basicmeta.K8SCollection1)
	coll.Set(data.EntryN1I1V2)
	coll.Set(data.EntryN2I2V2)
	coll.Set(data.EntryN3I3V1)
	// Returning false immediately should visit exactly one entry.
	var visited []*resource.Instance
	coll.ForEach(func(res *resource.Instance) bool {
		visited = append(visited, res)
		return false
	})
	g.Expect(visited).To(HaveLen(1))
	// Returning false after the second entry should visit exactly two.
	visited = nil
	coll.ForEach(func(res *resource.Instance) bool {
		visited = append(visited, res)
		return len(visited) < 2
	})
	g.Expect(visited).To(HaveLen(2))
} | explode_data.jsonl/70420 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 219
} | [
2830,
3393,
2523,
84368,
4854,
1400,
710,
1155,
353,
8840,
836,
8,
341,
3174,
1669,
1532,
38,
32696,
2354,
51,
1155,
692,
88656,
1669,
4426,
7121,
1883,
5971,
5490,
11352,
23,
3540,
1908,
16,
340,
88656,
4202,
2592,
22330,
45,
16,
40,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestListVolumes drives the CSI driver's ListVolumes RPC against a mocked
// Azure DisksClient, covering pagination (max_entries / starting_token),
// an out-of-range starting token, and a backend listing failure.
func TestListVolumes(t *testing.T) {
	testCases := []struct {
		name     string
		testFunc func(t *testing.T)
	}{
		{
			// No pagination: all disks returned, NextToken stays empty.
			name: "Valid list without max_entries or starting_token",
			testFunc: func(t *testing.T) {
				req := csi.ListVolumesRequest{}
				d, _ := NewFakeDriver(t)
				fakeVolumeID := "test"
				disk := compute.Disk{ID: &fakeVolumeID}
				disks := []compute.Disk{}
				disks = append(disks, disk)
				ctrl := gomock.NewController(t)
				defer ctrl.Finish()
				d.cloud.DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes()
				expectedErr := error(nil)
				listVolumesResponse, err := d.ListVolumes(context.TODO(), &req)
				if !reflect.DeepEqual(err, expectedErr) {
					t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
				}
				if listVolumesResponse.NextToken != "" {
					t.Errorf("actualNextToken: (%v), expectedNextToken: (%v)", listVolumesResponse.NextToken, "")
				}
			},
		},
		{
			// Two disks, page size 1: one entry returned, NextToken advances to "1".
			name: "Valid list with max_entries",
			testFunc: func(t *testing.T) {
				req := csi.ListVolumesRequest{
					MaxEntries: 1,
				}
				d, _ := NewFakeDriver(t)
				fakeVolumeID := "test"
				disk1, disk2 := compute.Disk{ID: &fakeVolumeID}, compute.Disk{ID: &fakeVolumeID}
				disks := []compute.Disk{}
				disks = append(disks, disk1, disk2)
				ctrl := gomock.NewController(t)
				defer ctrl.Finish()
				d.cloud.DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes()
				expectedErr := error(nil)
				listVolumesResponse, err := d.ListVolumes(context.TODO(), &req)
				if !reflect.DeepEqual(err, expectedErr) {
					t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
				}
				if len(listVolumesResponse.Entries) != int(req.MaxEntries) {
					t.Errorf("Actual number of entries: (%v), Expected number of entries: (%v)", len(listVolumesResponse.Entries), req.MaxEntries)
				}
				if listVolumesResponse.NextToken != "1" {
					t.Errorf("actualNextToken: (%v), expectedNextToken: (%v)", listVolumesResponse.NextToken, "1")
				}
			},
		},
		{
			// Starting at token "1" returns the second disk and exhausts the list.
			name: "Valid list with max_entries and starting_token",
			testFunc: func(t *testing.T) {
				req := csi.ListVolumesRequest{
					StartingToken: "1",
					MaxEntries:    1,
				}
				d, _ := NewFakeDriver(t)
				fakeVolumeID1, fakeVolumeID12 := "test1", "test2"
				disk1, disk2 := compute.Disk{ID: &fakeVolumeID1}, compute.Disk{ID: &fakeVolumeID12}
				disks := []compute.Disk{}
				disks = append(disks, disk1, disk2)
				ctrl := gomock.NewController(t)
				defer ctrl.Finish()
				d.cloud.DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes()
				expectedErr := error(nil)
				listVolumesResponse, err := d.ListVolumes(context.TODO(), &req)
				if !reflect.DeepEqual(err, expectedErr) {
					t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
				}
				if len(listVolumesResponse.Entries) != int(req.MaxEntries) {
					t.Errorf("Actual number of entries: (%v), Expected number of entries: (%v)", len(listVolumesResponse.Entries), req.MaxEntries)
				}
				if listVolumesResponse.NextToken != "" {
					t.Errorf("actualNextToken: (%v), expectedNextToken: (%v)", listVolumesResponse.NextToken, "")
				}
				if listVolumesResponse.Entries[0].Volume.VolumeId != fakeVolumeID12 {
					t.Errorf("actualVolumeId: (%v), expectedVolumeId: (%v)", listVolumesResponse.Entries[0].Volume.VolumeId, fakeVolumeID12)
				}
			},
		},
		{
			// Token beyond the end of the (empty) listing → ABORTED.
			name: "ListVolumes request with starting token but no entries in response",
			testFunc: func(t *testing.T) {
				req := csi.ListVolumesRequest{
					StartingToken: "1",
				}
				d, _ := NewFakeDriver(t)
				disks := []compute.Disk{}
				ctrl := gomock.NewController(t)
				defer ctrl.Finish()
				d.cloud.DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any()).Return(disks, nil).AnyTimes()
				expectedErr := status.Error(codes.Aborted, "ListVolumes starting token(1) on rg(rg) is greater than total number of volumes")
				_, err := d.ListVolumes(context.TODO(), &req)
				if !reflect.DeepEqual(err, expectedErr) {
					t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
				}
			},
		},
		{
			// Backend listing failure is wrapped into an INTERNAL status error.
			name: "ListVolumes list resource error",
			testFunc: func(t *testing.T) {
				req := csi.ListVolumesRequest{
					StartingToken: "1",
				}
				d, _ := NewFakeDriver(t)
				disks := []compute.Disk{}
				ctrl := gomock.NewController(t)
				defer ctrl.Finish()
				rerr := &retry.Error{
					RawError: fmt.Errorf("test"),
				}
				d.cloud.DisksClient.(*mockdiskclient.MockInterface).EXPECT().ListByResourceGroup(gomock.Any(), gomock.Any()).Return(disks, rerr).AnyTimes()
				expectedErr := status.Error(codes.Internal, "ListVolumes on rg(rg) failed with error: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: test")
				_, err := d.ListVolumes(context.TODO(), &req)
				if !reflect.DeepEqual(err, expectedErr) {
					t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
				}
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, tc.testFunc)
	}
} | explode_data.jsonl/59395 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2215
} | [
2830,
3393,
852,
96325,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
198,
197,
18185,
9626,
2915,
1155,
353,
8840,
836,
340,
197,
59403,
197,
197,
515,
298,
11609,
25,
330,
4088,
1140,
2041,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestGroupSerialize round-trips the group fixture: parsing testGroup and
// re-serializing the entries must reproduce the input byte-for-byte.
func TestGroupSerialize(t *testing.T) {
	entries, err := ParseGroup(strings.NewReader(testGroup))
	if err != nil {
		t.Fatal(err)
	}
	serialized, err := GroupSerialize(entries)
	if err != nil {
		t.Fatal(err)
	}
	if got := string(serialized); got != testGroup {
		t.Errorf("group = %q, want %q", got, testGroup)
	}
} | explode_data.jsonl/58842 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 132
} | [
2830,
3393,
2808,
15680,
1155,
353,
8840,
836,
8,
341,
197,
12940,
11,
1848,
1669,
14775,
2808,
51442,
68587,
8623,
2808,
1171,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
630,
13967,
11,
1848,
1669,
5737,
15680,
72009,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestBoxHit0 runs a single solver step for a tilted unit box contacting a
// large static slab (mass 0) and pins the resulting linear/angular
// velocities and the post-step world transform against golden values.
// NOTE(review): the golden strings below depend on the solver's default
// timestep (sol.info.timestep) — confirm if solver defaults change.
func TestBoxHit0(t *testing.T) {
	// create two bodies for the solver.
	slab := newBody(NewBox(50, 50, 50)).setProps(0, 0)
	slab.World().Loc.SetS(0, -50, 0)
	slab.updateInertiaTensor()
	box := newBody(NewBox(1, 1, 1)).setProps(1, 0)
	box.World().Loc.SetS(-5, 1.388006, -3)
	box.World().Rot.SetS(0.1825742, 0.3651484, 0.5477226, 0.7302967)
	box.lvel.SetS(0, -16.599991, 0)
	box.lfor.SetS(0, -10, 0)
	box.updateInertiaTensor()
	// set up the solver input: one contact point below the box, pushing down.
	bodies := map[uint32]*body{0: slab, 1: box}
	points := []*pointOfContact{newPoc()}
	points[0].point.SetS(-5.2, -0.011994, -4)
	points[0].normal.SetS(0, -1, 0)
	points[0].depth = -0.011994
	pair := newContactPair(slab, box)
	pair.mergeContacts(points) // initialize solver info.
	pairs := map[uint64]*contactPair{pair.pid: pair}
	// run the solver once to get updated velocities.
	sol := newSolver()
	sol.solve(bodies, pairs)
	lv, av := box.lvel, box.avel
	// check the linear velocity
	gotlv := fmt.Sprintf("lvel %+.4f %+.4f %+.4f", lv.X, lv.Y, lv.Z)
	wantlv := "lvel +0.5168 -10.1059 +0.0000"
	if gotlv != wantlv {
		t.Errorf("Linv got %s, wanted %s", gotlv, wantlv)
	}
	// check the angular velocity
	gotav := fmt.Sprintf("avel %+.4f %+.4f %+.4f", av.X, av.Y, av.Z)
	wantav := "avel +10.0412 -0.7752 -0.9229"
	if gotav != wantav {
		t.Errorf("Angv got %s, wanted %s", gotav, wantav)
	}
	// check that the transform is updated correctly.
	box.updateWorldTransform(sol.info.timestep)
	bl := box.world.Loc
	gotl := fmt.Sprintf("bloc %f %f %f", bl.X, bl.Y, bl.Z)
	wantl := "bloc -4.989663 1.185889 -3.000000"
	if gotl != wantl {
		t.Errorf("Loc got %s, wanted %s", gotl, wantl)
	}
	qx, qy, qz, qw := box.world.Rot.GetS()
	gotr := fmt.Sprintf("brot %f %f %f %f", qx, qy, qz, qw)
	wantr := "brot 0.253972 0.301044 0.576211 0.716136"
	if gotr != wantr {
		t.Errorf("Rot got %s, wanted %s", gotr, wantr)
	}
} | explode_data.jsonl/76169 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 869
} | [
2830,
3393,
1611,
19498,
15,
1155,
353,
8840,
836,
8,
1476,
197,
322,
1855,
1378,
12866,
369,
279,
28961,
624,
1903,
14380,
1669,
501,
5444,
35063,
1611,
7,
20,
15,
11,
220,
20,
15,
11,
220,
20,
15,
4579,
746,
5992,
7,
15,
11,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestStartCommandLineWithoutCommands verifies that StartCommandLine rejects
// an argument list that contains only flags and no command word.
func TestStartCommandLineWithoutCommands(t *testing.T) {
	want := "Please pass some command"
	os.Args = []string{
		"--test=123",
		"--test2=456",
	}
	err := StartCommandLine(emptyOnlyOneCommand)
	// Guard against a nil error: the original code called err.Error()
	// unconditionally, which panics if the call unexpectedly succeeds.
	if err == nil {
		t.Fatalf("Error handling error: want %s, got nil", want)
	}
	if err.Error() != want {
		t.Errorf("Error handling error: want %s, got %s", want, err.Error())
	}
} | explode_data.jsonl/30299 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 111
} | [
2830,
3393,
3479,
71885,
26040,
30479,
1155,
353,
8840,
836,
8,
341,
50780,
1669,
330,
5501,
1494,
1045,
3210,
698,
25078,
51015,
284,
3056,
917,
515,
197,
197,
74757,
1944,
28,
16,
17,
18,
756,
197,
197,
74757,
1944,
17,
28,
19,
20... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
// TestLastAppliedReplicasExplicitlyDisabled verifies that when the
// last-applied-replicas annotation is explicitly set to "disabled",
// BeforeCreate keeps the annotation value as-is instead of replacing it.
func TestLastAppliedReplicasExplicitlyDisabled(t *testing.T) {
	t.Parallel()
	deploymentSpec := apps_v1.Deployment{
		TypeMeta: meta_v1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: apps_v1.SchemeGroupVersion.String(),
		},
		ObjectMeta: meta_v1.ObjectMeta{
			Namespace: testNs,
			Annotations: map[string]string{
				// The user opted out of replica tracking.
				LastAppliedReplicasAnnotation: "disabled",
			},
		},
		Spec: apps_v1.DeploymentSpec{
			Template: core_v1.PodTemplateSpec{
				Spec: core_v1.PodSpec{
					Containers: []core_v1.Container{
						core_v1.Container{
							Image: "some/image:tag",
						},
					},
				},
			},
		},
	}
	spec := runtimeToUnstructured(t, &deploymentSpec)
	logger := zaptest.NewLogger(t)
	defer logger.Sync() // nolint: errcheck
	store := speccheckertesting.FakeStore{Namespace: testNs}
	updatedSpec, err := deployment{}.BeforeCreate(&specchecker.Context{Logger: logger, Store: store}, spec)
	require.NoError(t, err)
	deploymentCheck := updatedSpec.(*apps_v1.Deployment)
	// The annotation must survive processing with its original value.
	assert.Contains(t, deploymentCheck.Annotations, LastAppliedReplicasAnnotation)
	assert.Equal(t, "disabled", deploymentCheck.Annotations[LastAppliedReplicasAnnotation])
} | explode_data.jsonl/78690 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 463
} | [
2830,
3393,
5842,
75856,
18327,
52210,
98923,
398,
25907,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
197,
82213,
8327,
1669,
10500,
2273,
16,
34848,
39130,
515,
197,
27725,
12175,
25,
8823,
2273,
16,
10184,
12175,
515,
298,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestActiveConfig checks Config.ActiveConfig: parsed configuration must be
// echoed back with secrets scrubbed (service credentials, key material) and
// defaults injected (id, version, default decisions), while malformed keys
// or credentials sections surface the expected errors.
func TestActiveConfig(t *testing.T) {
	common := `"labels": {
			"region": "west"
		},
		"keys": {
			"global_key": {
				"algorithm": HS256,
				"key": "secret"
			},
			"local_key": {
				"private_key": "some_private_key"
			}
		},
		"decision_logs": {
			"service": "acmecorp",
			"reporting": {
				"min_delay_seconds": 300,
				"max_delay_seconds": 600
			}
		},
		"plugins": {
			"some-plugin": {}
		},
		"discovery": {"name": "config"}`
	serviceObj := `"services": {
			"acmecorp": {
				"url": "https://example.com/control-plane-api/v1",
				"response_header_timeout_seconds": 5,
				"headers": {"foo": "bar"},
				"credentials": {"bearer": {"token": "test"}}
			},
			"opa.example.com": {
				"url": "https://opa.example.com",
				"headers": {"foo": "bar"},
				"credentials": {"gcp_metadata": {"audience": "test"}}
			}
		},`
	servicesList := `"services": [
			{
				"name": "acmecorp",
				"url": "https://example.com/control-plane-api/v1",
				"response_header_timeout_seconds": 5,
				"headers": {"foo": "bar"},
				"credentials": {"bearer": {"token": "test"}}
			},
			{
				"name": "opa.example.com",
				"url": "https://opa.example.com",
				"headers": {"foo": "bar"},
				"credentials": {"gcp_metadata": {"audience": "test"}}
			}
		],`
	// Note: "key"/"private_key" and "credentials" are absent below — the
	// active config must not leak secrets.
	expectedCommon := fmt.Sprintf(`"labels": {
			"id": "foo",
			"version": %v,
			"region": "west"
		},
		"keys": {
			"global_key": {
				"algorithm": HS256
			},
			"local_key": {}
		},
		"decision_logs": {
			"service": "acmecorp",
			"reporting": {
				"min_delay_seconds": 300,
				"max_delay_seconds": 600
			}
		},
		"plugins": {
			"some-plugin": {}
		},
		"default_authorization_decision": "/system/authz/allow",
		"default_decision": "/system/main",
		"discovery": {"name": "config"}`, version.Version)
	expectedServiceObj := `"services": {
			"acmecorp": {
				"url": "https://example.com/control-plane-api/v1",
				"response_header_timeout_seconds": 5,
				"headers": {"foo": "bar"}
			},
			"opa.example.com": {
				"url": "https://opa.example.com",
				"headers": {"foo": "bar"}
			}
		},`
	expectedServicesList := `"services": [
			{
				"name": "acmecorp",
				"url": "https://example.com/control-plane-api/v1",
				"response_header_timeout_seconds": 5,
				"headers": {"foo": "bar"}
			},
			{
				"name": "opa.example.com",
				"url": "https://opa.example.com",
				"headers": {"foo": "bar"}
			}
		],`
	badKeysConfig := []byte(`{
		"keys": [
			{
				"algorithm": "HS256"
			}
		]
	}`)
	badServicesConfig := []byte(`{
		"services": {
			"acmecorp": ["foo"]
		}
	}`)
	tests := map[string]struct {
		raw      []byte
		expected []byte
		wantErr  bool
		err      error
	}{
		"valid_config_with_svc_object": {
			[]byte(fmt.Sprintf(`{ %v %v }`, serviceObj, common)),
			[]byte(fmt.Sprintf(`{ %v %v }`, expectedServiceObj, expectedCommon)),
			false,
			nil,
		},
		"valid_config_with_svc_list": {
			[]byte(fmt.Sprintf(`{ %v %v }`, servicesList, common)),
			[]byte(fmt.Sprintf(`{ %v %v }`, expectedServicesList, expectedCommon)),
			false,
			nil,
		},
		"invalid_config_with_bad_keys": {
			badKeysConfig,
			nil,
			true,
			fmt.Errorf("illegal keys config type: []interface {}"),
		},
		"invalid_config_with_bad_creds": {
			badServicesConfig,
			nil,
			true,
			fmt.Errorf("type assertion error"),
		},
	}
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			conf, err := ParseConfig(tc.raw, "foo")
			if err != nil {
				t.Fatal(err)
			}
			actual, err := conf.ActiveConfig()
			if tc.wantErr {
				if err == nil {
					t.Fatal("Expected error but got nil")
				}
				if tc.err != nil && tc.err.Error() != err.Error() {
					t.Fatalf("Expected error message %v but got %v", tc.err.Error(), err.Error())
				}
			} else {
				if err != nil {
					t.Fatalf("Unexpected error %v", err)
				}
				var expected map[string]interface{}
				if err := util.Unmarshal(tc.expected, &expected); err != nil {
					t.Fatal(err)
				}
				if !reflect.DeepEqual(actual, expected) {
					t.Fatalf("want %v got %v", expected, actual)
				}
			}
		})
	}
} | explode_data.jsonl/9371 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1897
} | [
2830,
3393,
5728,
2648,
1155,
353,
8840,
836,
8,
1476,
83825,
1669,
53305,
16873,
788,
341,
298,
197,
1,
3943,
788,
330,
11039,
698,
197,
197,
1583,
197,
197,
1,
10563,
788,
341,
298,
197,
1,
9752,
3097,
788,
341,
571,
197,
1,
196... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
// Test_NewReaderWithQiniuLogFile reads the first 8 bytes from a qiniu-style
// rotated log directory, checking that files not matching `logkit.log-*`
// (and the ".pid" suffix) are skipped.
func Test_NewReaderWithQiniuLogFile(t *testing.T) {
	createDir()
	meta, err := NewMeta(metaDir, metaDir, testlogpath, ModeDir, "", defautFileRetention)
	if err != nil {
		// Fatal, not Error: continuing with a nil meta would panic inside
		// NewSeqFile and mask the real failure.
		t.Fatal(err)
	}
	createQiniuLogFile(dir)
	createInvalidSuffixFile(dir)
	defer destroyFile()
	sf, err := NewSeqFile(meta, dir, false, []string{".pid"}, `logkit.log-*`, WhenceOldest)
	if err != nil {
		// Fatal, not Error: sf is nil on failure and sf.Read below would panic.
		t.Fatal(err)
	}
	buffer := make([]byte, 8)
	_, err = sf.Read(buffer)
	if err != nil {
		t.Error(err)
	}
	if string(buffer) != "12345678" {
		t.Errorf("exp 12345678 but got %v", string(buffer))
	}
} | explode_data.jsonl/19692 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 240
} | [
2830,
3393,
39582,
5062,
2354,
48,
6591,
84,
98857,
1155,
353,
8840,
836,
8,
341,
39263,
6184,
741,
84004,
11,
1848,
1669,
1532,
12175,
45119,
6184,
11,
8823,
6184,
11,
1273,
839,
2343,
11,
14562,
6184,
11,
7342,
707,
2717,
1703,
8632... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestTraceExporter_WithShutdown_ReturnError verifies that the error
// produced by a WithShutdown hook is surfaced by Exporter.Shutdown.
func TestTraceExporter_WithShutdown_ReturnError(t *testing.T) {
	wantErr := errors.New("my_error")
	failingShutdown := func(context.Context) error {
		return wantErr
	}
	te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(nil), WithShutdown(failingShutdown))
	assert.NotNil(t, te)
	assert.NoError(t, err)
	assert.Equal(t, te.Shutdown(context.Background()), wantErr)
} | explode_data.jsonl/70315 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 141
} | [
2830,
3393,
6550,
88025,
62,
2354,
62004,
53316,
1454,
1155,
353,
8840,
836,
8,
341,
50780,
1669,
5975,
7121,
445,
2408,
4096,
1138,
36196,
18452,
7747,
1669,
2915,
5378,
9328,
8,
1465,
314,
470,
1366,
555,
197,
665,
11,
1848,
1669,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestTransparentInit runs skaffold's Transparent init against fixture
// project directories, covering successful config generation, every
// documented failure exit code, and the user declining the prompt.
func TestTransparentInit(t *testing.T) {
	tests := []struct {
		name             string
		dir              string
		config           initconfig.Config
		expectedError    string
		expectedExitCode int
		doneResponse     bool
	}{
		//TODO: mocked kompose test
		{
			name: "getting-started",
			dir:  "testdata/init/hello",
			config: initconfig.Config{
				Force: true,
				Opts: config.SkaffoldOptions{
					ConfigurationFile: "skaffold.yaml.out",
				},
			},
		},
		{
			name: "ignore existing tags",
			dir:  "testdata/init/ignore-tags",
			config: initconfig.Config{
				Force: true,
				Opts: config.SkaffoldOptions{
					ConfigurationFile: "skaffold.yaml.out",
				},
			},
		},
		{
			// Explicit builder/image pairs supplied on the CLI.
			name: "microservices (backwards compatibility)",
			dir:  "testdata/init/microservices",
			config: initconfig.Config{
				Force: true,
				CliArtifacts: []string{
					"leeroy-app/Dockerfile=gcr.io/k8s-skaffold/leeroy-app",
					"leeroy-web/Dockerfile=gcr.io/k8s-skaffold/leeroy-web",
				},
				Opts: config.SkaffoldOptions{
					ConfigurationFile: "skaffold.yaml.out",
				},
			},
		},
		{
			name: "error writing config file",
			dir:  "testdata/init/hello",
			config: initconfig.Config{
				Force: true,
				Opts: config.SkaffoldOptions{
					// erroneous config file as . is a directory
					ConfigurationFile: ".",
				},
			},
			expectedError:    "writing config to file: open .: is a directory",
			expectedExitCode: 1,
		},
		{
			name: "error no builders",
			dir:  "testdata/init/no-builder",
			config: initconfig.Config{
				Force: true,
				Opts: config.SkaffoldOptions{
					ConfigurationFile: "skaffold.yaml.out",
				},
			},
			expectedError:    "please provide at least one build config",
			expectedExitCode: 101,
		},
		{
			name: "error no manifests",
			dir:  "testdata/init/hello-no-manifest",
			config: initconfig.Config{
				Force: true,
				Opts: config.SkaffoldOptions{
					ConfigurationFile: "skaffold.yaml.out",
				},
			},
			expectedError:    "one or more valid Kubernetes manifests are required to run skaffold",
			expectedExitCode: 102,
		},
		{
			// --force cannot resolve multiple builder/image combinations.
			name: "builder/image ambiguity",
			dir:  "testdata/init/microservices",
			config: initconfig.Config{
				Force: true,
				Opts: config.SkaffoldOptions{
					ConfigurationFile: "skaffold.yaml.out",
				},
			},
			expectedError:    "unable to automatically resolve builder/image pairs",
			expectedExitCode: 104,
		},
		{
			name: "kustomize",
			dir:  "testdata/init/getting-started-kustomize",
			config: initconfig.Config{
				Force: true,
				Opts: config.SkaffoldOptions{
					ConfigurationFile: "skaffold.yaml.out",
				},
			},
		},
		{
			name: "helm fails",
			dir:  "testdata/init/helm-deployment",
			config: initconfig.Config{
				Opts: config.SkaffoldOptions{
					ConfigurationFile: "skaffold.yaml.out",
				},
			},
			expectedError: `Projects set up to deploy with helm must be manually configured.
See https://skaffold.dev/docs/pipeline-stages/deployers/helm/ for a detailed guide on setting your project up with skaffold.`,
			expectedExitCode: 1,
		},
		{
			// Declining the confirmation prompt yields a nil config, no error.
			name: "user selects 'no'",
			dir:  "testdata/init/hello",
			config: initconfig.Config{
				Opts: config.SkaffoldOptions{
					ConfigurationFile: "skaffold.yaml.out",
				},
			},
			doneResponse: true,
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.name, func(t *testutil.T) {
			t.Chdir(test.dir)
			t.Override(&confirmInitOptions, func(_ io.Writer, _ *latestV1.SkaffoldConfig) (bool, error) {
				return test.doneResponse, nil
			})
			got, err := Transparent(context.TODO(), os.Stdout, test.config)
			switch {
			case test.expectedError != "":
				t.CheckErrorContains(test.expectedError, err)
				t.CheckDeepEqual(exitCode(err), test.expectedExitCode)
			case test.doneResponse == true:
				t.CheckErrorAndDeepEqual(false, err, (*latestV1.SkaffoldConfig)(nil), got)
			default:
				t.CheckNoError(err)
				checkGeneratedConfig(t, ".")
			}
		})
	}
} | explode_data.jsonl/75803 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1705
} | [
2830,
3393,
57360,
3803,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
1797,
914,
198,
197,
48532,
1060,
914,
198,
197,
25873,
1843,
2930,
1676,
10753,
198,
197,
42400,
1454,
262,
914,
198,
197,
42400,
15339,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestTx_Rollback_ErrTxClosed ensures that rolling back a writable
// transaction twice returns bolt.ErrTxClosed on the second attempt.
func TestTx_Rollback_ErrTxClosed(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	tx, err := db.Begin(true)
	if err != nil {
		t.Fatal(err)
	}
	// The first rollback succeeds and closes the transaction.
	if err := tx.Rollback(); err != nil {
		t.Fatal(err)
	}
	// Any further rollback must report the sentinel error.
	if rollbackErr := tx.Rollback(); rollbackErr != bolt.ErrTxClosed {
		t.Fatalf("unexpected error: %s", rollbackErr)
	}
} | explode_data.jsonl/1682 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 138
} | [
2830,
3393,
31584,
2568,
965,
1419,
93623,
51,
12125,
9259,
1155,
353,
8840,
836,
8,
341,
20939,
1669,
15465,
5002,
3506,
741,
16867,
2927,
50463,
7925,
2822,
46237,
11,
1848,
1669,
2927,
28467,
3715,
340,
743,
1848,
961,
2092,
341,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
// TestParseFunctionDeclarationStatements checks that a function declaration
// with one typed parameter and a return type parses into the expected
// S-expression form.
func TestParseFunctionDeclarationStatements(t *testing.T) {
	checkParseStatement(
		t,
		"func f(x: int) -> int { return x }",
		"(function-declaration f ((function-param x int)) int (block (return x)))",
	)
	// checkParseStatement(t, "export func f(x: int) -> int { return x }", "(exported-function-declaration f ((function-param x int)) int (block (return x)))")
} | explode_data.jsonl/33522 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 126
} | [
2830,
3393,
14463,
5152,
24489,
93122,
1155,
353,
8840,
836,
8,
341,
25157,
14463,
8636,
1006,
197,
3244,
345,
197,
197,
1,
2830,
282,
2075,
25,
526,
8,
1464,
526,
314,
470,
856,
335,
756,
197,
197,
29209,
1688,
6810,
16490,
282,
17... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestElasticsearchDescribe runs `logging elasticsearch describe` against a
// mocked API, covering missing-flag validation, API error propagation, and
// the successful describe output.
func TestElasticsearchDescribe(t *testing.T) {
	args := testutil.Args
	for _, testcase := range []struct {
		args       []string
		api        mock.API
		wantError  string
		wantOutput string
	}{
		{
			// --name is required.
			args:      args("logging elasticsearch describe --service-id 123 --version 1"),
			wantError: "error parsing arguments: required flag --name not provided",
		},
		{
			// API failure is surfaced to the user.
			args: args("logging elasticsearch describe --service-id 123 --version 1 --name logs"),
			api: mock.API{
				ListVersionsFn:     testutil.ListVersions,
				GetElasticsearchFn: getElasticsearchError,
			},
			wantError: errTest.Error(),
		},
		{
			args: args("logging elasticsearch describe --service-id 123 --version 1 --name logs"),
			api: mock.API{
				ListVersionsFn:     testutil.ListVersions,
				GetElasticsearchFn: getElasticsearchOK,
			},
			wantOutput: describeElasticsearchOutput,
		},
	} {
		t.Run(strings.Join(testcase.args, " "), func(t *testing.T) {
			var stdout bytes.Buffer
			opts := testutil.NewRunOpts(testcase.args, &stdout)
			opts.APIClient = mock.APIClient(testcase.api)
			err := app.Run(opts)
			testutil.AssertErrorContains(t, err, testcase.wantError)
			testutil.AssertString(t, testcase.wantOutput, stdout.String())
		})
	}
} | explode_data.jsonl/6907 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 487
} | [
2830,
3393,
36,
51179,
1836,
74785,
1155,
353,
8840,
836,
8,
341,
31215,
1669,
1273,
1314,
51015,
198,
2023,
8358,
70080,
1669,
2088,
3056,
1235,
341,
197,
31215,
981,
3056,
917,
198,
197,
54299,
286,
7860,
24922,
198,
197,
50780,
1454,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
// TestCreateServiceWithServiceUser converts a compose ServiceConfig that
// specifies a numeric User and verifies the generated Deployment's
// container SecurityContext carries the same RunAsUser value.
func TestCreateServiceWithServiceUser(t *testing.T) {
	// An example service
	service := kobject.ServiceConfig{
		ContainerName: "name",
		Image:         "image",
		Environment:   []kobject.EnvVar{kobject.EnvVar{Name: "env", Value: "value"}},
		Port:          []kobject.Ports{kobject.Ports{HostPort: 123, ContainerPort: 456, Protocol: string(corev1.ProtocolTCP)}},
		Command:       []string{"cmd"},
		WorkingDir:    "dir",
		Args:          []string{"arg1", "arg2"},
		VolList:       []string{"/tmp/volume"},
		Network:       []string{"network1", "network2"}, // not supported
		Labels:        nil,
		Annotations:   map[string]string{"kompose.service.type": "nodeport"},
		CPUQuota:      1,                    // not supported
		CapAdd:        []string{"cap_add"},  // not supported
		CapDrop:       []string{"cap_drop"}, // not supported
		Expose:        []string{"expose"},   // not supported
		Privileged:    true,
		Restart:       "always",
		User:          "1234",
	}
	komposeObject := kobject.KomposeObject{
		ServiceConfigs: map[string]kobject.ServiceConfig{"app": service},
	}
	k := Kubernetes{}
	objects, err := k.Transform(komposeObject, kobject.ConvertOptions{CreateD: true, Replicas: 1})
	if err != nil {
		t.Error(errors.Wrap(err, "k.Transform failed"))
	}
	for _, obj := range objects {
		if deploy, ok := obj.(*appsv1.Deployment); ok {
			// The compose "user" value must round-trip into RunAsUser.
			uid := *deploy.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser
			if strconv.FormatInt(uid, 10) != service.User {
				t.Errorf("User in ServiceConfig is not matching user in PodSpec")
			}
		}
	}
} | explode_data.jsonl/58956 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 656
} | [
2830,
3393,
4021,
1860,
2354,
1860,
1474,
1155,
353,
8840,
836,
8,
341,
197,
322,
1527,
3110,
2473,
198,
52934,
1669,
595,
1700,
13860,
2648,
515,
197,
197,
4502,
675,
25,
330,
606,
756,
197,
53397,
25,
260,
330,
1805,
756,
197,
197... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
// TestMutex acquires the named lock "player1" and verifies it can be
// released again.
func TestMutex(t *testing.T) {
	lock := Lock("player1")
	if lock == nil {
		t.Fatal("cannot lock")
	}
	t.Log(lock)
	if unlocked := lock.Unlock(); !unlocked {
		t.Fatal("cannot unlock")
	}
} | explode_data.jsonl/41414 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 78
} | [
2830,
3393,
38099,
1155,
353,
8840,
836,
8,
341,
2109,
1669,
15701,
445,
3434,
16,
1138,
743,
296,
621,
2092,
341,
197,
3244,
26133,
445,
33260,
5296,
1138,
197,
630,
3244,
5247,
1255,
692,
743,
753,
76,
39188,
368,
341,
197,
3244,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 3 |
func TestServerRestartAndQueueSubs(t *testing.T) {
srvA, srvB, optsA, optsB := runServers(t)
urlA := fmt.Sprintf("nats://%s:%d/", optsA.Host, optsA.Port)
urlB := fmt.Sprintf("nats://%s:%d/", optsB.Host, optsB.Port)
// Client options
opts := nats.DefaultOptions
opts.Timeout = (5 * time.Second)
opts.ReconnectWait = (50 * time.Millisecond)
opts.MaxReconnect = 1000
opts.NoRandomize = true
// Allow us to block on a reconnect completion.
reconnectsDone := make(chan bool)
opts.ReconnectedCB = func(nc *nats.Conn) {
reconnectsDone <- true
}
// Helper to wait on a reconnect.
waitOnReconnect := func() {
var rcs int64
for {
select {
case <-reconnectsDone:
atomic.AddInt64(&rcs, 1)
if rcs >= 2 {
return
}
case <-time.After(2 * time.Second):
t.Fatalf("Expected a reconnect, timedout!\n")
}
}
}
// Create two clients..
opts.Servers = []string{urlA}
nc1, err := opts.Connect()
if err != nil {
t.Fatalf("Failed to create connection for nc1: %v\n", err)
}
opts.Servers = []string{urlB}
nc2, err := opts.Connect()
if err != nil {
t.Fatalf("Failed to create connection for nc2: %v\n", err)
}
c1, _ := nats.NewEncodedConn(nc1, "json")
defer c1.Close()
c2, _ := nats.NewEncodedConn(nc2, "json")
defer c2.Close()
// Flusher helper function.
flush := func() {
// Wait for processing.
c1.Flush()
c2.Flush()
// Wait for a short bit for cluster propogation.
time.Sleep(50 * time.Millisecond)
}
// To hold queue results.
results := make(map[int]int)
var mu sync.Mutex
// This corresponds to the subsriptions below.
const ExpectedMsgCount = 3
// Make sure we got what we needed, 1 msg only and all seqnos accounted for..
checkResults := func(numSent int) {
mu.Lock()
defer mu.Unlock()
for i := 0; i < numSent; i++ {
if results[i] != ExpectedMsgCount {
t.Fatalf("Received incorrect number of messages, [%d] for seq: %d\n", results[i], i)
}
}
// Auto reset results map
results = make(map[int]int)
}
subj := "foo.bar"
qgroup := "workers"
cb := func(seqno int) {
mu.Lock()
defer mu.Unlock()
results[seqno] = results[seqno] + 1
}
// Create queue subscribers
c1.QueueSubscribe(subj, qgroup, cb)
c2.QueueSubscribe(subj, qgroup, cb)
// Do a wildcard subscription.
c1.Subscribe("foo.*", cb)
c2.Subscribe("foo.*", cb)
// Wait for processing.
flush()
sendAndCheckMsgs := func(numToSend int) {
for i := 0; i < numToSend; i++ {
if i%2 == 0 {
c1.Publish(subj, i)
} else {
c2.Publish(subj, i)
}
}
// Wait for processing.
flush()
// Check Results
checkResults(numToSend)
}
////////////////////////////////////////////////////////////////////////////
// Base Test
////////////////////////////////////////////////////////////////////////////
// Now send 10 messages, from each client..
sendAndCheckMsgs(10)
////////////////////////////////////////////////////////////////////////////
// Now restart SrvA and srvB, re-run test
////////////////////////////////////////////////////////////////////////////
srvA.Shutdown()
srvA = RunServer(optsA)
defer srvA.Shutdown()
srvB.Shutdown()
srvB = RunServer(optsB)
defer srvB.Shutdown()
waitOnReconnect()
time.Sleep(50 * time.Millisecond)
// Now send another 10 messages, from each client..
sendAndCheckMsgs(10)
} | explode_data.jsonl/53509 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1289
} | [
2830,
3393,
5475,
59354,
3036,
7554,
3136,
82,
1155,
353,
8840,
836,
8,
341,
1903,
10553,
32,
11,
43578,
33,
11,
12185,
32,
11,
12185,
33,
1669,
1598,
78139,
1155,
692,
19320,
32,
1669,
8879,
17305,
445,
77,
1862,
86791,
82,
7533,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEncoderWriterLarge(t *testing.T) {
testX := func(x int) string {
result := ""
for i := 0; i < x; i++ {
result = result + "-foo-"
}
return result
}
var buf bytes.Buffer
e := velocypack.NewEncoder(&buf)
for i := 0; i < 1000; i++ {
must(e.Encode(testX(i)))
}
r := bytes.NewReader(buf.Bytes())
d := velocypack.NewDecoder(r)
for i := 0; i < 1000; i++ {
var v string
must(d.Decode(&v))
ASSERT_EQ(v, testX(i), t)
}
} | explode_data.jsonl/52697 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 207
} | [
2830,
3393,
19921,
6492,
34253,
1155,
353,
8840,
836,
8,
341,
18185,
55,
1669,
2915,
2075,
526,
8,
914,
341,
197,
9559,
1669,
8389,
197,
2023,
600,
1669,
220,
15,
26,
600,
366,
856,
26,
600,
1027,
341,
298,
9559,
284,
1102,
488,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestListAllL7Policies(t *testing.T) {
th.SetupHTTP()
defer th.TeardownHTTP()
HandleL7PolicyListSuccessfully(t)
allPages, err := l7policies.List(fake.ServiceClient(), l7policies.ListOpts{}).AllPages()
th.AssertNoErr(t, err)
actual, err := l7policies.ExtractL7Policies(allPages)
th.AssertNoErr(t, err)
th.CheckDeepEquals(t, L7PolicyToURL, actual[0])
th.CheckDeepEquals(t, L7PolicyToPool, actual[1])
} | explode_data.jsonl/79634 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 177
} | [
2830,
3393,
852,
2403,
43,
22,
47,
42038,
1155,
353,
8840,
836,
8,
341,
70479,
39820,
9230,
741,
16867,
270,
94849,
37496,
9230,
741,
197,
6999,
43,
22,
13825,
852,
35959,
1155,
692,
50960,
17713,
11,
1848,
1669,
326,
22,
79,
42038,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestResourceVersioner(t *testing.T) {
roleBinding := rbac.RoleBinding{ObjectMeta: api.ObjectMeta{ResourceVersion: "10"}}
version, err := accessor.ResourceVersion(&roleBinding)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if version != "10" {
t.Errorf("unexpected version %v", version)
}
roleBindingList := rbac.RoleBindingList{ListMeta: unversioned.ListMeta{ResourceVersion: "10"}}
version, err = accessor.ResourceVersion(&roleBindingList)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if version != "10" {
t.Errorf("unexpected version %v", version)
}
} | explode_data.jsonl/648 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 220
} | [
2830,
3393,
4783,
5637,
261,
1155,
353,
8840,
836,
8,
341,
197,
5778,
15059,
1669,
18717,
580,
35955,
15059,
90,
1190,
12175,
25,
6330,
80222,
90,
4783,
5637,
25,
330,
16,
15,
95642,
74954,
11,
1848,
1669,
44384,
20766,
5637,
2099,
57... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestGetTypeName(t *testing.T) {
val := &a{}
name, err := getTypeName(reflect.TypeOf(val))
require.Nil(t, err)
require.EqualValues(t, "a", name)
val1 := []a{}
name, err = getTypeName(reflect.TypeOf(val1))
require.Nil(t, err)
require.EqualValues(t, "a", name)
} | explode_data.jsonl/71804 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 119
} | [
2830,
3393,
1949,
39429,
1155,
353,
8840,
836,
8,
341,
19302,
1669,
609,
64,
31483,
11609,
11,
1848,
1669,
36581,
675,
13321,
767,
73921,
9098,
1171,
17957,
59678,
1155,
11,
1848,
340,
17957,
12808,
6227,
1155,
11,
330,
64,
497,
829,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestVConv(t *testing.T) {
goodCases := []struct {
s string
to reflect.Type
value interface{}
}{
{"test", reflect.TypeOf(""), "test"},
{"true", reflect.TypeOf(true), true},
{"false", reflect.TypeOf(true), false},
{"42", reflect.TypeOf(42), 42},
{"a,b", reflect.TypeOf([]string{}), []string{"a", "b"}},
{"7,42", reflect.TypeOf([]int{}), []int{7, 42}},
}
for _, cas := range goodCases {
v, err := vconv(cas.s, cas.to)
require.Nil(t, err)
require.Equal(t, cas.value, v.Interface())
}
badCases := []struct {
s string
to reflect.Type
}{
{"42", reflect.TypeOf(true)},
{"xx", reflect.TypeOf(true)},
{"aa", reflect.TypeOf(42)},
{"a,b", reflect.TypeOf([]int{})},
{"7,42c", reflect.TypeOf([]int{})},
}
for _, cas := range badCases {
_, err := vconv(cas.s, cas.to)
require.NotNil(t, err)
}
} | explode_data.jsonl/77192 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 385
} | [
2830,
3393,
53,
34892,
1155,
353,
8840,
836,
8,
341,
3174,
1386,
37302,
1669,
3056,
1235,
341,
197,
1903,
257,
914,
198,
197,
31709,
262,
8708,
10184,
198,
197,
16309,
3749,
16094,
197,
59403,
197,
197,
4913,
1944,
497,
8708,
73921,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestAddDeletionFinalizer(t *testing.T) {
t.Run("ArgoCD resource present", func(t *testing.T) {
a := makeTestArgoCD()
r := makeTestReconciler(t, a)
err := r.addDeletionFinalizer(a)
assert.NilError(t, err)
if !a.IsDeletionFinalizerPresent() {
t.Fatal("Expected deletion finalizer to be added")
}
})
t.Run("ArgoCD resource absent", func(t *testing.T) {
a := makeTestArgoCD()
r := makeTestReconciler(t)
err := r.addDeletionFinalizer(a)
assert.Error(t, err, `failed to add deletion finalizer for argocd: argocds.argoproj.io "argocd" not found`)
})
} | explode_data.jsonl/11934 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 243
} | [
2830,
3393,
2212,
1912,
52625,
19357,
3135,
1155,
353,
8840,
836,
8,
341,
3244,
16708,
445,
2735,
78,
6484,
5101,
3042,
497,
2915,
1155,
353,
8840,
836,
8,
341,
197,
11323,
1669,
1281,
2271,
2735,
78,
6484,
741,
197,
7000,
1669,
1281,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestScannerTiming(t *testing.T) {
defer leaktest.AfterTest(t)
const count = 3
const runTime = 100 * time.Millisecond
const maxError = 7500 * time.Microsecond
durations := []time.Duration{
10 * time.Millisecond,
25 * time.Millisecond,
}
for i, duration := range durations {
iter := newTestIterator(count)
q := &testQueue{}
s := newRangeScanner(duration, 0, iter, nil)
s.AddQueues(q)
mc := hlc.NewManualClock(0)
clock := hlc.NewClock(mc.UnixNano)
stopper := util.NewStopper()
defer stopper.Stop()
s.Start(clock, stopper)
time.Sleep(runTime)
avg := iter.avgScan()
log.Infof("%d: average scan: %s\n", i, avg)
if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
t.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
}
}
} | explode_data.jsonl/39875 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 372
} | [
2830,
3393,
31002,
62805,
1155,
353,
8840,
836,
8,
341,
16867,
23352,
1944,
36892,
2271,
1155,
340,
4777,
1760,
284,
220,
18,
198,
4777,
1598,
1462,
284,
220,
16,
15,
15,
353,
882,
71482,
198,
4777,
1932,
1454,
284,
220,
22,
20,
15,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestUpdateTeamMemberRolesChangingGuest(t *testing.T) {
th := Setup(t).InitBasic()
defer th.TearDown()
t.Run("from guest to user", func(t *testing.T) {
user := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""}
ruser, _ := th.App.CreateGuest(&user)
_, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "")
require.Nil(t, err)
_, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_user")
require.NotNil(t, err, "Should fail when try to modify the guest role")
})
t.Run("from user to guest", func(t *testing.T) {
user := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""}
ruser, _ := th.App.CreateUser(&user)
_, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "")
require.Nil(t, err)
_, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_guest")
require.NotNil(t, err, "Should fail when try to modify the guest role")
})
t.Run("from user to admin", func(t *testing.T) {
user := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""}
ruser, _ := th.App.CreateUser(&user)
_, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "")
require.Nil(t, err)
_, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_user team_admin")
require.Nil(t, err, "Should work when you not modify guest role")
})
t.Run("from guest to guest plus custom", func(t *testing.T) {
user := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""}
ruser, _ := th.App.CreateGuest(&user)
_, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "")
require.Nil(t, err)
_, err = th.App.CreateRole(&model.Role{Name: "custom", DisplayName: "custom", Description: "custom"})
require.Nil(t, err)
_, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_guest custom")
require.Nil(t, err, "Should work when you not modify guest role")
})
t.Run("a guest cant have user role", func(t *testing.T) {
user := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""}
ruser, _ := th.App.CreateGuest(&user)
_, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "")
require.Nil(t, err)
_, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_guest team_user")
require.NotNil(t, err, "Should work when you not modify guest role")
})
} | explode_data.jsonl/30280 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1111
} | [
2830,
3393,
4289,
14597,
9366,
25116,
59046,
37804,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1155,
568,
3803,
15944,
741,
16867,
270,
836,
682,
4454,
2822,
3244,
16708,
445,
1499,
8640,
311,
1196,
497,
2915,
1155,
353,
8840,
83... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMetaBackend_configureInterpolation(t *testing.T) {
// Create a temporary working directory that is empty
td := tempDir(t)
copy.CopyDir(testFixturePath("backend-new-interp"), td)
defer os.RemoveAll(td)
defer testChdir(t, td)()
// Setup the meta
m := testMetaBackend(t, nil)
// Get the backend
_, err := m.Backend(&BackendOpts{Init: true})
if err == nil {
t.Fatal("should error")
}
} | explode_data.jsonl/34312 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 153
} | [
2830,
3393,
12175,
29699,
75887,
3306,
44686,
1155,
353,
8840,
836,
8,
341,
197,
322,
4230,
264,
13340,
3238,
6220,
429,
374,
4287,
198,
76373,
1669,
2730,
6184,
1155,
340,
49124,
31770,
6184,
8623,
18930,
1820,
445,
20942,
22269,
44894,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestExcelizeOneMerge(t *testing.T) {
f := excelize.NewFile()
sheet := "Sheet1"
// 设置单元格的值
f.SetCellValue(sheet, "A1", 100)
f.SetCellValue(sheet, "B2", 1)
f.MergeCell(sheet, "A1", "A2")
f.RemoveRow(sheet, 1)
//f.DuplicateRowTo(sheet, 2, 3)
//f.RemoveRow(sheet, 2)
// 根据指定路径保存文件
f.SaveAs(pathPrefix + "BookOneMerge_out.xlsx")
} | explode_data.jsonl/476 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 197
} | [
2830,
3393,
20055,
551,
3966,
52096,
1155,
353,
8840,
836,
8,
341,
1166,
1669,
3438,
19412,
7121,
1703,
741,
1903,
3674,
1669,
330,
10541,
16,
698,
197,
322,
53054,
106251,
33983,
9370,
25511,
198,
1166,
4202,
23885,
61680,
11,
330,
32,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestStrictRemove(t *testing.T) {
tests := []struct {
id uint
want error
}{
{1, nil},
{2, nil},
{3, nil},
{4, nil},
{5, nil},
{6, nil},
}
tmpdir, err := ioutil.TempDir("", "tmpdir")
if err != nil {
t.Errorf("Unexpected error: TestStrictRemove(%v)", err)
}
defer os.RemoveAll(tmpdir)
if err := exec.Command("cp", "-r", index, tmpdir).Run(); err != nil {
t.Errorf("Unexpected error: TestStrictRemove(%v)", err)
}
ngt := New(path.Join(tmpdir, "index")).Open()
defer ngt.Close()
for _, tt := range tests {
if err := ngt.StrictRemove(tt.id); err != tt.want {
t.Errorf("TestStrictRemove(%v): %v, wanted: %v", tt.id, err, tt.want)
}
}
} | explode_data.jsonl/31022 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 309
} | [
2830,
3393,
41857,
13021,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
15710,
256,
2622,
198,
197,
50780,
1465,
198,
197,
59403,
197,
197,
90,
16,
11,
2092,
1583,
197,
197,
90,
17,
11,
2092,
1583,
197,
197,
90,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestMigrateAndQueryCount(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "TestMigrateAndQueryStride.*.sqlite")
if err != nil {
panic(err)
}
defer os.Remove(tmpfile.Name())
testutils.CreateTestDatabase(tmpfile.Name())
a, err := New(tmpfile.Name())
if err != nil {
panic(err)
}
err = a.Migrate()
if err != nil {
panic(err)
}
count, err := a.CountBlocks()
if err != nil {
panic(err)
}
if count <= 0 {
t.Fatal("zero count")
}
} | explode_data.jsonl/51540 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 194
} | [
2830,
3393,
44,
34479,
3036,
2859,
2507,
1155,
353,
8840,
836,
8,
341,
20082,
1192,
11,
1848,
1669,
43144,
65009,
1703,
19814,
330,
2271,
44,
34479,
3036,
2859,
64169,
4908,
13,
37042,
1138,
743,
1848,
961,
2092,
341,
197,
30764,
3964,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestPasswordEntry_Placeholder(t *testing.T) {
entry, window := setupPasswordTest(t)
defer teardownImageTest(window)
c := window.Canvas()
entry.SetPlaceHolder("Password")
test.AssertRendersToMarkup(t, "password_entry/placeholder_initial.xml", c)
test.Type(entry, "Hié™שרה")
assert.Equal(t, "Hié™שרה", entry.Text)
test.AssertRendersToMarkup(t, "password_entry/placeholder_typed.xml", c)
} | explode_data.jsonl/12384 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 155
} | [
2830,
3393,
4876,
5874,
1088,
26536,
4251,
1155,
353,
8840,
836,
8,
341,
48344,
11,
3241,
1669,
6505,
4876,
2271,
1155,
340,
16867,
49304,
1906,
2271,
15906,
340,
1444,
1669,
3241,
54121,
2822,
48344,
4202,
17371,
8589,
445,
4876,
1138,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCamputPermanode(t *testing.T) {
w := test.GetWorld(t)
br := w.NewPermanode(t)
out := test.MustRunCmd(t, w.Cmd("pk-get", br.String()))
mustHave := []string{
`{"camliVersion": 1,`,
`"camliSigner": "`,
`"camliType": "permanode",`,
`random": "`,
`,"camliSig":"`,
}
for _, str := range mustHave {
if !strings.Contains(out, str) {
t.Errorf("Expected permanode response to contain %q; it didn't. Got: %s", str, out)
}
}
} | explode_data.jsonl/6977 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 201
} | [
2830,
3393,
25406,
628,
3889,
1515,
534,
1155,
353,
8840,
836,
8,
341,
6692,
1669,
1273,
2234,
10134,
1155,
340,
80255,
1669,
289,
7121,
3889,
1515,
534,
1155,
692,
13967,
1669,
1273,
50463,
6727,
15613,
1155,
11,
289,
64512,
445,
20819... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestCheckSerializedHeight(t *testing.T) {
// Create an empty coinbase template to be used in the tests below.
coinbaseOutpoint := wire.NewOutPoint(&chainhash.Hash{}, math.MaxUint32)
coinbaseTx := wire.NewMsgTx(1)
coinbaseTx.AddTxIn(wire.NewTxIn(coinbaseOutpoint, nil, nil))
// Expected rule errors.
missingHeightError := RuleError{
ErrorCode: ErrMissingCoinbaseHeight,
}
badHeightError := RuleError{
ErrorCode: ErrBadCoinbaseHeight,
}
tests := []struct {
sigScript []byte // Serialized data
wantHeight int32 // Expected height
err error // Expected error type
}{
// No serialized height length.
{[]byte{}, 0, missingHeightError},
// Serialized height length with no height bytes.
{[]byte{0x02}, 0, missingHeightError},
// Serialized height length with too few height bytes.
{[]byte{0x02, 0x4a}, 0, missingHeightError},
// Serialized height that needs 2 bytes to encode.
{[]byte{0x02, 0x4a, 0x52}, 21066, nil},
// Serialized height that needs 2 bytes to encode, but backwards
// endianness.
{[]byte{0x02, 0x4a, 0x52}, 19026, badHeightError},
// Serialized height that needs 3 bytes to encode.
{[]byte{0x03, 0x40, 0x0d, 0x03}, 200000, nil},
// Serialized height that needs 3 bytes to encode, but backwards
// endianness.
{[]byte{0x03, 0x40, 0x0d, 0x03}, 1074594560, badHeightError},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
msgTx := coinbaseTx.Copy()
msgTx.TxIn[0].SignatureScript = test.sigScript
tx := floutil.NewTx(msgTx)
err := checkSerializedHeight(tx, test.wantHeight)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("checkSerializedHeight #%d wrong error type "+
"got: %v <%T>, want: %T", i, err, err, test.err)
continue
}
if rerr, ok := err.(RuleError); ok {
trerr := test.err.(RuleError)
if rerr.ErrorCode != trerr.ErrorCode {
t.Errorf("checkSerializedHeight #%d wrong "+
"error code got: %v, want: %v", i,
rerr.ErrorCode, trerr.ErrorCode)
continue
}
}
}
} | explode_data.jsonl/74722 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 784
} | [
2830,
3393,
3973,
77521,
3640,
1155,
353,
8840,
836,
8,
341,
197,
322,
4230,
458,
4287,
16254,
3152,
3811,
311,
387,
1483,
304,
279,
7032,
3685,
624,
197,
7160,
3152,
2662,
2768,
1669,
9067,
7121,
2662,
2609,
2099,
8819,
8296,
15103,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestGetV2Job(t *testing.T) {
th.SetupHTTP()
defer th.TeardownHTTP()
handleV2JobGet(t)
actual, err := jobs.Get(client.ServiceClient(), "986416ea-e26b-40f1-b371-cd7be87376a2",
"3c0cf394-5da2-46a7-92df-795d998edea7").Extract()
th.AssertNoErr(t, err)
th.AssertDeepEquals(t, expectedGetResponseData, actual)
} | explode_data.jsonl/52707 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 148
} | [
2830,
3393,
1949,
53,
17,
12245,
1155,
353,
8840,
836,
8,
341,
70479,
39820,
9230,
741,
16867,
270,
94849,
37496,
9230,
741,
53822,
53,
17,
12245,
1949,
1155,
692,
88814,
11,
1848,
1669,
6887,
2234,
12805,
13860,
2959,
1507,
330,
24,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestTerminate(t *testing.T) {
tests := []struct {
name string
tags []*ec2.Tag
inst *instance
expected error
}{
{
name: "no issue with terminate",
tags: []*ec2.Tag{},
inst: &instance{
Instance: &ec2.Instance{
InstanceId: aws.String("id1"),
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNameRunning),
},
},
region: ®ion{
services: connections{
ec2: mockEC2{
tierr: nil,
},
},
},
},
expected: nil,
},
{
name: "issue with terminate",
tags: []*ec2.Tag{},
inst: &instance{
Instance: &ec2.Instance{
InstanceId: aws.String("id1"),
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNameRunning),
},
},
region: ®ion{
services: connections{
ec2: mockEC2{
tierr: errors.New(""),
},
},
},
},
expected: errors.New(""),
},
}
for _, tt := range tests {
ret := tt.inst.terminate()
if ret != nil && ret.Error() != tt.expected.Error() {
t.Errorf("error actual: %s, expected: %s", ret.Error(), tt.expected.Error())
}
}
} | explode_data.jsonl/55202 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 561
} | [
2830,
3393,
62519,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
257,
914,
198,
197,
3244,
2032,
257,
29838,
757,
17,
23676,
198,
197,
88656,
257,
353,
4851,
198,
197,
42400,
1465,
198,
197,
59403,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestActionCT(t *testing.T) {
var tests = []struct {
desc string
args string
action string
err error
}{
{
desc: "no arguments",
err: errCTNoArguments,
},
{
desc: "OK",
args: "commit,exec(set_field:1->ct_label,set_field:1->ct_mark)",
action: "ct(commit,exec(set_field:1->ct_label,set_field:1->ct_mark))",
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
action, err := ConnectionTracking(tt.args).MarshalText()
if want, got := tt.err, err; want != got {
t.Fatalf("unexpected error:\n- want: %v\n- got: %v",
want, got)
}
if err != nil {
return
}
if want, got := tt.action, string(action); want != got {
t.Fatalf("unexpected Action:\n- want: %q\n- got: %q",
want, got)
}
})
}
} | explode_data.jsonl/49508 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 380
} | [
2830,
3393,
2512,
1162,
1155,
353,
8840,
836,
8,
341,
2405,
7032,
284,
3056,
1235,
341,
197,
41653,
256,
914,
198,
197,
31215,
256,
914,
198,
197,
38933,
914,
198,
197,
9859,
262,
1465,
198,
197,
59403,
197,
197,
515,
298,
41653,
25... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestRealm(t *testing.T) {
f := `schema "account_a" {
}
table "t1" {
schema = schema.account_a
}
schema "account_b" {
}
table "t2" {
schema = schema.account_b
}
`
for _, tt := range []struct {
name string
schemaspec.Marshaler
schemaspec.Unmarshaler
}{
{
name: "mysql",
Marshaler: mysql.MarshalHCL,
Unmarshaler: mysql.UnmarshalHCL,
},
{
name: "postgres",
Marshaler: postgres.MarshalHCL,
Unmarshaler: postgres.UnmarshalHCL,
},
{
name: "sqlite",
Marshaler: sqlite.MarshalHCL,
Unmarshaler: sqlite.UnmarshalHCL,
},
} {
t.Run(tt.name, func(t *testing.T) {
var r schema.Realm
err := tt.UnmarshalSpec([]byte(f), &r)
require.NoError(t, err)
exp := &schema.Realm{
Schemas: []*schema.Schema{
{
Name: "account_a",
Tables: []*schema.Table{
{Name: "t1"},
},
},
{
Name: "account_b",
Tables: []*schema.Table{
{Name: "t2"},
},
},
},
}
exp.Schemas[0].Tables[0].Schema = exp.Schemas[0]
exp.Schemas[1].Tables[0].Schema = exp.Schemas[1]
require.EqualValues(t, exp, &r)
hcl, err := tt.MarshalSpec(&r)
require.NoError(t, err)
var after schema.Realm
err = tt.UnmarshalSpec(hcl, &after)
require.NoError(t, err)
require.EqualValues(t, exp, &after)
})
}
} | explode_data.jsonl/81316 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 717
} | [
2830,
3393,
64290,
1155,
353,
8840,
836,
8,
341,
1166,
1669,
1565,
17349,
330,
4608,
4306,
1,
341,
532,
2005,
330,
83,
16,
1,
341,
1903,
3416,
284,
10802,
18786,
4306,
198,
532,
17349,
330,
4608,
880,
1,
341,
532,
2005,
330,
83,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_PostConfig_CreatesConfig(t *testing.T) {
setup(t)
defer cleanup(t)
userID := makeUserID()
config := makeConfig()
for _, c := range allClients {
{
w := requestAsUser(t, userID, "POST", c.Endpoint, "", readerFromConfig(t, config))
assert.Equal(t, http.StatusNoContent, w.Code)
}
{
w := requestAsUser(t, userID, "GET", c.Endpoint, "", nil)
assert.Equal(t, config, parseView(t, w.Body.Bytes()).Config)
}
}
} | explode_data.jsonl/37310 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 188
} | [
2830,
3393,
66726,
2648,
920,
265,
973,
2648,
1155,
353,
8840,
836,
8,
341,
84571,
1155,
340,
16867,
21290,
1155,
692,
19060,
915,
1669,
1281,
36899,
741,
25873,
1669,
1281,
2648,
741,
2023,
8358,
272,
1669,
2088,
678,
47174,
341,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestTriggerReconciler(t *testing.T) {
eventing.RegisterAlternateBrokerConditionSet(broker.ConditionSet)
t.Parallel()
for _, f := range Formats {
triggerReconciliation(t, f, *DefaultConfigs)
}
} | explode_data.jsonl/48902 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 75
} | [
2830,
3393,
17939,
693,
40446,
5769,
1155,
353,
8840,
836,
8,
341,
28302,
287,
19983,
83042,
65545,
10547,
1649,
1883,
45985,
75134,
1649,
692,
3244,
41288,
7957,
2822,
2023,
8358,
282,
1669,
2088,
79348,
341,
197,
83228,
693,
98240,
1155... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestPullEngineSelectiveUpdates(t *testing.T) {
t.Parallel()
// Scenario: inst1 has {1, 3} and inst2 has {0,1,2,3}.
// inst1 initiates to inst2
// Expected outcome: inst1 asks for 0,2 and inst2 sends 0,2 only
peers := make(map[string]*pullTestInstance)
inst1 := newPushPullTestInstance("p1", peers)
inst2 := newPushPullTestInstance("p2", peers)
defer inst1.stop()
defer inst2.stop()
inst1.Add("1", "3")
inst2.Add("0", "1", "2", "3")
// Ensure inst2 sent a proper digest to inst1
inst1.hook(func(m interface{}) {
if dig, isDig := m.(*digestMsg); isDig {
assert.True(t, util.IndexInSlice(dig.digest, "0", Strcmp) != -1)
assert.True(t, util.IndexInSlice(dig.digest, "1", Strcmp) != -1)
assert.True(t, util.IndexInSlice(dig.digest, "2", Strcmp) != -1)
assert.True(t, util.IndexInSlice(dig.digest, "3", Strcmp) != -1)
}
})
// Ensure inst1 requested only needed updates from inst2
inst2.hook(func(m interface{}) {
if req, isReq := m.(*reqMsg); isReq {
assert.True(t, util.IndexInSlice(req.items, "1", Strcmp) == -1)
assert.True(t, util.IndexInSlice(req.items, "3", Strcmp) == -1)
assert.True(t, util.IndexInSlice(req.items, "0", Strcmp) != -1)
assert.True(t, util.IndexInSlice(req.items, "2", Strcmp) != -1)
}
})
// Ensure inst1 received only needed updates from inst2
inst1.hook(func(m interface{}) {
if res, isRes := m.(*resMsg); isRes {
assert.True(t, util.IndexInSlice(res.items, "1", Strcmp) == -1)
assert.True(t, util.IndexInSlice(res.items, "3", Strcmp) == -1)
assert.True(t, util.IndexInSlice(res.items, "0", Strcmp) != -1)
assert.True(t, util.IndexInSlice(res.items, "2", Strcmp) != -1)
}
})
inst1.setNextPeerSelection([]string{"p2"})
time.Sleep(time.Duration(2000) * time.Millisecond)
assert.Equal(t, len(inst2.state.ToArray()), len(inst1.state.ToArray()))
} | explode_data.jsonl/56503 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 784
} | [
2830,
3393,
36068,
4571,
64392,
37091,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
197,
322,
58663,
25,
1761,
16,
702,
314,
16,
11,
220,
18,
92,
323,
1761,
17,
702,
314,
15,
11,
16,
11,
17,
11,
18,
27275,
197,
322,
176... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestEachObjxMap(t *testing.T) {
v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
count := 0
replacedVals := make([](Map), 0)
assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool {
count++
replacedVals = append(replacedVals, val)
// abort early
if i == 2 {
return false
}
return true
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0])
assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1])
assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2])
} | explode_data.jsonl/23395 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 262
} | [
2830,
3393,
4854,
5261,
87,
2227,
1155,
353,
8840,
836,
8,
1476,
5195,
1669,
609,
1130,
90,
691,
25,
39444,
2227,
6098,
7,
2227,
2376,
3564,
7,
16,
5731,
320,
2227,
2376,
3564,
7,
16,
5731,
320,
2227,
2376,
3564,
7,
16,
5731,
320,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestPRMExactRepositoryUnmarshalJSON(t *testing.T) {
policyJSONUmarshallerTests{
newDest: func() json.Unmarshaler { return &prmExactRepository{} },
newValidObject: func() (interface{}, error) {
return NewPRMExactRepository("library/busybox:latest")
},
otherJSONParser: func(validJSON []byte) (interface{}, error) {
return newPolicyReferenceMatchFromJSON(validJSON)
},
breakFns: []func(mSI){
// The "type" field is missing
func(v mSI) { delete(v, "type") },
// Wrong "type" field
func(v mSI) { v["type"] = 1 },
func(v mSI) { v["type"] = "this is invalid" },
// Extra top-level sub-object
func(v mSI) { v["unexpected"] = 1 },
// The "dockerRepository" field is missing
func(v mSI) { delete(v, "dockerRepository") },
// Invalid "dockerRepository" field
func(v mSI) { v["dockerRepository"] = 1 },
},
duplicateFields: []string{"type", "dockerRepository"},
}.run(t)
} | explode_data.jsonl/36520 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 363
} | [
2830,
3393,
6480,
44,
57954,
4624,
1806,
27121,
5370,
1155,
353,
8840,
836,
8,
341,
3223,
8018,
5370,
52,
52541,
18200,
515,
197,
8638,
34830,
25,
2915,
368,
2951,
38097,
261,
314,
470,
609,
94043,
57954,
4624,
6257,
1153,
197,
8638,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestControllerDeleteEventWithGoodControllerNoAPIVersion(t *testing.T) {
c, tc := makeController("", "ReplicaSet")
c.Delete(simpleOwnedPod("unit", "test"))
validateSent(t, tc, sourcesv1beta1.ApiServerSourceDeleteRefEventType)
} | explode_data.jsonl/39272 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 78
} | [
2830,
3393,
2051,
6435,
1556,
2354,
15216,
2051,
2753,
7082,
5637,
1155,
353,
8840,
836,
8,
341,
1444,
11,
17130,
1669,
1281,
2051,
19814,
330,
18327,
15317,
1649,
1138,
1444,
18872,
1141,
6456,
57641,
23527,
445,
3843,
497,
330,
1944,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestDecimalBytesLogicalTypeInRecordEncode(t *testing.T) {
schema := `{"type": "record", "name": "myrecord", "fields" : [
{"name": "mydecimal", "type": "bytes", "logicalType": "decimal", "precision": 4, "scale": 2}]}`
testBinaryCodecPass(t, schema, map[string]interface{}{"mydecimal": big.NewRat(617, 50)}, []byte("\x04\x04\xd2"))
} | explode_data.jsonl/12014 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 138
} | [
2830,
3393,
11269,
7078,
64312,
929,
641,
6471,
32535,
1155,
353,
8840,
836,
8,
341,
1903,
3416,
1669,
1565,
4913,
1313,
788,
330,
8548,
497,
330,
606,
788,
330,
2408,
8548,
497,
330,
9007,
1,
549,
2278,
20295,
5212,
606,
788,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAttributeWithNamespace(t *testing.T) {
s := `<?xml version="1.0" encoding="UTF-8"?><root xmlns:n1="http://www.w3.org">
<good a="1" b="2" />
<good a="1" n1:a="2" /></root>`
doc, _ := Parse(strings.NewReader(s))
n := FindOne(doc, "//good[@n1:a='2']")
if n == nil {
t.Fatal("n is nil")
}
} | explode_data.jsonl/18845 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 147
} | [
2830,
3393,
3907,
2354,
22699,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
1565,
1316,
6455,
2319,
428,
16,
13,
15,
1,
11170,
428,
8561,
12,
23,
43869,
1784,
2888,
24967,
19266,
16,
428,
1254,
1110,
2136,
1418,
18,
2659,
881,
256,
366... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestEthereumAddressValidation(t *testing.T) {
validate := New()
tests := []struct {
param string
expected bool
}{
{"", false},
{"0x02F9AE5f22EA3fA88F05780B30385bEC", false},
{"123f681646d4a755815f9cb19e1acc8565a0c2ac", false},
{"0x02F9AE5f22EA3fA88F05780B30385bECFacbf130", true},
{"0x123f681646d4a755815f9cb19e1acc8565a0c2ac", true},
}
for i, test := range tests {
errs := validate.Var(test.param, "eth_addr")
if test.expected {
if !IsEqual(errs, nil) {
t.Fatalf("Index: %d eth_addr failed Error: %s", i, errs)
}
} else {
if IsEqual(errs, nil) {
t.Fatalf("Index: %d eth_addr failed Error: %s", i, errs)
} else {
val := getError(errs, "", "")
if val.Tag() != "eth_addr" {
t.Fatalf("Index: %d Latitude failed Error: %s", i, errs)
}
}
}
}
} | explode_data.jsonl/77292 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 406
} | [
2830,
3393,
36,
18532,
372,
4286,
13799,
1155,
353,
8840,
836,
8,
1476,
197,
7067,
1669,
1532,
2822,
78216,
1669,
3056,
1235,
341,
197,
36037,
262,
914,
198,
197,
42400,
1807,
198,
197,
59403,
197,
197,
4913,
497,
895,
1583,
197,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestSerializeRole(t *testing.T) {
auth := NewAuthenticator(gTestBucket, nil)
role, _ := auth.NewRole("froods", ch.SetOf("hoopy", "public"))
encoded, _ := json.Marshal(role)
assert.True(t, encoded != nil)
log.Printf("Marshaled Role as: %s", encoded)
elor := &roleImpl{}
err := json.Unmarshal(encoded, elor)
assert.True(t, err == nil)
assert.DeepEquals(t, elor.Name(), role.Name())
assert.DeepEquals(t, elor.ExplicitChannels(), role.ExplicitChannels())
} | explode_data.jsonl/31554 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 182
} | [
2830,
3393,
15680,
9030,
1155,
353,
8840,
836,
8,
341,
78011,
1669,
1532,
5087,
61393,
3268,
2271,
36018,
11,
2092,
340,
197,
5778,
11,
716,
1669,
4166,
7121,
9030,
445,
69,
299,
29697,
497,
521,
4202,
2124,
445,
6161,
1266,
497,
330,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_ToSlidingWindows(t *testing.T) {
tests := []struct {
input []int
want []int
}{
{[]int{}, []int{}},
{[]int{1}, []int{}},
{[]int{1, 2}, []int{}},
{[]int{1, 2, 3}, []int{6}},
{[]int{1, 2, 3, 4}, []int{6, 9}},
{[]int{199,
200,
208,
210,
200,
207,
240,
269,
260,
263}, []int{607, 618, 618, 617, 647, 716, 769, 792}},
}
for _, test := range tests {
t.Run("ToSlidingWindows("+fmt.Sprint(test.input)+")", func(t *testing.T) {
got := ToSlidingWindows(test.input)
assert.Equal(t, test.want, got)
})
}
} | explode_data.jsonl/3328 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 301
} | [
2830,
3393,
38346,
7442,
6577,
13164,
1155,
353,
8840,
836,
8,
1476,
78216,
1669,
3056,
1235,
341,
197,
22427,
3056,
396,
198,
197,
50780,
220,
3056,
396,
198,
197,
59403,
197,
197,
90,
1294,
396,
22655,
3056,
396,
6257,
15766,
197,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInputService21ProtocolTestTimestampInHeaderCase1(t *testing.T) {
sess := session.New()
svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
input := &InputService21TestShapeInputService21TestCaseOperation1Input{
TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)),
}
req, _ := svc.InputService21TestCaseOperation1Request(input)
r := req.HTTPRequest
// build request
restxml.Build(req)
assert.NoError(t, req.Error)
// assert URL
awstesting.AssertURL(t, "https://test/path", r.URL.String())
// assert headers
assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg"))
} | explode_data.jsonl/46501 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 242
} | [
2830,
3393,
2505,
1860,
17,
16,
20689,
2271,
20812,
641,
4047,
4207,
16,
1155,
353,
8840,
836,
8,
341,
1903,
433,
1669,
3797,
7121,
741,
1903,
7362,
1669,
1532,
2505,
1860,
17,
16,
20689,
2271,
57223,
11,
609,
8635,
10753,
90,
27380,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestEnableTracePoint(t *testing.T) {
SetGlobalLogMode(DisabledMode)
defer SetGlobalLogMode(DefaultMode)
// XXX(irfansharif): This test depends on the exact difference in line
// numbers between the call to callers and the logger.Info execution below.
// The tracepoint is set to be the line exactly ten lines below it.
file, line := caller(0)
tp := fmt.Sprintf("%s:%d", filepath.Base(file), line+10)
SetTracePoint(tp)
if tpenabled := GetTracePoint(tp); !tpenabled {
t.Errorf("Expected tracepoint %s to be enabled; found disabled", tp)
}
buffer := new(bytes.Buffer)
logger := New(Writer(buffer))
{
logger.Info()
if buffer.Len() == 0 {
t.Error("Expected stack trace to be populated, found empty buffer instead")
}
line, err := buffer.ReadString(byte('\n'))
if err != nil {
t.Error(err)
}
goroutineRegex := "^goroutine [\\d]+ \\[running\\]:"
match, err := regexp.Match(goroutineRegex, []byte(line))
if err != nil {
t.Error(err)
}
if !match {
t.Errorf("expected pattern (first line): \"%s\", got: %s", goroutineRegex, line)
}
line, err = buffer.ReadString(byte('\n'))
if err != nil {
t.Error(err)
}
functionSignatureRegex := "^github.com/irfansharif/log.TestEnableTracePoint"
match, err = regexp.Match(functionSignatureRegex, []byte(line))
if err != nil {
t.Error(err)
}
if !match {
t.Errorf("expected pattern (second line): \"%s\", got: %s", functionSignatureRegex, line)
}
}
} | explode_data.jsonl/48446 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 566
} | [
2830,
3393,
11084,
6550,
2609,
1155,
353,
8840,
836,
8,
341,
22212,
11646,
2201,
3636,
7,
25907,
3636,
340,
16867,
2573,
11646,
2201,
3636,
87874,
3636,
692,
197,
322,
19975,
76591,
85877,
12982,
333,
1648,
1096,
1273,
13798,
389,
279,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func TestNewGetStateBuilderQueryParam(t *testing.T) {
assert := assert.New(t)
pubnub.Config.UUID = "my-custom-uuid"
o := newGetStateBuilder(pubnub)
o.Channels([]string{"ch"})
o.ChannelGroups([]string{"cg"})
queryParam := map[string]string{
"q1": "v1",
"q2": "v2",
}
path, err := o.opts.buildPath()
o.opts.QueryParam = queryParam
assert.Nil(err)
u := &url.URL{
Path: path,
}
h.AssertPathsEqual(t,
"/v2/presence/sub-key/sub_key/channel/ch/uuid/my-custom-uuid",
u.EscapedPath(), []int{})
query, err := o.opts.buildQuery()
assert.Equal("v1", query.Get("q1"))
assert.Equal("v2", query.Get("q2"))
assert.Nil(err)
expected := &url.Values{}
expected.Set("channel-group", "cg")
expected.Set("q1", "v1")
expected.Set("q2", "v2")
h.AssertQueriesEqual(t, expected, query, []string{"pnsdk", "uuid"}, []string{})
body, err := o.opts.buildBody()
assert.Nil(err)
assert.Equal([]byte{}, body)
} | explode_data.jsonl/32496 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 412
} | [
2830,
3393,
3564,
1949,
1397,
3297,
84085,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
692,
62529,
77,
392,
10753,
39636,
284,
330,
2408,
36898,
12,
17128,
1837,
22229,
1669,
501,
1949,
1397,
3297,
74186,
77,
392,
340,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInventoryFilter_QueryParams_WithoutQueryParamValues(t *testing.T) {
// given
collectionFilter := InventoryFilter{
Type: "Test-Type",
FragmentType: "Test-FragmentType",
Ids: []string{"1","2","3"},
Text: "Test Text",
}
// when
err := collectionFilter.QueryParams(nil)
// then
if err == nil {
t.Error("Expected an error but no one was returned")
}
expectedError := "The provided parameter values must not be nil!"
if err.Error() != expectedError {
t.Errorf("Unexpected error was returned: %s; expected: %s", err, expectedError)
}
} | explode_data.jsonl/30667 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 217
} | [
2830,
3393,
22319,
5632,
48042,
4870,
62,
26040,
84085,
6227,
1155,
353,
8840,
836,
8,
341,
197,
322,
2661,
198,
1444,
1908,
5632,
1669,
28126,
5632,
515,
197,
27725,
25,
260,
330,
2271,
10804,
756,
197,
197,
9488,
929,
25,
330,
2271,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestRejectsSTHFromUnknownLog(t *testing.T) {
s := createAndOpenStorage()
defer closeAndDeleteStorage(s)
v := mustCreateSignatureVerifiers(t)
h := newHandlerWithClock(s, v, testStuckClock(stuckClockTimeMillis))
rr := httptest.NewRecorder()
req, err := http.NewRequest("POST", "/.well-known/ct/v1/sth-pollination", strings.NewReader(addSTHPollinationUnknownLogIDJSON))
if err != nil {
t.Fatalf("Failed to create request: %v", err)
}
h.HandleSTHPollination(rr, req)
if !assert.Equal(t, http.StatusOK, rr.Code) {
t.Fatal(rr.Body.String())
}
assert.EqualValues(t, 0, mustGet(t, s.getNumSTHs))
} | explode_data.jsonl/80086 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 241
} | [
2830,
3393,
78413,
82,
784,
39,
3830,
13790,
2201,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
1855,
3036,
5002,
5793,
741,
16867,
3265,
3036,
6435,
5793,
1141,
340,
5195,
1669,
1969,
4021,
25088,
10141,
11836,
1155,
340,
9598,
1669,
501,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestIpnsBasicIO(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
nd, mnt := setupIpnsTest(t, nil)
defer closeMount(mnt)
fname := mnt.Dir + "/local/testfile"
data := writeFileOrFail(t, 10, fname)
rbuf, err := ioutil.ReadFile(fname)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(rbuf, data) {
t.Fatal("Incorrect Read!")
}
fname2 := mnt.Dir + "/" + nd.Identity.Pretty() + "/testfile"
rbuf, err = ioutil.ReadFile(fname2)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(rbuf, data) {
t.Fatal("Incorrect Read!")
}
} | explode_data.jsonl/77465 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 259
} | [
2830,
3393,
23378,
4412,
15944,
3810,
1155,
353,
8840,
836,
8,
341,
743,
7497,
55958,
368,
341,
197,
3244,
57776,
7039,
741,
197,
532,
197,
303,
11,
296,
406,
1669,
6505,
23378,
4412,
2271,
1155,
11,
2092,
340,
16867,
3265,
16284,
125... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestDataTLB(t *testing.T) {
_, err := DataTLB(
unix.PERF_COUNT_HW_CACHE_OP_READ,
unix.PERF_COUNT_HW_CACHE_RESULT_ACCESS,
func() error { return nil },
)
if err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/33125 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 106
} | [
2830,
93200,
13470,
33,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1848,
1669,
2885,
13470,
33,
1006,
197,
20479,
941,
47320,
37,
14672,
44013,
29138,
13908,
13117,
345,
197,
20479,
941,
47320,
37,
14672,
44013,
29138,
21181,
24420,
345,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestGetUserWithWallet_NewUser(t *testing.T) {
setupTest()
srv := test.RandServerAddress(t)
rt := sdkrouter.New(map[string]string{"a": srv})
url, cleanup := dummyAPI(srv)
defer cleanup()
u, err := GetUserWithSDKServer(rt, url, "abc", "")
require.NoError(t, err, errors.Unwrap(err))
require.NotNil(t, u)
count, err := models.Users(models.UserWhere.ID.EQ(u.ID)).CountG()
require.NoError(t, err)
assert.EqualValues(t, 1, count)
assert.True(t, u.LbrynetServerID.IsZero()) // because the server came from a config, it should not have an id set
// now assign the user a new server thats set in the db
// rand.Intn(99999),
sdk := &models.LbrynetServer{
Name: "testing",
Address: "test.test.test.test",
}
err = u.SetLbrynetServerG(true, sdk)
require.NoError(t, err)
require.NotEqual(t, 0, sdk.ID)
require.Equal(t, u.LbrynetServerID.Int, sdk.ID)
// now fetch it all back from the db
u2, err := GetUserWithSDKServer(rt, url, "abc", "")
require.NoError(t, err, errors.Unwrap(err))
require.NotNil(t, u2)
sdk2, err := u.LbrynetServer().OneG()
require.NoError(t, err)
require.Equal(t, sdk.ID, sdk2.ID)
require.Equal(t, sdk.Address, sdk2.Address)
require.Equal(t, u.LbrynetServerID.Int, sdk2.ID)
} | explode_data.jsonl/1629 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 523
} | [
2830,
3393,
1949,
1474,
2354,
38259,
39582,
1474,
1155,
353,
8840,
836,
8,
341,
84571,
2271,
741,
1903,
10553,
1669,
1273,
2013,
437,
5475,
4286,
1155,
340,
55060,
1669,
45402,
9937,
7121,
9147,
14032,
30953,
4913,
64,
788,
43578,
3518,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestValidateUEKey(t *testing.T) {
err := validateUEKey(nil)
assert.Exactly(t, errors.New("Invalid Argument: key cannot be nil"), err)
err = validateUEKey(make([]byte, 5))
assert.Exactly(t, errors.New("Invalid Argument: key must be 16 bytes"), err)
err = validateUEKey(make([]byte, 16))
assert.NoError(t, err)
} | explode_data.jsonl/28137 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 112
} | [
2830,
3393,
17926,
2230,
1592,
1155,
353,
8840,
836,
8,
341,
9859,
1669,
9593,
2230,
1592,
27907,
340,
6948,
5121,
32739,
1155,
11,
5975,
7121,
445,
7928,
13818,
25,
1376,
4157,
387,
2092,
3975,
1848,
692,
9859,
284,
9593,
2230,
1592,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestOutputOptions(t *testing.T) {
stdout := bytes.NewBuffer([]byte{})
stderr := bytes.NewBuffer([]byte{})
type testCase func(*testing.T, Output)
cases := map[string]testCase{
"NilOptionsValidate": func(t *testing.T, opts Output) {
assert.Zero(t, opts)
assert.NoError(t, opts.Validate())
},
"ErrorOutputSpecified": func(t *testing.T, opts Output) {
opts.Output = stdout
opts.Error = stderr
assert.NoError(t, opts.Validate())
},
"SuppressErrorWhenSpecified": func(t *testing.T, opts Output) {
opts.Error = stderr
opts.SuppressError = true
assert.Error(t, opts.Validate())
},
"SuppressOutputWhenSpecified": func(t *testing.T, opts Output) {
opts.Output = stdout
opts.SuppressOutput = true
assert.Error(t, opts.Validate())
},
"RedirectErrorToNillFails": func(t *testing.T, opts Output) {
opts.SendOutputToError = true
assert.Error(t, opts.Validate())
},
"RedirectOutputToError": func(t *testing.T, opts Output) {
opts.SendOutputToError = true
assert.Error(t, opts.Validate())
},
"SuppressAndRedirectOutputIsInvalid": func(t *testing.T, opts Output) {
opts.SuppressOutput = true
opts.SendOutputToError = true
assert.Error(t, opts.Validate())
},
"SuppressAndRedirectErrorIsInvalid": func(t *testing.T, opts Output) {
opts.SuppressError = true
opts.SendErrorToOutput = true
assert.Error(t, opts.Validate())
},
"DiscardIsNilForOutput": func(t *testing.T, opts Output) {
opts.Error = stderr
opts.Output = ioutil.Discard
assert.True(t, opts.outputIsNull())
assert.False(t, opts.errorIsNull())
},
"NilForOutputIsValid": func(t *testing.T, opts Output) {
opts.Error = stderr
assert.True(t, opts.outputIsNull())
assert.False(t, opts.errorIsNull())
},
"DiscardIsNilForError": func(t *testing.T, opts Output) {
opts.Error = ioutil.Discard
opts.Output = stdout
assert.True(t, opts.errorIsNull())
assert.False(t, opts.outputIsNull())
},
"NilForErrorIsValid": func(t *testing.T, opts Output) {
opts.Output = stdout
assert.True(t, opts.errorIsNull())
assert.False(t, opts.outputIsNull())
},
"OutputGetterNilIsIoDiscard": func(t *testing.T, opts Output) {
out, err := opts.GetOutput()
assert.NoError(t, err)
assert.Equal(t, ioutil.Discard, out)
},
"OutputGetterWhenPopulatedIsCorrect": func(t *testing.T, opts Output) {
opts.Output = stdout
out, err := opts.GetOutput()
assert.NoError(t, err)
assert.Equal(t, stdout, out)
},
"ErrorGetterNilIsIoDiscard": func(t *testing.T, opts Output) {
outErr, err := opts.GetError()
assert.NoError(t, err)
assert.Equal(t, ioutil.Discard, outErr)
},
"ErrorGetterWhenPopulatedIsCorrect": func(t *testing.T, opts Output) {
opts.Error = stderr
outErr, err := opts.GetError()
assert.NoError(t, err)
assert.Equal(t, stderr, outErr)
},
"RedirectErrorHasCorrectSemantics": func(t *testing.T, opts Output) {
opts.Output = stdout
opts.Error = stderr
opts.SendErrorToOutput = true
outErr, err := opts.GetError()
assert.NoError(t, err)
assert.Equal(t, stdout, outErr)
},
"RedirectOutputHasCorrectSemantics": func(t *testing.T, opts Output) {
opts.Output = stdout
opts.Error = stderr
opts.SendOutputToError = true
out, err := opts.GetOutput()
assert.NoError(t, err)
assert.Equal(t, stderr, out)
},
"RedirectCannotHaveCycle": func(t *testing.T, opts Output) {
opts.Output = stdout
opts.Error = stderr
opts.SendOutputToError = true
opts.SendErrorToOutput = true
assert.Error(t, opts.Validate())
},
"SuppressOutputWithLogger": func(t *testing.T, opts Output) {
opts.Loggers = []*LoggerConfig{
{
info: loggerConfigInfo{
Type: LogDefault,
Format: RawLoggerConfigFormatBSON,
},
},
}
opts.SuppressOutput = true
assert.NoError(t, opts.Validate())
},
"SuppressErrorWithLogger": func(t *testing.T, opts Output) {
opts.Loggers = []*LoggerConfig{
{
info: loggerConfigInfo{
Type: LogDefault,
Format: RawLoggerConfigFormatBSON,
},
},
}
opts.SuppressError = true
assert.NoError(t, opts.Validate())
},
"SuppressOutputAndErrorWithLogger": func(t *testing.T, opts Output) {
opts.Loggers = []*LoggerConfig{
{
info: loggerConfigInfo{
Type: LogDefault,
Format: RawLoggerConfigFormatBSON,
},
},
}
opts.SuppressOutput = true
opts.SuppressError = true
assert.NoError(t, opts.Validate())
},
"RedirectOutputWithLogger": func(t *testing.T, opts Output) {
opts.Loggers = []*LoggerConfig{
{
info: loggerConfigInfo{
Type: LogDefault,
Format: RawLoggerConfigFormatBSON,
},
},
}
opts.SendOutputToError = true
assert.NoError(t, opts.Validate())
},
"RedirectErrorWithLogger": func(t *testing.T, opts Output) {
opts.Loggers = []*LoggerConfig{
{
info: loggerConfigInfo{
Type: LogDefault,
Format: RawLoggerConfigFormatBSON,
},
},
}
opts.SendErrorToOutput = true
assert.NoError(t, opts.Validate())
},
"GetOutputWithStdoutAndLogger": func(t *testing.T, opts Output) {
opts.Output = stdout
opts.Loggers = []*LoggerConfig{
{
info: loggerConfigInfo{
Type: LogInMemory,
Format: RawLoggerConfigFormatBSON,
},
producer: &InMemoryLoggerOptions{
InMemoryCap: 100,
Base: BaseOptions{Format: LogFormatPlain},
},
},
}
out, err := opts.GetOutput()
require.NoError(t, err)
msg := "foo"
_, err = out.Write([]byte(msg))
assert.NoError(t, err)
assert.NoError(t, opts.outputSender.Close())
assert.Equal(t, msg, stdout.String())
safeSender, ok := opts.Loggers[0].sender.(*SafeSender)
require.True(t, ok)
sender, ok := safeSender.Sender.(*send.InMemorySender)
require.True(t, ok)
logOut, err := sender.GetString()
require.NoError(t, err)
require.Equal(t, 1, len(logOut))
assert.Equal(t, msg, strings.Join(logOut, ""))
},
"GetErrorWithErrorAndLogger": func(t *testing.T, opts Output) {
opts.Error = stderr
opts.Loggers = []*LoggerConfig{
{
info: loggerConfigInfo{
Type: LogInMemory,
Format: RawLoggerConfigFormatJSON,
},
producer: &InMemoryLoggerOptions{
InMemoryCap: 100,
Base: BaseOptions{Format: LogFormatPlain},
},
},
}
errOut, err := opts.GetError()
require.NoError(t, err)
msg := "foo"
_, err = errOut.Write([]byte(msg))
assert.NoError(t, err)
assert.NoError(t, opts.errorSender.Close())
assert.Equal(t, msg, stderr.String())
safeSender, ok := opts.Loggers[0].sender.(*SafeSender)
require.True(t, ok)
sender, ok := safeSender.Sender.(*send.InMemorySender)
require.True(t, ok)
logErr, err := sender.GetString()
require.NoError(t, err)
require.Equal(t, 1, len(logErr))
assert.Equal(t, msg, strings.Join(logErr, ""))
},
// "": func(t *testing.T, opts Output) {}
}
for name, test := range cases {
t.Run(name, func(t *testing.T) {
test(t, Output{})
})
}
} | explode_data.jsonl/4846 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 3213
} | [
2830,
3393,
5097,
3798,
1155,
353,
8840,
836,
8,
341,
6736,
411,
1669,
5820,
7121,
4095,
10556,
3782,
37790,
6736,
615,
1669,
5820,
7121,
4095,
10556,
3782,
6257,
692,
13158,
54452,
2915,
4071,
8840,
836,
11,
9258,
692,
1444,
2264,
1669... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNotSargable(t *testing.T) {
if checkSkipTest(t) {
return
}
initIndexer := func(indexer *Indexer) (*Indexer, errors.Error) {
if indexer.IndexesById == nil {
indexer.IndexesById = initIndexesById(t, map[string]*Index{
"ftsIdx": {
SourceName: "1doc",
Parent: indexer,
IdStr: "ftsIdx",
NameStr: "ftsIdx",
IndexMapping: &mapping.IndexMappingImpl{
DefaultAnalyzer: "keyword",
DefaultDateTimeParser: "disabled",
DefaultMapping: &mapping.DocumentMapping{
Enabled: true,
},
},
},
})
}
return indexer, nil
}
c := MakeWrapCallbacksForIndexType(datastore.IndexType("FTS"), initIndexer)
s, err := NewServer("./", c)
if err != nil {
t.Fatalf("did not expect err: %v", err)
}
r, err := ExecuteStatement(s,
"select * from data:`1doc` as b"+
` WHERE SEARCH(b.a, "hello", {"index": "ftsIdx"})`, nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
r, err = ExecuteStatement(s,
"select *, META() from data:`1doc` as b", nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
r, err = ExecuteStatement(s,
"select * from data:`1doc` as b"+
` WHERE SEARCH(b.a, {"match": "hello"}, {"index": "ftsIdx"})`, nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
r, err = ExecuteStatement(s,
"select * from data:`1doc` as b UNNEST children as c UNNEST c.pets as cpets"+
" LET x = c.pets"+
` WHERE SEARCH(b.a, {"match": "hello"}, {"index": "ftsIdx"})`+
` AND x = "fluffy"`+
` AND cpets = "spot"`,
nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
} | explode_data.jsonl/45729 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 833
} | [
2830,
3393,
2623,
50,
858,
480,
1155,
353,
8840,
836,
8,
341,
743,
1779,
35134,
2271,
1155,
8,
341,
197,
853,
198,
197,
630,
28248,
1552,
261,
1669,
2915,
7195,
261,
353,
1552,
261,
8,
4609,
1552,
261,
11,
5975,
6141,
8,
341,
197,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestWorkerInProgress(t *testing.T) {
pool := newTestPool(":6379")
ns := "work"
job1 := "job1"
deleteQueue(pool, ns, job1)
deleteRetryAndDead(pool, ns)
deletePausedAndLockedKeys(ns, job1, pool)
jobTypes := make(map[string]*jobType)
jobTypes[job1] = &jobType{
Name: job1,
JobOptions: JobOptions{Priority: 1},
IsGeneric: true,
GenericHandler: func(job *Job) error {
time.Sleep(30 * time.Millisecond)
return nil
},
}
enqueuer := NewEnqueuer(ns, pool)
_, err := enqueuer.Enqueue(job1, Q{"a": 1})
assert.Nil(t, err)
w := newWorker(ns, "1", pool, tstCtxType, nil, jobTypes, nil)
w.start()
// instead of w.forceIter(), we'll wait for 10 milliseconds to let the job start
// The job will then sleep for 30ms. In that time, we should be able to see something in the in-progress queue.
time.Sleep(10 * time.Millisecond)
assert.EqualValues(t, 0, listSize(pool, redisKeyJobs(ns, job1)))
assert.EqualValues(t, 1, listSize(pool, redisKeyJobsInProgress(ns, "1", job1)))
assert.EqualValues(t, 1, getInt64(pool, redisKeyJobsLock(ns, job1)))
assert.EqualValues(t, 1, hgetInt64(pool, redisKeyJobsLockInfo(ns, job1), w.poolID))
// nothing in the worker status
w.observer.drain()
h := readHash(pool, redisKeyWorkerObservation(ns, w.workerID))
assert.Equal(t, job1, h["job_name"])
assert.Equal(t, `{"a":1}`, h["args"])
// NOTE: we could check for job_id and started_at, but it's a PITA and it's tested in observer_test.
w.drain()
w.stop()
// At this point, it should all be empty.
assert.EqualValues(t, 0, listSize(pool, redisKeyJobs(ns, job1)))
assert.EqualValues(t, 0, listSize(pool, redisKeyJobsInProgress(ns, "1", job1)))
// nothing in the worker status
h = readHash(pool, redisKeyWorkerObservation(ns, w.workerID))
assert.EqualValues(t, 0, len(h))
} | explode_data.jsonl/35596 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 717
} | [
2830,
3393,
21936,
88711,
1155,
353,
8840,
836,
8,
341,
85273,
1669,
501,
2271,
10551,
18893,
21,
18,
22,
24,
1138,
84041,
1669,
330,
1778,
698,
68577,
16,
1669,
330,
8799,
16,
698,
15618,
7554,
41838,
11,
12268,
11,
2618,
16,
340,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestControllerFleetAutoScalerState(t *testing.T) {
registry := prometheus.NewRegistry()
_, err := RegisterPrometheusExporter(registry)
assert.Nil(t, err)
c := newFakeController()
defer c.close()
c.run(t)
// testing fleet name change
fasFleetNameChange := fleetAutoScaler("first-fleet", "name-switch")
c.fasWatch.Add(fasFleetNameChange)
fasFleetNameChange = fasFleetNameChange.DeepCopy()
fasFleetNameChange.Spec.Policy.Buffer.BufferSize = intstr.FromInt(10)
fasFleetNameChange.Spec.Policy.Buffer.MaxReplicas = 50
fasFleetNameChange.Spec.Policy.Buffer.MinReplicas = 10
fasFleetNameChange.Status.CurrentReplicas = 20
fasFleetNameChange.Status.DesiredReplicas = 10
fasFleetNameChange.Status.ScalingLimited = true
c.fasWatch.Modify(fasFleetNameChange)
fasFleetNameChange = fasFleetNameChange.DeepCopy()
fasFleetNameChange.Spec.FleetName = "second-fleet"
c.fasWatch.Modify(fasFleetNameChange)
// testing deletion
fasDeleted := fleetAutoScaler("deleted-fleet", "deleted")
fasDeleted.Spec.Policy.Buffer.BufferSize = intstr.FromString("50%")
fasDeleted.Spec.Policy.Buffer.MaxReplicas = 150
fasDeleted.Spec.Policy.Buffer.MinReplicas = 15
c.fasWatch.Add(fasDeleted)
c.fasWatch.Delete(fasDeleted)
c.sync()
report()
assert.Nil(t, testutil.GatherAndCompare(registry, strings.NewReader(fasStateExpected),
"agones_fleet_autoscalers_able_to_scale", "agones_fleet_autoscalers_buffer_limits", "agones_fleet_autoscalers_buffer_size",
"agones_fleet_autoscalers_current_replicas_count", "agones_fleet_autoscalers_desired_replicas_count", "agones_fleet_autoscalers_limited"))
} | explode_data.jsonl/53230 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 599
} | [
2830,
3393,
2051,
37,
18973,
13253,
59553,
1397,
1155,
353,
8840,
836,
8,
341,
197,
29172,
1669,
2706,
39705,
7121,
15603,
741,
197,
6878,
1848,
1669,
8451,
35186,
39705,
88025,
18390,
4944,
340,
6948,
59678,
1155,
11,
1848,
692,
1444,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSwarmGetFrontendName(t *testing.T) {
testCases := []struct {
service swarm.Service
expected string
networks map[string]*docker.NetworkResource
}{
{
service: swarmService(serviceName("foo")),
expected: "Host-foo-docker-localhost-0",
networks: map[string]*docker.NetworkResource{},
},
{
service: swarmService(serviceLabels(map[string]string{
label.TraefikFrontendRule: "Headers:User-Agent,bat/0.1.0",
})),
expected: "Headers-User-Agent-bat-0-1-0-0",
networks: map[string]*docker.NetworkResource{},
},
{
service: swarmService(serviceLabels(map[string]string{
label.TraefikFrontendRule: "Host:foo.bar",
})),
expected: "Host-foo-bar-0",
networks: map[string]*docker.NetworkResource{},
},
{
service: swarmService(serviceLabels(map[string]string{
label.TraefikFrontendRule: "Path:/test",
})),
expected: "Path-test-0",
networks: map[string]*docker.NetworkResource{},
},
{
service: swarmService(
serviceName("test"),
serviceLabels(map[string]string{
label.TraefikFrontendRule: "PathPrefix:/test2",
}),
),
expected: "PathPrefix-test2-0",
networks: map[string]*docker.NetworkResource{},
},
}
for serviceID, test := range testCases {
test := test
t.Run(strconv.Itoa(serviceID), func(t *testing.T) {
t.Parallel()
dData := parseService(test.service, test.networks)
segmentProperties := label.ExtractTraefikLabels(dData.Labels)
dData.SegmentLabels = segmentProperties[""]
provider := &Provider{
Domain: "docker.localhost",
SwarmMode: true,
}
actual := provider.getFrontendName(dData, 0)
assert.Equal(t, test.expected, actual)
})
}
} | explode_data.jsonl/1392 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 702
} | [
2830,
3393,
13218,
2178,
1949,
23395,
408,
675,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
52934,
220,
60841,
13860,
198,
197,
42400,
914,
198,
197,
9038,
2349,
82,
2415,
14032,
8465,
28648,
30149,
4783,
198... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetDeploymentConfigOK(t *testing.T) {
mockRegistry := test.NewDeploymentConfigRegistry()
mockRegistry.DeploymentConfig = &api.DeploymentConfig{
ObjectMeta: kapi.ObjectMeta{Name: "foo"},
}
storage := REST{registry: mockRegistry}
deploymentConfig, err := storage.Get(kapi.NewDefaultContext(), "foo")
if deploymentConfig == nil {
t.Error("Unexpected nil deploymentConfig")
}
if err != nil {
t.Errorf("Unexpected non-nil error", err)
}
if deploymentConfig.(*api.DeploymentConfig).Name != "foo" {
t.Errorf("Unexpected deploymentConfig: %#v", deploymentConfig)
}
} | explode_data.jsonl/66988 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 200
} | [
2830,
3393,
1949,
75286,
2648,
3925,
1155,
353,
8840,
836,
8,
341,
77333,
15603,
1669,
1273,
7121,
75286,
2648,
15603,
741,
77333,
15603,
34848,
39130,
2648,
284,
609,
2068,
34848,
39130,
2648,
515,
197,
23816,
12175,
25,
595,
2068,
80222... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestSSLClientCertificates(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
// Should also fail without a valid certificate
db, err := openSSLConn(t, "sslmode=require user=pqgosslcert")
if err == nil {
db.Close()
t.Fatal("expected error")
}
pge, ok := err.(*Error)
if !ok {
t.Fatal("expected pq.Error")
}
if pge.Code.Name() != "invalid_authorization_specification" {
t.Fatalf("unexpected error code %q", pge.Code.Name())
}
// Should work
db, err = openSSLConn(t, getCertConninfo(t, "valid"))
if err != nil {
t.Fatal(err)
}
rows, err := db.Query("SELECT 1")
if err != nil {
t.Fatal(err)
}
rows.Close()
} | explode_data.jsonl/1386 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 294
} | [
2830,
3393,
22594,
2959,
97140,
1155,
353,
8840,
836,
8,
341,
2109,
49791,
35134,
22594,
18200,
1155,
340,
197,
322,
11586,
46842,
1779,
25,
1265,
3690,
2041,
25316,
198,
25157,
22594,
21821,
1155,
11,
330,
24635,
8516,
28,
18015,
1196,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestAnnotatingExecuteEntityIds(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteEntityIds")
_, err := rpcVTGate.ExecuteEntityIds(
context.Background(),
"INSERT INTO table () VALUES();",
nil,
keyspace,
"entity_column_name",
[]*vtgatepb.ExecuteEntityIdsRequest_EntityId{
{
Type: sqltypes.Int64,
Value: []byte("0"),
KeyspaceId: []byte{0x10}, // First shard.
},
{
Type: sqltypes.Int64,
Value: []byte("1"),
KeyspaceId: []byte{0x25}, // Second shard.
},
},
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
verifyQueryAnnotatedAsUnfriendly(t, shards[0])
verifyQueryAnnotatedAsUnfriendly(t, shards[1])
} | explode_data.jsonl/7847 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 355
} | [
2830,
3393,
2082,
1921,
1095,
17174,
3030,
12701,
1155,
353,
8840,
836,
8,
341,
23634,
8746,
11,
74110,
1669,
18620,
50,
31536,
2354,
11613,
2016,
2347,
445,
2271,
2082,
1921,
1095,
17174,
3030,
12701,
5130,
197,
6878,
1848,
1669,
35596,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestReverseQuery(t *testing.T) {
var profile Profile
err := dORM.QueryTable("user_profile").Filter("User", 3).One(&profile)
throwFailNow(t, err)
throwFailNow(t, AssertIs(profile.Age, 30))
profile = Profile{}
err = dORM.QueryTable("user_profile").Filter("User__UserName", "astaxie").One(&profile)
throwFailNow(t, err)
throwFailNow(t, AssertIs(profile.Age, 30))
var user User
err = dORM.QueryTable("user").Filter("Posts__Title", "Examples").One(&user)
throwFailNow(t, err)
throwFailNow(t, AssertIs(user.UserName, "astaxie"))
user = User{}
err = dORM.QueryTable("user").Filter("Posts__User__UserName", "astaxie").Limit(1).One(&user)
throwFailNow(t, err)
throwFailNow(t, AssertIs(user.UserName, "astaxie"))
user = User{}
err = dORM.QueryTable("user").Filter("Posts__User__UserName", "astaxie").RelatedSel().Limit(1).One(&user)
throwFailNow(t, err)
throwFailNow(t, AssertIs(user.UserName, "astaxie"))
throwFailNow(t, AssertIs(user.Profile == nil, false))
throwFailNow(t, AssertIs(user.Profile.Age, 30))
var posts []*Post
num, err := dORM.QueryTable("post").Filter("Tags__Tag__Name", "golang").All(&posts)
throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 3))
throwFailNow(t, AssertIs(posts[0].Title, "Introduction"))
posts = []*Post{}
num, err = dORM.QueryTable("post").Filter("Tags__Tag__Name", "golang").Filter("User__UserName", "slene").All(&posts)
throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 1))
throwFailNow(t, AssertIs(posts[0].Title, "Introduction"))
posts = []*Post{}
num, err = dORM.QueryTable("post").Filter("Tags__Tag__Name", "golang").
Filter("User__UserName", "slene").RelatedSel().All(&posts)
throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 1))
throwFailNow(t, AssertIs(posts[0].User == nil, false))
throwFailNow(t, AssertIs(posts[0].User.UserName, "slene"))
var tags []*Tag
num, err = dORM.QueryTable("tag").Filter("Posts__Post__Title", "Introduction").All(&tags)
throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 1))
throwFailNow(t, AssertIs(tags[0].Name, "golang"))
tags = []*Tag{}
num, err = dORM.QueryTable("tag").Filter("Posts__Post__Title", "Introduction").
Filter("BestPost__User__UserName", "astaxie").All(&tags)
throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 1))
throwFailNow(t, AssertIs(tags[0].Name, "golang"))
tags = []*Tag{}
num, err = dORM.QueryTable("tag").Filter("Posts__Post__Title", "Introduction").
Filter("BestPost__User__UserName", "astaxie").RelatedSel().All(&tags)
throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 1))
throwFailNow(t, AssertIs(tags[0].Name, "golang"))
throwFailNow(t, AssertIs(tags[0].BestPost == nil, false))
throwFailNow(t, AssertIs(tags[0].BestPost.Title, "Examples"))
throwFailNow(t, AssertIs(tags[0].BestPost.User == nil, false))
throwFailNow(t, AssertIs(tags[0].BestPost.User.UserName, "astaxie"))
} | explode_data.jsonl/18140 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1112
} | [
2830,
3393,
45695,
2859,
1155,
353,
8840,
836,
8,
341,
2405,
5526,
12329,
198,
9859,
1669,
294,
4365,
15685,
2556,
445,
872,
13789,
1827,
5632,
445,
1474,
497,
220,
18,
568,
3966,
2099,
5365,
340,
9581,
19524,
7039,
1155,
11,
1848,
34... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRuleSelectStar(t *testing.T) {
common.Log.Debug("Entering function: %s", common.GetFunctionName())
sqls := []string{
"select * from tbl where id=1",
"select col, * from tbl where id=1",
// 反面例子
// "select count(*) from film where id=1",
// `select count(* ) from film where id=1`,
}
for _, sql := range sqls {
q, err := NewQuery4Audit(sql)
if err == nil {
rule := q.RuleSelectStar()
if rule.Item != "COL.001" {
t.Error("Rule not match:", rule.Item, "Expect : COL.001")
}
} else {
t.Error("sqlparser.Parse Error:", err)
}
}
common.Log.Debug("Exiting function: %s", common.GetFunctionName())
} | explode_data.jsonl/76756 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 267
} | [
2830,
3393,
11337,
3379,
12699,
1155,
353,
8840,
836,
8,
341,
83825,
5247,
20345,
445,
82867,
729,
25,
1018,
82,
497,
4185,
2234,
5152,
675,
2398,
30633,
82,
1669,
3056,
917,
515,
197,
197,
1,
1742,
353,
504,
21173,
1380,
877,
28,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestGetUserInfo(t *testing.T) {
log.SetLogger(t)
cfg := test.LoadTestingConf(t)
ldapConfig := Config{
RootDN: cfg["ldapRootDN"],
UserSearchBase: cfg["ldapUserSearchBase"],
UserSearch: cfg["ldapUserSearch"],
UserFullname: cfg["ldapFullname"],
Host: cfg["ldapHost"],
ManagerDN: cfg["ldapManagerDN"],
ManagerPassword: cfg["ldapManagerPassword"],
}
if ldapConfig.Host == "" {
t.SkipNow()
}
ldapConfig.Port, _ = strconv.Atoi(cfg["ldapPort"])
ldapConfig.SSL, _ = strconv.ParseBool(cfg["ldapSSL"])
driver, err := NewDriver(context.TODO(), false, ldapConfig)
require.NoError(t, err)
info, err := driver.GetUserInfo(context.TODO(), sdk.AuthConsumerSigninRequest{
"bind": cfg["ldapTestUsername"],
"password": cfg["ldapTestPassword"],
})
require.NoError(t, err)
require.Equal(t, cfg["ldapTestUsername"], info.Username)
require.NotEmpty(t, info.Email, "Email")
require.NotEmpty(t, info.Fullname, "Fullname")
require.NotEmpty(t, info.ExternalID, "ExternalID")
} | explode_data.jsonl/36374 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 436
} | [
2830,
3393,
1949,
36158,
1155,
353,
8840,
836,
8,
341,
6725,
4202,
7395,
1155,
340,
50286,
1669,
1273,
13969,
16451,
15578,
1155,
340,
197,
38665,
2648,
1669,
5532,
515,
197,
197,
8439,
31264,
25,
688,
13286,
1183,
38665,
8439,
31264,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestErrMissingOneOf(t *testing.T) {
expected := cli.FieldErrors{
&field.Error{
Type: field.ErrorTypeRequired,
Field: "[field1, field2, field3]",
BadValue: "",
Detail: "expected exactly one, got neither",
},
}
actual := cli.ErrMissingOneOf("field1", "field2", "field3")
if diff := cmp.Diff(expected, actual); diff != "" {
t.Errorf("(-expected, +actual): %s", diff)
}
} | explode_data.jsonl/13220 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 169
} | [
2830,
3393,
7747,
25080,
3966,
2124,
1155,
353,
8840,
836,
8,
341,
42400,
1669,
21348,
17087,
13877,
515,
197,
197,
5,
2566,
6141,
515,
298,
27725,
25,
257,
2070,
6141,
929,
8164,
345,
298,
94478,
25,
262,
10545,
2566,
16,
11,
2070,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestBuzzerDriverOnError(t *testing.T) {
a := newGpioTestAdaptor()
d := initTestBuzzerDriver(a)
a.TestAdaptorDigitalWrite(func() (err error) {
return errors.New("write error")
})
gobottest.Assert(t, d.On(), errors.New("write error"))
} | explode_data.jsonl/52369 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 93
} | [
2830,
3393,
33,
91447,
11349,
74945,
1155,
353,
8840,
836,
8,
341,
11323,
1669,
501,
38,
11917,
2271,
2589,
32657,
741,
2698,
1669,
2930,
2271,
33,
91447,
11349,
2877,
340,
11323,
8787,
2589,
32657,
38112,
7985,
18552,
368,
320,
615,
14... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSendHandlerError(t *testing.T) {
svc := awstesting.NewClient(&aws.Config{
HTTPClient: &http.Client{
Transport: &testSendHandlerTransport{},
},
})
svc.Handlers.Clear()
svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler)
r := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
r.Send()
if r.Error == nil {
t.Errorf("expect error, got none")
}
if r.HTTPResponse == nil {
t.Errorf("expect response, got none")
}
} | explode_data.jsonl/44098 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 183
} | [
2830,
3393,
11505,
3050,
1454,
1155,
353,
8840,
836,
8,
341,
1903,
7362,
1669,
1360,
267,
59855,
7121,
2959,
2099,
8635,
10753,
515,
197,
197,
9230,
2959,
25,
609,
1254,
11716,
515,
298,
197,
27560,
25,
609,
1944,
11505,
3050,
27560,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestBuildAllowAllTrafficPolicies(t *testing.T) {
assert := tassert.New(t)
mc := newFakeMeshCatalog()
actual := mc.buildAllowAllTrafficPolicies(tests.BookstoreV1Service)
var actualTargetNames []string
for _, target := range actual {
actualTargetNames = append(actualTargetNames, target.Name)
}
expected := []string{
"default/bookstore-v1->default/bookbuyer",
"default/bookstore-v1->default/bookstore-apex",
"default/bookstore-v2->default/bookbuyer",
"default/bookstore-v2->default/bookstore-apex",
"default/bookbuyer->default/bookstore-v1",
"default/bookbuyer->default/bookstore-apex",
"default/bookstore-apex->default/bookstore-v1",
"default/bookbuyer->default/bookstore-v2",
"default/bookstore-apex->default/bookstore-v2",
"default/bookstore-apex->default/bookbuyer",
"default/bookstore-v1->default/bookstore-v2",
"default/bookstore-v2->default/bookstore-v1",
}
assert.ElementsMatch(actualTargetNames, expected)
} | explode_data.jsonl/69762 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 353
} | [
2830,
3393,
11066,
18605,
2403,
87229,
47,
42038,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
259,
2207,
7121,
1155,
692,
97662,
1669,
501,
52317,
14194,
41606,
2822,
88814,
1669,
19223,
13239,
18605,
2403,
87229,
47,
42038,
8623,
82,
55253... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestAPICopyObjectPartHandlerSanity(t *testing.T) {
defer DetectTestLeak(t)()
ExecObjectLayerAPITest(t, testAPICopyObjectPartHandlerSanity, []string{"CopyObjectPart"})
} | explode_data.jsonl/10700 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 61
} | [
2830,
3393,
2537,
1317,
1266,
1190,
5800,
3050,
23729,
487,
1155,
353,
8840,
836,
8,
341,
16867,
33287,
2271,
2304,
585,
1155,
8,
741,
197,
10216,
1190,
9188,
2537,
952,
477,
1155,
11,
1273,
2537,
1317,
1266,
1190,
5800,
3050,
23729,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func Test_getWssdGroup(t *testing.T) {
grp := &cloud.Group{
Name: &name,
ID: &Id,
}
wssdcloudGroup := getWssdGroup(grp)
if *grp.ID != wssdcloudGroup.Id {
t.Errorf("ID doesnt match post conversion")
}
if *grp.Name != wssdcloudGroup.Name {
t.Errorf("Name doesnt match post conversion")
}
} | explode_data.jsonl/1454 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 134
} | [
2830,
3393,
3062,
54,
778,
67,
2808,
1155,
353,
8840,
836,
8,
341,
197,
42854,
1669,
609,
12361,
5407,
515,
197,
21297,
25,
609,
606,
345,
197,
29580,
25,
256,
609,
764,
345,
197,
532,
6692,
778,
67,
12361,
2808,
1669,
633,
54,
77... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSolveOperators_PreferCatalogInSameNamespace(t *testing.T) {
APISet := APISet{opregistry.APIKey{"g", "v", "k", "ks"}: struct{}{}}
Provides := APISet
namespace := "olm"
altNamespace := "alt-olm"
catalog := registry.CatalogKey{"community", namespace}
altnsCatalog := registry.CatalogKey{"alt-community", altNamespace}
csv := existingOperator(namespace, "packageA.v1", "packageA", "alpha", "", Provides, nil, nil, nil)
csvs := []*v1alpha1.ClusterServiceVersion{csv}
sub := existingSub(namespace,"packageA.v1", "packageA", "alpha", catalog)
subs := []*v1alpha1.Subscription{sub}
fakeNamespacedOperatorCache := NamespacedOperatorCache{
snapshots: map[registry.CatalogKey]*CatalogSnapshot{
catalog: {
operators: []*Operator{
genOperator("packageA.v0.0.1", "0.0.1", "packageA.v1", "packageA", "alpha", catalog.Name, catalog.Namespace, nil, Provides, nil, ""),
},
},
altnsCatalog: {
operators: []*Operator{
genOperator("packageA.v0.0.1", "0.0.1", "packageA.v1", "packageA", "alpha", altnsCatalog.Name, altnsCatalog.Namespace, nil, Provides, nil, ""),
},
},
},
namespaces: []string{namespace, altNamespace},
}
satResolver := SatResolver{
cache: getFakeOperatorCache(fakeNamespacedOperatorCache),
}
operators, err := satResolver.SolveOperators([]string{namespace}, csvs, subs)
assert.NoError(t, err)
expected := OperatorSet{
"packageA.v0.0.1": genOperator("packageA.v0.0.1", "0.0.1", "packageA.v1", "packageA", "alpha", catalog.Name, catalog.Namespace, nil, Provides, nil, ""),
}
require.EqualValues(t, expected, operators)
} | explode_data.jsonl/43806 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 616
} | [
2830,
3393,
50,
3948,
77760,
1088,
41160,
41606,
641,
19198,
22699,
1155,
353,
8840,
836,
8,
341,
197,
2537,
1637,
295,
1669,
10106,
1637,
295,
90,
453,
29172,
24922,
1592,
4913,
70,
497,
330,
85,
497,
330,
74,
497,
330,
2787,
9207,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestForeachArray(t *testing.T) {
const jsonStr = `{
"programmers": [
{
"firstName": "Janet",
"lastName": "McLaughlin",
}, {
"firstName": "Elliotte",
"lastName": "Hunter",
}, {
"firstName": "Jason",
"lastName": "Harold",
}
]
}`
// 获取每一行的lastName
result := Get(jsonStr, "programmers.#.lastName")
for _, name := range result.Array() {
println(name.String())
}
// 查找lastName为Hunter的数据
name := Get(jsonStr, `programmers.#(lastName="Hunter").firstName`)
println(name.String())
// 遍历数组
result = Get(jsonStr, "programmers")
result.ForEach(func(_, value Result) bool {
println(value.String())
return true // keep iterating
})
} | explode_data.jsonl/43409 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 349
} | [
2830,
3393,
37,
8539,
1857,
1155,
353,
8840,
836,
8,
341,
4777,
2951,
2580,
284,
1565,
515,
464,
197,
1,
14906,
22780,
788,
2278,
1144,
197,
515,
1144,
197,
1,
27987,
788,
330,
18315,
295,
497,
715,
1144,
197,
1,
29156,
788,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestArchiveTeams(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
id := model.NewId()
name := "name" + id
displayName := "Name " + id
th.CheckCommand(t, "team", "create", "--name", name, "--display_name", displayName)
th.CheckCommand(t, "team", "archive", name)
output := th.CheckCommand(t, "team", "list")
if !strings.Contains(string(output), name+" (archived)") {
t.Fatal("should have archived team")
}
} | explode_data.jsonl/65280 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 165
} | [
2830,
3393,
42502,
60669,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1005,
3803,
15944,
741,
16867,
270,
836,
682,
4454,
2822,
15710,
1669,
1614,
7121,
764,
741,
11609,
1669,
330,
606,
1,
488,
877,
198,
31271,
675,
1669,
330,
6... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestNotEnoughProducts(t *testing.T) {
const oneProduct = "testData/oneProduct.txt"
output := runCommand(oneProduct, 1000)
if !notPossible(output) {
t.Errorf("Incorrect outptut for file with one product")
}
} | explode_data.jsonl/66005 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 88
} | [
2830,
3393,
2623,
95801,
17746,
1155,
353,
8840,
836,
8,
341,
262,
733,
825,
4816,
284,
330,
1944,
1043,
14,
603,
4816,
3909,
698,
262,
2550,
1669,
1598,
4062,
51067,
4816,
11,
220,
16,
15,
15,
15,
340,
262,
421,
753,
1921,
65222,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.