text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestMemberAddHasBoxes(t *testing.T) { tc, owner, other, _, name := memberSetupMultiple(t) defer tc.Cleanup() assertRole(tc, name, owner.Username, keybase1.TeamRole_OWNER) assertRole(tc, name, other.Username, keybase1.TeamRole_NONE) // this change request should generate boxes since other.Username // is not a member req := keybase1.TeamChangeReq{Readers: []keybase1.UserVersion{other.GetUserVersion()}} tm, err := GetForTestByStringName(context.TODO(), tc.G, name) if err != nil { t.Fatal(err) } _, boxes, _, _, err := tm.changeMembershipSection(context.TODO(), req) if err != nil { t.Fatal(err) } if boxes == nil || len(boxes.Boxes) == 0 { t.Errorf("add member failed to make new boxes") } }
explode_data.jsonl/13517
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 273 }
[ 2830, 3393, 9366, 2212, 10281, 93665, 1155, 353, 8840, 836, 8, 341, 78255, 11, 6372, 11, 1008, 11, 8358, 829, 1669, 4462, 21821, 32089, 1155, 340, 16867, 17130, 727, 60639, 2822, 6948, 9030, 44415, 11, 829, 11, 6372, 42777, 11, 1376, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestCompareTime(t *testing.T) { assert.True(t, utils.IsLessThanAnHour(time.Now().Unix()-3599)) assert.False(t, utils.IsLessThanAnHour(time.Now().Unix()-3600)) }
explode_data.jsonl/35585
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 70 }
[ 2830, 3393, 27374, 1462, 1155, 353, 8840, 836, 8, 341, 6948, 32443, 1155, 11, 12439, 4506, 27451, 26067, 2082, 30254, 9730, 13244, 1005, 55832, 15694, 18, 20, 24, 24, 1171, 6948, 50757, 1155, 11, 12439, 4506, 27451, 26067, 2082, 30254, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestMakeStructFieldDescriptor(t *testing.T) { e := mustNotRaise(RuntimeErrorType.Call(NewRootFrame(), wrapArgs("foo"), nil)) fun := newBuiltinFunction("TestMakeStructFieldDescriptor", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) { if raised := checkMethodArgs(f, "TestMakeStructFieldDescriptor", args, TypeType, StrType, StrType, ObjectType); raised != nil { return nil, raised } t := toTypeUnsafe(args[0]) desc := makeStructFieldDescriptor(t, toStrUnsafe(args[1]).Value(), toStrUnsafe(args[2]).Value(), fieldDescriptorRO) get, raised := GetAttr(f, desc, NewStr("__get__"), nil) if raised != nil { return nil, raised } return get.Call(f, wrapArgs(args[3], t), nil) }).ToObject() cases := []invokeTestCase{ {args: wrapArgs(ObjectType, "dict", "__dict__", newObject(ObjectType)), want: None}, {args: wrapArgs(ObjectType, "dict", "__dict__", newBuiltinFunction("foo", func(*Frame, Args, KWArgs) (*Object, *BaseException) { return nil, nil })), want: NewDict().ToObject()}, {args: wrapArgs(IntType, "value", "value", 42), want: NewInt(42).ToObject()}, {args: wrapArgs(StrType, "value", "value", 42), wantExc: mustCreateException(TypeErrorType, "descriptor 'value' for 'str' objects doesn't apply to 'int' objects")}, {args: wrapArgs(BaseExceptionType, "args", "args", e), want: NewTuple(NewStr("foo").ToObject()).ToObject()}, } for _, cas := range cases { if err := runInvokeTestCase(fun, &cas); err != "" { t.Error(err) } } }
explode_data.jsonl/79894
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 541 }
[ 2830, 3393, 8078, 9422, 1877, 11709, 1155, 353, 8840, 836, 8, 341, 7727, 1669, 1969, 2623, 93101, 52610, 1454, 929, 27017, 35063, 8439, 4369, 1507, 15061, 4117, 445, 7975, 3975, 2092, 1171, 90126, 1669, 501, 33, 25628, 5152, 445, 2271, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCloudTasksCreateTask(t *testing.T) { var name string = "name3373707" var dispatchCount int32 = 1217252086 var responseCount int32 = 424727441 var expectedResponse = &taskspb.Task{ Name: name, DispatchCount: dispatchCount, ResponseCount: responseCount, } mockCloudTasks.err = nil mockCloudTasks.reqs = nil mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse) var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]") var task *taskspb.Task = &taskspb.Task{} var request = &taskspb.CreateTaskRequest{ Parent: formattedParent, Task: task, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateTask(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } }
explode_data.jsonl/30865
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 429 }
[ 2830, 3393, 16055, 25449, 4021, 6262, 1155, 353, 8840, 836, 8, 341, 2405, 829, 914, 284, 330, 606, 18, 18, 22, 18, 22, 15, 22, 698, 2405, 6845, 2507, 526, 18, 17, 284, 220, 16, 17, 16, 22, 17, 20, 17, 15, 23, 21, 198, 2405, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestK8sPodToPublicPod(t *testing.T) { type podExp struct { k8sPod corev1.Pod ownerKind string ownerName string publicPod *pb.Pod } t.Run("Returns expected pods", func(t *testing.T) { expectations := []podExp{ { k8sPod: corev1.Pod{}, publicPod: &pb.Pod{ Name: "/", }, }, { k8sPod: corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: "ns", Name: "name", ResourceVersion: "resource-version", Labels: map[string]string{ k8s.ControllerComponentLabel: "controller-component", k8s.ControllerNSLabel: "controller-ns", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { Name: k8s.ProxyContainerName, Image: "linkerd-proxy:test-version", }, }, }, Status: corev1.PodStatus{ PodIP: "pod-ip", Phase: "status", ContainerStatuses: []corev1.ContainerStatus{ { Name: k8s.ProxyContainerName, Ready: true, }, }, }, }, ownerKind: k8s.Deployment, ownerName: "owner-name", publicPod: &pb.Pod{ Name: "ns/name", Owner: &pb.Pod_Deployment{Deployment: "ns/owner-name"}, ResourceVersion: "resource-version", ControlPlane: true, ControllerNamespace: "controller-ns", Status: "status", ProxyReady: true, ProxyVersion: "test-version", PodIP: "pod-ip", }, }, { k8sPod: corev1.Pod{ Status: corev1.PodStatus{ Phase: "Failed", Reason: "Evicted", ContainerStatuses: []corev1.ContainerStatus{ { Name: k8s.ProxyContainerName, Ready: true, }, }, }, }, ownerName: "owner-name", publicPod: &pb.Pod{ Name: "/", Status: "Evicted", ProxyReady: true, }, }, } for _, exp := range expectations { res := K8sPodToPublicPod(exp.k8sPod, exp.ownerKind, exp.ownerName) if diff := deep.Equal(exp.publicPod, res); diff != nil { t.Errorf("%v", diff) } } }) }
explode_data.jsonl/53453
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1159 }
[ 2830, 3393, 42, 23, 82, 23527, 1249, 12676, 23527, 1155, 353, 8840, 836, 8, 341, 13158, 7509, 8033, 2036, 341, 197, 16463, 23, 82, 23527, 262, 6200, 85, 16, 88823, 198, 197, 197, 8118, 10629, 914, 198, 197, 197, 8118, 675, 914, 198,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestSubmitSMSC(t *testing.T) { connection, err := gosmpp.NewTCPIPConnectionWithAddrPort(testSMSCAddr, testSMSCPort) if err != nil { t.Error(err) t.Fail() return } request := PDU.NewBindTransceiver() request.SetSystemId("smppclient1") request.SetPassword("password") request.SetSystemType("CMT") session = gosmpp.NewSessionWithConnection(connection) session.EnableStateChecking() listener := &TestPDUListener{} resp, e := session.BindWithListener(request, listener) if e != nil || resp.GetCommandStatus() != 0 { t.Error(e) t.Fail() return } // Test submit submit := PDU.NewSubmitSM() sourceAddr, _ := PDU.NewAddressWithAddr("smppclient1") sourceAddr.SetTon(5) sourceAddr.SetNpi(0) desAddr, _ := PDU.NewAddressWithAddr("smppclient2") desAddr.SetTon(1) desAddr.SetNpi(1) submit.SetSourceAddr(sourceAddr) submit.SetDestAddr(desAddr) submit.SetShortMessageWithEncoding("Biết đâu mà đợi", Data.ENC_UTF16) submit.SetDataCoding(8) submit.SetProtocolId(0) submit.SetRegisteredDelivery(1) submit.SetReplaceIfPresentFlag(0) submit.SetEsmClass(0) submit.SetSequenceNumber(10) if _, e = session.Submit(submit); e != nil { t.Errorf(e.Error.Error()) t.Fail() return } fmt.Println("Waiting 15 seconds to receive submitSMResp from SMSC or deliverSM") time.Sleep(15 * time.Second) fmt.Println("Done") }
explode_data.jsonl/70037
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 544 }
[ 2830, 3393, 8890, 9501, 3540, 1155, 353, 8840, 836, 8, 341, 54590, 11, 1848, 1669, 342, 436, 93368, 7121, 7749, 88322, 4526, 2354, 13986, 7084, 8623, 9501, 3540, 13986, 11, 1273, 9501, 3540, 7084, 340, 743, 1848, 961, 2092, 341, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestServiceAccountNotControlledByUs(t *testing.T) { f := newFixture(t) startTime := metav1.Now() completionTime := metav1.Now() var replicas int32 = 64 mpiJob := newMPIJob("test", &replicas, 1, gpuResourceName, &startTime, &completionTime) f.setUpMPIJob(mpiJob) configMap := newConfigMap(mpiJob, replicas, isGPULauncher(mpiJob)) updateDiscoverHostsInConfigMap(configMap, mpiJob, nil, isGPULauncher(mpiJob)) f.setUpConfigMap(configMap) serviceAccount := newLauncherServiceAccount(mpiJob) serviceAccount.OwnerReferences = nil f.setUpServiceAccount(serviceAccount) f.runExpectError(getKey(mpiJob, t)) }
explode_data.jsonl/29955
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 227 }
[ 2830, 3393, 1860, 7365, 2623, 3273, 832, 1359, 3558, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 501, 18930, 1155, 340, 21375, 1462, 1669, 77520, 16, 13244, 741, 32810, 14386, 1462, 1669, 77520, 16, 13244, 2822, 2405, 80801, 526, 18, 17, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestList(t *testing.T) { instances := make([]ec2.Instance, 4) instances[0].Tags = []ec2.Tag{{"Name", "foo"}} instances[0].PrivateDNSName = "instance1" instances[0].State.Name = "running" instances[1].Tags = []ec2.Tag{{"Name", "bar"}} instances[1].PrivateDNSName = "instance2" instances[1].State.Name = "running" instances[2].Tags = []ec2.Tag{{"Name", "baz"}} instances[2].PrivateDNSName = "instance3" instances[2].State.Name = "running" instances[3].Tags = []ec2.Tag{{"Name", "quux"}} instances[3].PrivateDNSName = "instance4" instances[3].State.Name = "running" aws := mockInstancesResp(instances) table := []struct { input string expect []string }{ {"blahonga", []string{}}, {"quux", []string{"instance4"}}, {"a", []string{"instance2", "instance3"}}, } for _, item := range table { result, err := aws.List(item.input) if err != nil { t.Errorf("Expected call with %v to succeed, failed with %s", item.input, err) } if e, a := item.expect, result; !reflect.DeepEqual(e, a) { t.Errorf("Expected %v, got %v", e, a) } } }
explode_data.jsonl/65066
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 443 }
[ 2830, 3393, 852, 1155, 353, 8840, 836, 8, 341, 197, 47825, 1669, 1281, 10556, 757, 17, 12688, 11, 220, 19, 340, 197, 47825, 58, 15, 936, 15930, 284, 3056, 757, 17, 23676, 2979, 1, 675, 497, 330, 7975, 95642, 197, 47825, 58, 15, 93...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestDomainsConfig_Error(t *testing.T) { assert := assertlib.New(t) globalConfig := viper.New() globalConfig.Set("domains", []int{1, 2}) _, err := DomainsConfig(globalConfig) assert.EqualError(err, "2 error(s) decoding:\n\n* '[0]' expected a map, got 'int'\n* '[1]' expected a map, got 'int'") }
explode_data.jsonl/61947
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 116 }
[ 2830, 3393, 74713, 2648, 28651, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 2740, 7121, 1155, 340, 18842, 2648, 1669, 95132, 7121, 741, 18842, 2648, 4202, 445, 59621, 497, 3056, 396, 90, 16, 11, 220, 17, 3518, 197, 6878, 1848, 166...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_copyFile(t *testing.T) { assert := assert.New(t) file1, err := ioutil.TempFile("", "file") assert.Nil(err) file1.Close() defer os.Remove(file1.Name()) type args struct { filename string destFilename string } tests := []struct { name string args args wantErr bool }{ { "invalid filename", args{ "", file1.Name(), }, true, }, { "invalid destFilename", args{ file1.Name(), "", }, true, }, } for _, tt := range tests { gotErr := copyFile(tt.args.filename, tt.args.destFilename) assert.Equal(tt.wantErr, gotErr != nil, tt.name) } }
explode_data.jsonl/11257
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 298 }
[ 2830, 3393, 16096, 1703, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 17661, 16, 11, 1848, 1669, 43144, 65009, 1703, 19814, 330, 1192, 1138, 6948, 59678, 3964, 340, 17661, 16, 10421, 741, 16867, 2643, 13270, 4866, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRetransmitIPv4IDUniqueness(t *testing.T) { for _, tc := range []struct { name string size int }{ {"1Byte", 1}, {"512Bytes", 512}, } { t.Run(tc.name, func(t *testing.T) { c := context.New(t, defaultMTU) defer c.Cleanup() c.CreateConnected(context.TestInitialSequenceNumber, 30000 /* rcvWnd */, -1 /* epRcvBuf */) // Disabling PMTU discovery causes all packets sent from this socket to // have DF=0. This needs to be done because the IPv4 ID uniqueness // applies only to non-atomic IPv4 datagrams as defined in RFC 6864 // Section 4, and datagrams with DF=0 are non-atomic. if err := c.EP.SetSockOptInt(tcpip.MTUDiscoverOption, tcpip.PMTUDiscoveryDont); err != nil { t.Fatalf("disabling PMTU discovery via sockopt to force DF=0 failed: %s", err) } var r bytes.Reader r.Reset(make([]byte, tc.size)) if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil { t.Fatalf("Write failed: %s", err) } pkt := c.GetPacket() checker.IPv4(t, pkt, checker.FragmentFlags(0), checker.TCP( checker.DstPort(context.TestPort), checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh), ), ) idSet := map[uint16]struct{}{header.IPv4(pkt).ID(): {}} // Expect two retransmitted packets, and that all packets received have // unique IPv4 ID values. for i := 0; i <= 2; i++ { pkt := c.GetPacket() checker.IPv4(t, pkt, checker.FragmentFlags(0), checker.TCP( checker.DstPort(context.TestPort), checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh), ), ) id := header.IPv4(pkt).ID() if _, exists := idSet[id]; exists { t.Fatalf("duplicate IPv4 ID=%d found in retransmitted packet", id) } idSet[id] = struct{}{} } }) } }
explode_data.jsonl/75980
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 794 }
[ 2830, 3393, 12020, 33389, 1763, 58056, 19, 915, 1806, 5011, 23709, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17130, 1669, 2088, 3056, 1235, 341, 197, 11609, 914, 198, 197, 13832, 526, 198, 197, 59403, 197, 197, 4913, 16, 7153, 497, 22...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestConsulUpdateRecord(t *testing.T) { consulReset() err := cache.UpdateRecord("nanobox.io", &nanopack) err2 := cache.UpdateRecord("nanopack.io", &nanopack) if err != nil || err2 != nil { t.Errorf("Failed to update record in consul cacher - %v%v", err, err2) } }
explode_data.jsonl/61010
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 108 }
[ 2830, 3393, 15220, 360, 4289, 6471, 1155, 353, 8840, 836, 8, 341, 197, 6254, 360, 14828, 741, 9859, 1669, 6500, 16689, 6471, 445, 18759, 32460, 4245, 497, 609, 18759, 453, 473, 340, 9859, 17, 1669, 6500, 16689, 6471, 445, 18759, 453, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestHandshake(t *testing.T) { signer := func(msg []byte) ([]byte, error) { mac := hmac.New(sha256.New, hmacKey) mac.Write(msg) return mac.Sum(nil), nil } mutator := func(msg *protoext.SignedGossipMessage) *protoext.SignedGossipMessage { return msg } assertPositivePath := func(msg protoext.ReceivedMessage, endpoint string) { expectedPKIID := common.PKIidType(endpoint) require.Equal(t, expectedPKIID, msg.GetConnectionInfo().ID) require.Equal(t, api.PeerIdentityType(endpoint), msg.GetConnectionInfo().Identity) require.NotNil(t, msg.GetConnectionInfo().Auth) sig, _ := (&naiveSecProvider{}).Sign(msg.GetConnectionInfo().Auth.SignedData) require.Equal(t, sig, msg.GetConnectionInfo().Auth.Signature) } // Positive path 1 - check authentication without TLS port, endpoint, ll := getAvailablePort(t) s := grpc.NewServer() id := []byte(endpoint) idMapper := identity.NewIdentityMapper(naiveSec, id, noopPurgeIdentity, naiveSec) inst, err := NewCommInstance(s, nil, idMapper, api.PeerIdentityType(endpoint), func() []grpc.DialOption { return []grpc.DialOption{grpc.WithInsecure()} }, naiveSec, disabledMetrics, testCommConfig) go s.Serve(ll) require.NoError(t, err) var msg protoext.ReceivedMessage _, tempEndpoint, tempL := getAvailablePort(t) acceptChan := handshaker(port, tempEndpoint, inst, t, mutator, none) select { case <-time.After(time.Duration(time.Second * 4)): require.FailNow(t, "Didn't receive a message, seems like handshake failed") case msg = <-acceptChan: } require.Equal(t, common.PKIidType(tempEndpoint), msg.GetConnectionInfo().ID) require.Equal(t, api.PeerIdentityType(tempEndpoint), msg.GetConnectionInfo().Identity) sig, _ := (&naiveSecProvider{}).Sign(msg.GetConnectionInfo().Auth.SignedData) require.Equal(t, sig, msg.GetConnectionInfo().Auth.Signature) inst.Stop() s.Stop() ll.Close() tempL.Close() time.Sleep(time.Second) comm, port := newCommInstance(t, naiveSec) defer comm.Stop() // Positive path 2: initiating peer sends its own certificate _, tempEndpoint, tempL 
= getAvailablePort(t) acceptChan = handshaker(port, tempEndpoint, comm, t, mutator, mutualTLS) select { case <-time.After(time.Second * 2): require.FailNow(t, "Didn't receive a message, seems like handshake failed") case msg = <-acceptChan: } assertPositivePath(msg, tempEndpoint) tempL.Close() // Negative path: initiating peer doesn't send its own certificate _, tempEndpoint, tempL = getAvailablePort(t) acceptChan = handshaker(port, tempEndpoint, comm, t, mutator, oneWayTLS) time.Sleep(time.Second) require.Equal(t, 0, len(acceptChan)) tempL.Close() // Negative path, signature is wrong _, tempEndpoint, tempL = getAvailablePort(t) mutator = func(msg *protoext.SignedGossipMessage) *protoext.SignedGossipMessage { msg.Signature = append(msg.Signature, 0) return msg } acceptChan = handshaker(port, tempEndpoint, comm, t, mutator, mutualTLS) time.Sleep(time.Second) require.Equal(t, 0, len(acceptChan)) tempL.Close() // Negative path, the PKIid doesn't match the identity _, tempEndpoint, tempL = getAvailablePort(t) mutator = func(msg *protoext.SignedGossipMessage) *protoext.SignedGossipMessage { msg.GetConn().PkiId = []byte(tempEndpoint) // Sign the message again msg.Sign(signer) return msg } _, tempEndpoint2, tempL2 := getAvailablePort(t) acceptChan = handshaker(port, tempEndpoint2, comm, t, mutator, mutualTLS) time.Sleep(time.Second) require.Equal(t, 0, len(acceptChan)) tempL.Close() tempL2.Close() // Negative path, the cert hash isn't what is expected _, tempEndpoint, tempL = getAvailablePort(t) mutator = func(msg *protoext.SignedGossipMessage) *protoext.SignedGossipMessage { msg.GetConn().TlsCertHash = append(msg.GetConn().TlsCertHash, 0) msg.Sign(signer) return msg } acceptChan = handshaker(port, tempEndpoint, comm, t, mutator, mutualTLS) time.Sleep(time.Second) require.Equal(t, 0, len(acceptChan)) tempL.Close() // Negative path, no PKI-ID was sent _, tempEndpoint, tempL = getAvailablePort(t) mutator = func(msg *protoext.SignedGossipMessage) 
*protoext.SignedGossipMessage { msg.GetConn().PkiId = nil msg.Sign(signer) return msg } acceptChan = handshaker(port, tempEndpoint, comm, t, mutator, mutualTLS) time.Sleep(time.Second) require.Equal(t, 0, len(acceptChan)) tempL.Close() // Negative path, connection message is of a different type _, tempEndpoint, tempL = getAvailablePort(t) mutator = func(msg *protoext.SignedGossipMessage) *protoext.SignedGossipMessage { msg.Content = &proto.GossipMessage_Empty{ Empty: &proto.Empty{}, } msg.Sign(signer) return msg } acceptChan = handshaker(port, tempEndpoint, comm, t, mutator, mutualTLS) time.Sleep(time.Second) require.Equal(t, 0, len(acceptChan)) tempL.Close() // Negative path, the peer didn't respond to the handshake in due time _, tempEndpoint, tempL = getAvailablePort(t) mutator = func(msg *protoext.SignedGossipMessage) *protoext.SignedGossipMessage { time.Sleep(time.Second * 5) return msg } acceptChan = handshaker(port, tempEndpoint, comm, t, mutator, mutualTLS) time.Sleep(time.Second) require.Equal(t, 0, len(acceptChan)) tempL.Close() }
explode_data.jsonl/42164
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1906 }
[ 2830, 3393, 2314, 29661, 1155, 353, 8840, 836, 8, 341, 69054, 261, 1669, 2915, 8119, 3056, 3782, 8, 34923, 3782, 11, 1465, 8, 341, 197, 2109, 580, 1669, 83356, 7121, 7, 15247, 17, 20, 21, 7121, 11, 83356, 1592, 340, 197, 2109, 580, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSingleNodeCandidate(t *testing.T) { tt := newNetwork(nil) tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) sm := tt.peers[1].(*raft) if sm.state != StateLeader { t.Errorf("state = %d, want %d", sm.state, StateLeader) } }
explode_data.jsonl/67341
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 104 }
[ 2830, 3393, 72352, 63901, 1155, 353, 8840, 836, 8, 341, 3244, 83, 1669, 501, 12320, 27907, 340, 3244, 83, 5219, 76878, 8472, 90, 3830, 25, 220, 16, 11, 2014, 25, 220, 16, 11, 3990, 25, 17310, 30365, 39, 454, 8824, 72023, 1669, 17853...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestBasicVreplicationWorkflow(t *testing.T) { defaultCellName := "zone1" allCells := []string{"zone1"} allCellNames = "zone1" vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", allCells, mainClusterConfig) require.NotNil(t, vc) defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func() { defaultReplicas = 1 }() defer vc.TearDown(t) defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1) vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) materializeRollup(t) shardCustomer(t, true, []*Cell{defaultCell}, defaultCellName, false) // the tenant table was to test a specific case with binary sharding keys. Drop it now so that we don't // have to update the rest of the tests execVtgateQuery(t, vtgateConn, "customer", "drop table tenant") validateRollupReplicates(t) shardOrders(t) shardMerchant(t) materializeProduct(t) materializeMerchantOrders(t) materializeSales(t) materializeMerchantSales(t) reshardMerchant2to3SplitMerge(t) reshardMerchant3to1Merge(t) insertMoreCustomers(t, 16) reshardCustomer2to4Split(t, nil, "") expectNumberOfStreams(t, vtgateConn, "Customer2to4", "sales", "product:0", 4) reshardCustomer3to2SplitMerge(t) expectNumberOfStreams(t, vtgateConn, "Customer3to2", "sales", "product:0", 3) reshardCustomer3to1Merge(t) expectNumberOfStreams(t, vtgateConn, "Customer3to1", "sales", "product:0", 1) }
explode_data.jsonl/44949
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 675 }
[ 2830, 3393, 15944, 53, 9995, 1693, 62768, 1155, 353, 8840, 836, 8, 341, 11940, 3599, 675, 1669, 330, 8684, 16, 698, 50960, 20857, 1669, 3056, 917, 4913, 8684, 16, 16707, 50960, 3599, 7980, 284, 330, 8684, 16, 698, 5195, 66, 284, 1532,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestChan(t *testing.T) { ch1 := make(chan string, 10) ch2 := make(chan string, 10) ch3 := make(chan string, 10) cases := createCases(ch1, ch2, ch3) // 进行10次select for i := 0; i < 10; i++ { // 从cases里随机选择一个可用case chosen, recv, ok := reflect.Select(cases) // 是否是接收 if recv.IsValid() && ok { t.Log("recv:", recv) } else { t.Log("send:", cases[chosen].Send) } } }
explode_data.jsonl/81133
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 212 }
[ 2830, 3393, 46019, 1155, 353, 8840, 836, 8, 341, 23049, 16, 1669, 1281, 35190, 914, 11, 220, 16, 15, 340, 23049, 17, 1669, 1281, 35190, 914, 11, 220, 16, 15, 340, 23049, 18, 1669, 1281, 35190, 914, 11, 220, 16, 15, 340, 1444, 2264...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestDomains_ListDomains(t *testing.T) { setup() defer teardown() mux.HandleFunc("/v2/domains", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, http.MethodGet) fmt.Fprint(w, `{ "domains": [ { "name":"foo.com" }, { "name":"bar.com" } ], "meta": { "total": 2 } }`) }) domains, resp, err := client.Domains.List(ctx, nil) if err != nil { t.Errorf("Domains.List returned error: %v", err) } expectedDomains := []Domain{{Name: "foo.com"}, {Name: "bar.com"}} if !reflect.DeepEqual(domains, expectedDomains) { t.Errorf("Domains.List returned domains %+v, expected %+v", domains, expectedDomains) } expectedMeta := &Meta{Total: 2} if !reflect.DeepEqual(resp.Meta, expectedMeta) { t.Errorf("Domains.List returned meta %+v, expected %+v", resp.Meta, expectedMeta) } }
explode_data.jsonl/22668
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 372 }
[ 2830, 3393, 74713, 27104, 74713, 1155, 353, 8840, 836, 8, 341, 84571, 741, 16867, 49304, 2822, 2109, 2200, 63623, 4283, 85, 17, 71344, 1735, 497, 2915, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 341, 197, 18185, 3523, 1155, 11, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFromPrefix(t *testing.T) { type args struct { lang string prefix string max int } tests := []struct { name string args args want string }{ {"two", args{"en", "riv", 0}, "rival river"}, {"one", args{"en", "oxy", 0}, "oxygen"}, {"five", args{"en", "dri", 0}, "drift drill drink drip drive"}, {"five limit 3", args{"en", "dri", 3}, "drift drill drink"}, {"none", args{"en", "zpj", 0}, ""}, {"bad lang", args{"xx", "act", 0}, ""}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := FromPrefix(tt.args.lang, tt.args.prefix, tt.args.max); got != tt.want { t.Errorf("FromPrefix() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/32831
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 303 }
[ 2830, 3393, 3830, 14335, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 197, 5205, 256, 914, 198, 197, 3223, 5060, 914, 198, 197, 22543, 262, 526, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMarshalUnmarshalChangeServerKey(t *testing.T) { addserv := newChangeServerKey() str, err := addserv.JSONString() if err != nil { t.Error(err) } t.Logf("str1 - %v", str) hex, err := addserv.MarshalBinary() if err != nil { t.Error(err) } t.Logf("Marshalled - %x", hex) addserv2, err := UnmarshalMessage(hex) if err != nil { t.Error(err) } str, err = addserv2.JSONString() if err != nil { t.Error(err) } t.Logf("str2 - %v", str) if addserv2.Type() != constants.CHANGESERVER_KEY_MSG { t.Error("Invalid message type unmarshalled") } if addserv.IsSameAs(addserv2.(*ChangeServerKeyMsg)) != true { t.Errorf("AddServer messages are not identical") } }
explode_data.jsonl/41777
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 288 }
[ 2830, 3393, 55438, 1806, 27121, 4072, 5475, 1592, 1155, 353, 8840, 836, 8, 341, 12718, 30775, 1669, 501, 4072, 5475, 1592, 2822, 11355, 11, 1848, 1669, 11367, 648, 18009, 703, 741, 743, 1848, 961, 2092, 341, 197, 3244, 6141, 3964, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
// TestTagNarrowScope checks that NewTagView, scoped to element "1/2/1" with
// tag "software", selects exactly the expected elements and associations
// from a freshly created tag model — nothing missing and nothing extra.
func TestTagNarrowScope(t *testing.T) {
	// Create a simple model
	m, elMap := createTagModel()
	// Create the view
	l := NewTagView(m, elMap["1/2/1"], "software")
	// Check elements are correct
	expectedElements := []mdl.Element{
		elMap["1"], elMap["1/1"], elMap["1/2"], elMap["1/1/1"], elMap["1/2/1"], elMap["1/2/1/1"], elMap["1/2/1/2"], elMap["1/2/2"], elMap["2"], elMap["2/1"], elMap["2/1/1"], elMap["2/1/2"],
	}
	for _, el := range expectedElements {
		// Assert elements are present
		assert.Assert(t, is.Contains(l.Elements, el))
	}
	// Length check rules out extra elements beyond the expected set.
	assert.Assert(t, is.Len(l.Elements, len(expectedElements)))
	// Check relationships are correct
	expectedAssociations := []mdl.Association{
		mdl.NewAssociation(elMap["1/1/1"], elMap["1/2/1/1"], []string{"start"}),
		mdl.NewAssociation(elMap["1/2/1/2"], elMap["1/2/2"], []string{"display"}),
		mdl.NewAssociation(elMap["2/1/1"], elMap["1/2/1/1"], []string{"receive"}),
		mdl.NewAssociation(elMap["1/2/1/1"], elMap["2/1/2"], []string{"send"}),
	}
	for _, ass := range expectedAssociations {
		// Assert elements are present
		assert.Assert(t, is.Contains(l.Associations, ass))
	}
	// Length check rules out extra associations beyond the expected set.
	assert.Assert(t, is.Len(l.Associations, len(expectedAssociations)))
}
explode_data.jsonl/54020
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 531 }
[ 2830, 3393, 5668, 45, 6044, 10803, 1155, 353, 8840, 836, 8, 341, 197, 322, 4230, 264, 4285, 1614, 198, 2109, 11, 655, 2227, 1669, 1855, 5668, 1712, 2822, 197, 322, 4230, 279, 1651, 198, 8810, 1669, 1532, 5668, 851, 1255, 11, 655, 22...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestReconcileLoadBalancerAddServiceOnInternalSubnet verifies that
// reconciling the load balancer for an internal test service placed on a
// test subnet succeeds and produces exactly one frontend IP configuration,
// then validates the resulting load balancer.
func TestReconcileLoadBalancerAddServiceOnInternalSubnet(t *testing.T) {
	az := getTestCloud()
	clusterResources := getClusterResources(az, 1, 1)
	svc := getInternalTestService("servicea", 80)
	addTestSubnet(t, az, &svc)

	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}

	// ensure we got a frontend ip configuration
	if len(*lb.FrontendIPConfigurations) != 1 {
		t.Error("Expected the loadbalancer to have a frontend ip configuration")
	}
	validateLoadBalancer(t, lb, svc)
}
explode_data.jsonl/50386
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 211 }
[ 2830, 3393, 693, 40446, 457, 5879, 93825, 2212, 1860, 1925, 11569, 3136, 4711, 1155, 353, 8840, 836, 8, 341, 197, 1370, 1669, 633, 2271, 16055, 741, 197, 18855, 11277, 1669, 633, 28678, 11277, 7, 1370, 11, 220, 16, 11, 220, 16, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_QueryAvaliableInstances(t *testing.T) { client, err := bssopenapi.NewClientWithAccessKey(os.Getenv("REGION_ID"), os.Getenv("ACCESS_KEY_ID"), os.Getenv("ACCESS_KEY_SECRET")) assert.Nil(t, err) request := bssopenapi.CreateQueryAvailableInstancesRequest() endpoints.AddEndpointMapping(os.Getenv("REGION_ID"), "BssOpenApi", "business.aliyuncs.com") response, err := client.QueryAvailableInstances(request) assert.Nil(t, err) assert.True(t, response.IsSuccess()) assert.Equal(t, 36, len(response.RequestId)) }
explode_data.jsonl/56881
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 193 }
[ 2830, 3393, 48042, 32, 831, 2156, 42725, 1155, 353, 8840, 836, 8, 341, 25291, 11, 1848, 1669, 293, 778, 2508, 2068, 7121, 2959, 2354, 6054, 1592, 9638, 64883, 445, 77431, 3450, 3975, 2643, 64883, 445, 55749, 6600, 3450, 3975, 2643, 6488...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestCoprocessorPriority checks the kvrpcpb command priority attached to
// coprocessor requests via a hijacked tikv client (checkPrioClient). The
// expected priority is set with setCheckPriority before each statement
// group; cli.mu.checkPrio toggles verification on and off.
func TestCoprocessorPriority(t *testing.T) {
	cli := &checkPrioClient{}
	store, clean := testkit.CreateMockStore(t, mockstore.WithClientHijacker(func(c tikv.Client) tikv.Client {
		cli.Client = c
		return cli
	}))
	defer clean()

	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t (id int primary key)")
	tk.MustExec("create table t1 (id int, v int, unique index i_id (id))")
	defer tk.MustExec("drop table t")
	defer tk.MustExec("drop table t1")
	tk.MustExec("insert into t values (1)")

	// Insert some data to make sure plan build IndexLookup for t1.
	for i := 0; i < 10; i++ {
		tk.MustExec(fmt.Sprintf("insert into t1 values (%d, %d)", i, i))
	}

	// Enable priority verification from here on.
	cli.mu.Lock()
	cli.mu.checkPrio = true
	cli.mu.Unlock()

	// Point lookups are expected to run as high priority.
	cli.setCheckPriority(kvrpcpb.CommandPri_High)
	tk.MustQuery("select id from t where id = 1")
	tk.MustQuery("select * from t1 where id = 1")

	// Scans/DML are expected to run as normal priority.
	cli.setCheckPriority(kvrpcpb.CommandPri_Normal)
	tk.MustQuery("select count(*) from t")
	tk.MustExec("update t set id = 3")
	tk.MustExec("delete from t")
	tk.MustExec("insert into t select * from t limit 2")
	tk.MustExec("delete from t")

	// Insert some data to make sure plan build IndexLookup for t.
	tk.MustExec("insert into t values (1), (2)")

	defer config.RestoreFunc()()
	config.UpdateGlobal(func(conf *config.Config) {
		// NOTE(review): presumably a zero ExpensiveThreshold makes scans
		// count as expensive, demoting them to low priority below — confirm.
		conf.Log.ExpensiveThreshold = 0
	})

	cli.setCheckPriority(kvrpcpb.CommandPri_High)
	tk.MustQuery("select id from t where id = 1")
	tk.MustQuery("select * from t1 where id = 1")
	tk.MustExec("delete from t where id = 2")
	tk.MustExec("update t set id = 2 where id = 1")

	cli.setCheckPriority(kvrpcpb.CommandPri_Low)
	tk.MustQuery("select count(*) from t")
	tk.MustExec("delete from t")
	tk.MustExec("insert into t values (3)")

	// Test priority specified by SQL statement.
	cli.setCheckPriority(kvrpcpb.CommandPri_High)
	tk.MustQuery("select HIGH_PRIORITY * from t")
	cli.setCheckPriority(kvrpcpb.CommandPri_Low)
	tk.MustQuery("select LOW_PRIORITY id from t where id = 1")

	// The tidb_force_priority session variable overrides the per-statement
	// defaults; each setting below is checked against the expected priority.
	cli.setCheckPriority(kvrpcpb.CommandPri_High)
	tk.MustExec("set tidb_force_priority = 'HIGH_PRIORITY'")
	tk.MustQuery("select * from t").Check(testkit.Rows("3"))
	tk.MustExec("update t set id = id + 1")
	tk.MustQuery("select v from t1 where id = 0 or id = 1").Check(testkit.Rows("0", "1"))

	cli.setCheckPriority(kvrpcpb.CommandPri_Low)
	tk.MustExec("set tidb_force_priority = 'LOW_PRIORITY'")
	tk.MustQuery("select * from t").Check(testkit.Rows("4"))
	tk.MustExec("update t set id = id + 1")
	tk.MustQuery("select v from t1 where id = 0 or id = 1").Check(testkit.Rows("0", "1"))

	cli.setCheckPriority(kvrpcpb.CommandPri_Normal)
	tk.MustExec("set tidb_force_priority = 'DELAYED'")
	tk.MustQuery("select * from t").Check(testkit.Rows("5"))
	tk.MustExec("update t set id = id + 1")
	tk.MustQuery("select v from t1 where id = 0 or id = 1").Check(testkit.Rows("0", "1"))

	cli.setCheckPriority(kvrpcpb.CommandPri_Low)
	tk.MustExec("set tidb_force_priority = 'NO_PRIORITY'")
	tk.MustQuery("select * from t").Check(testkit.Rows("6"))
	tk.MustExec("update t set id = id + 1")
	tk.MustQuery("select v from t1 where id = 0 or id = 1").Check(testkit.Rows("0", "1"))

	// Disable verification again before the deferred cleanup statements run.
	cli.mu.Lock()
	cli.mu.checkPrio = false
	cli.mu.Unlock()
}
explode_data.jsonl/38145
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1240 }
[ 2830, 3393, 56277, 40848, 269, 20555, 1155, 353, 8840, 836, 8, 341, 86448, 1669, 609, 2028, 47, 10383, 2959, 16094, 57279, 11, 4240, 1669, 1273, 8226, 7251, 11571, 6093, 1155, 11, 7860, 4314, 26124, 2959, 39, 3172, 9683, 18552, 1337, 86...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestCheckUseServiceAccount verifies that the CheckUseServiceAccount rule
// fails for a GKE cluster whose node config has an empty ServiceAccount
// (i.e. the default service account) and passes when an explicit service
// account is configured.
func TestCheckUseServiceAccount(t *testing.T) {
	tests := []struct {
		name     string
		input    gke.GKE
		expected bool
	}{
		{
			name: "Cluster node config with default service account",
			input: gke.GKE{
				Metadata: types.NewTestMetadata(),
				Clusters: []gke.Cluster{
					{
						Metadata:              types.NewTestMetadata(),
						RemoveDefaultNodePool: types.Bool(false, types.NewTestMetadata()),
						NodeConfig: gke.NodeConfig{
							Metadata:       types.NewTestMetadata(),
							ServiceAccount: types.String("", types.NewTestMetadata()),
						},
					},
				},
			},
			expected: true,
		},
		{
			name: "Cluster node config with service account provided",
			input: gke.GKE{
				Metadata: types.NewTestMetadata(),
				Clusters: []gke.Cluster{
					{
						Metadata:              types.NewTestMetadata(),
						RemoveDefaultNodePool: types.Bool(false, types.NewTestMetadata()),
						NodeConfig: gke.NodeConfig{
							Metadata:       types.NewTestMetadata(),
							ServiceAccount: types.String("service-account", types.NewTestMetadata()),
						},
					},
				},
			},
			expected: false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var testState state.State
			testState.Google.GKE = test.input
			results := CheckUseServiceAccount.Evaluate(&testState)
			// The rule "fired" if any failed result carries its long ID.
			var found bool
			for _, result := range results {
				if result.Status() == rules.StatusFailed && result.Rule().LongID() == CheckUseServiceAccount.Rule().LongID() {
					found = true
				}
			}
			if test.expected {
				assert.True(t, found, "Rule should have been found")
			} else {
				assert.False(t, found, "Rule should not have been found")
			}
		})
	}
}
explode_data.jsonl/28484
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 731 }
[ 2830, 3393, 3973, 10253, 1860, 7365, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 257, 914, 198, 197, 22427, 262, 342, 440, 1224, 3390, 198, 197, 42400, 1807, 198, 197, 59403, 197, 197, 515, 298, 11609, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestNewExecution_ShouldSetFields checks that NewExecution copies every
// relevant field from the stub step, its deploy config, and the explicit
// region/deploy-type arguments onto the returned execution.
func TestNewExecution_ShouldSetFields(t *testing.T) {
	t.Parallel()
	fs := afero.NewMemMapFs()
	stubRegion := "region"
	stubRegionalDeployType := config.RegionalRegionDeployType
	// Stub step with a distinct value for every field asserted below.
	stubStep := config.Step{
		Dir:  "stub",
		Name: "stubName",
		DeployConfig: config.Config{
			DeploymentRing:            "stubDeploymentRing",
			Project:                   "stubProject",
			DryRun:                    true,
			RegionalRegions:           []string{"stub"},
			UniqueExternalExecutionID: "stubExecutionID",
			MaxRetries:                3,
			MaxTestRetries:            2,
		},
		TrackName: "stubTrackName",
	}
	// act
	mock := NewExecution(stubStep, logger, fs, stubRegionalDeployType, stubRegion, map[string]map[string]string{})
	// assert
	require.Equal(t, stubStep.Dir, mock.Dir, "Dir should match stub value")
	require.Equal(t, stubStep.Name, mock.StepName, "Name should match stub value")
	require.Equal(t, stubRegion, mock.Region, "Region should match stub value")
	require.Equal(t, stubRegionalDeployType, mock.RegionDeployType, "RegionDeployType should match stub value")
	require.Equal(t, stubStep.DeployConfig.DeploymentRing, mock.DeploymentRing, "DeploymentRing should match stub value")
	require.Equal(t, stubStep.DeployConfig.Project, mock.Project, "Project should match stub value")
	require.Equal(t, stubStep.DeployConfig.DryRun, mock.DryRun, "DryRun should match stub value")
	require.Equal(t, stubStep.TrackName, mock.TrackName, "TrackName should match stub value")
	require.Equal(t, stubStep.DeployConfig.UniqueExternalExecutionID, mock.UniqueExternalExecutionID, "UniqueExternalExecutionID should match stub value")
	require.Equal(t, stubStep.DeployConfig.RegionalRegions, mock.RegionGroupRegions, "RegionGroupRegions should match stub value")
	require.Equal(t, stubStep.DeployConfig.MaxRetries, mock.MaxRetries, "MaxRetries should match stub value")
	require.Equal(t, stubStep.DeployConfig.MaxTestRetries, mock.MaxTestRetries, "MaxTestRetries should match stub value")
}
explode_data.jsonl/16363
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 719 }
[ 2830, 3393, 3564, 20294, 36578, 616, 1649, 8941, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 53584, 1669, 264, 802, 78, 7121, 18816, 2227, 48300, 741, 18388, 392, 14091, 1669, 330, 3943, 698, 18388, 392, 89807, 69464, 929, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestValidateCertFilesNotFoundAuthInfo verifies that validation reports
// both "unable to read client-cert" and "unable to read client-key" for an
// auth info whose certificate/key paths do not exist on disk.
func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) {
	config := clientcmdapi.NewConfig()
	// Both file paths deliberately point at nonexistent files.
	config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
		ClientCertificate: "missing",
		ClientKey:         "missing",
	}
	test := configValidationTest{
		config:                 config,
		expectedErrorSubstring: []string{"unable to read client-cert", "unable to read client-key"},
	}

	test.testAuthInfo("error", t)
	test.testConfig(t)
}
explode_data.jsonl/13495
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 164 }
[ 2830, 3393, 17926, 36934, 10809, 10372, 5087, 1731, 1155, 353, 8840, 836, 8, 341, 25873, 1669, 2943, 8710, 2068, 7121, 2648, 741, 25873, 25233, 38059, 1183, 841, 1341, 284, 609, 2972, 8710, 2068, 25233, 1731, 515, 197, 71724, 33202, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestGetExistingVMSS covers getVirtualMachineScaleSet with gomock doubles:
// a missing scale set surfaces a wrapped 404 error, a found scale set plus
// its instances is converted into the azure.VMSS result type, and a failure
// while listing instances is propagated as a wrapped error.
func TestGetExistingVMSS(t *testing.T) {
	testcases := []struct {
		name          string
		vmssName      string
		result        *azure.VMSS
		expectedError string
		// expect installs the per-case mock expectations on scope and client.
		expect func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder)
	}{
		{
			name:          "scale set not found",
			vmssName:      "my-vmss",
			result:        &azure.VMSS{},
			expectedError: "failed to get existing vmss: #: Not found: StatusCode=404",
			expect: func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) {
				s.ResourceGroup().AnyTimes().Return("my-rg")
				s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
				m.Get(gomockinternal.AContext(), "my-rg", "my-vmss").Return(compute.VirtualMachineScaleSet{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
			},
		},
		{
			name:     "get existing vmss",
			vmssName: "my-vmss",
			result: &azure.VMSS{
				ID:       "my-id",
				Name:     "my-vmss",
				State:    "Succeeded",
				Sku:      "Standard_D2",
				Identity: "",
				Tags:     nil,
				Capacity: int64(1),
				Zones:    []string{"1", "3"},
				Instances: []azure.VMSSVM{
					{
						ID:         "my-vm-id",
						InstanceID: "my-vm-1",
						Name:       "instance-000001",
						State:      "Succeeded",
					},
				},
			},
			expectedError: "",
			expect: func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) {
				s.ResourceGroup().AnyTimes().Return("my-rg")
				s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
				m.Get(gomockinternal.AContext(), "my-rg", "my-vmss").Return(compute.VirtualMachineScaleSet{
					ID:   to.StringPtr("my-id"),
					Name: to.StringPtr("my-vmss"),
					Sku: &compute.Sku{
						Capacity: to.Int64Ptr(1),
						Name:     to.StringPtr("Standard_D2"),
					},
					VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
						SinglePlacementGroup: to.BoolPtr(false),
						ProvisioningState:    to.StringPtr("Succeeded"),
					},
					Zones: &[]string{"1", "3"},
				}, nil)
				m.ListInstances(gomock.Any(), "my-rg", "my-vmss").Return([]compute.VirtualMachineScaleSetVM{
					{
						ID:         to.StringPtr("my-vm-id"),
						InstanceID: to.StringPtr("my-vm-1"),
						Name:       to.StringPtr("my-vm"),
						VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
							ProvisioningState: to.StringPtr("Succeeded"),
							OsProfile: &compute.OSProfile{
								ComputerName: to.StringPtr("instance-000001"),
							},
						},
					},
				}, nil)
			},
		},
		{
			name:          "list instances fails",
			vmssName:      "my-vmss",
			result:        &azure.VMSS{},
			expectedError: "failed to list instances: #: Not found: StatusCode=404",
			expect: func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) {
				s.ResourceGroup().AnyTimes().Return("my-rg")
				s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
				m.Get(gomockinternal.AContext(), "my-rg", "my-vmss").Return(compute.VirtualMachineScaleSet{
					ID:   to.StringPtr("my-id"),
					Name: to.StringPtr("my-vmss"),
					VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
						SinglePlacementGroup: to.BoolPtr(false),
						ProvisioningState:    to.StringPtr("Succeeded"),
					},
				}, nil)
				m.ListInstances(gomockinternal.AContext(), "my-rg", "my-vmss").Return([]compute.VirtualMachineScaleSetVM{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
			},
		},
	}

	for _, tc := range testcases {
		// Re-bind the loop variable: the subtests run in parallel.
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)
			t.Parallel()
			mockCtrl := gomock.NewController(t)
			defer mockCtrl.Finish()
			scopeMock := mock_scalesets.NewMockScaleSetScope(mockCtrl)
			clientMock := mock_scalesets.NewMockClient(mockCtrl)
			// Install the per-case mock expectations before invoking the service.
			tc.expect(scopeMock.EXPECT(), clientMock.EXPECT())
			s := &Service{
				Scope:  scopeMock,
				Client: clientMock,
			}
			result, err := s.getVirtualMachineScaleSet(context.TODO(), tc.vmssName)
			if tc.expectedError != "" {
				g.Expect(err).To(HaveOccurred())
				t.Log(err.Error())
				g.Expect(err).To(MatchError(tc.expectedError))
			} else {
				g.Expect(err).NotTo(HaveOccurred())
				g.Expect(result).To(BeEquivalentTo(tc.result))
			}
		})
	}
}
explode_data.jsonl/78241
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2050 }
[ 2830, 3393, 1949, 53067, 11187, 1220, 1155, 353, 8840, 836, 8, 341, 18185, 23910, 1669, 3056, 1235, 341, 197, 11609, 688, 914, 198, 197, 54879, 778, 675, 414, 914, 198, 197, 9559, 286, 353, 39495, 5058, 44, 1220, 198, 197, 42400, 1454...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestCreateCleanDefaultCluster builds a client config with a
// ClusterDefaults override and checks that the resulting host, TLS, and
// token settings match the "clean" cluster/auth entries of the test config.
func TestCreateCleanDefaultCluster(t *testing.T) {
	config := createValidTestConfig()
	clientBuilder := NewDefaultClientConfig(*config, &ConfigOverrides{
		ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"},
	})

	clientConfig, err := clientBuilder.ClientConfig()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	matchStringArg(config.Clusters["clean"].Server, clientConfig.Host, t)
	matchBoolArg(config.Clusters["clean"].InsecureSkipTLSVerify, clientConfig.Insecure, t)
	matchStringArg(config.AuthInfos["clean"].Token, clientConfig.BearerToken, t)
}
explode_data.jsonl/56167
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 192 }
[ 2830, 3393, 4021, 27529, 3675, 28678, 1155, 353, 8840, 836, 8, 341, 25873, 1669, 1855, 4088, 2271, 2648, 741, 25291, 3297, 1669, 1532, 3675, 2959, 2648, 4071, 1676, 11, 609, 2648, 80010, 515, 197, 197, 28678, 16273, 25, 2943, 8710, 2068...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestStuckPodDetector_DoesNothingIfNoStuckPodsAreFound checks that with
// only a healthy running pod present, HandleStuckPods returns no leases and
// reports done exactly once with an empty pod list.
func TestStuckPodDetector_DoesNothingIfNoStuckPodsAreFound(t *testing.T) {
	runningPod := makeRunningPod()
	fakeClusterContext, mockLeaseService, stuckPodDetector := makeStuckPodDetectorWithTestDoubles()

	addPod(t, fakeClusterContext, runningPod)

	stuckPodDetector.HandleStuckPods()

	assert.Zero(t, mockLeaseService.returnLeaseCalls)
	mockLeaseService.assertReportDoneCalledOnceWith(t, []*v1.Pod{})
}
explode_data.jsonl/64399
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 139 }
[ 2830, 3393, 623, 1942, 23527, 31606, 1557, 7072, 23780, 2679, 2753, 623, 1942, 23527, 82, 11526, 6650, 1155, 353, 8840, 836, 8, 341, 197, 27173, 23527, 1669, 1281, 18990, 23527, 2822, 1166, 726, 28678, 1972, 11, 7860, 2304, 519, 1860, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestGetTopicsWithThrottledBrokers verifies that
// getTopicsWithThrottledBrokers maps topics to the follower replicas of
// overridden brokers, and that brokers flagged as reassignment participants
// are excluded from the result.
func TestGetTopicsWithThrottledBrokers(t *testing.T) {
	rtf := &ReplicationThrottleConfigs{
		zk: &kafkazk.Mock{},
	}
	// Minimally populate the ReplicationThrottleConfigs.
	rtf.brokerOverrides = BrokerOverrides{
		1001: BrokerThrottleOverride{
			ID:                      1001,
			ReassignmentParticipant: false,
			Config: ThrottleOverrideConfig{
				Rate: 50,
			},
		},
		// Topics that include this broker shouldn't be included; the
		// BrokerThrottleOverride.Filter called in getTopicsWithThrottledBrokers
		// excludes any topics mapped to brokers where ReassignmentParticipant
		// == true.
		1002: BrokerThrottleOverride{
			ID:                      1002,
			ReassignmentParticipant: true,
			Config: ThrottleOverrideConfig{
				Rate: 50,
			},
		},
	}
	// Call.
	topicThrottledBrokers, _ := getTopicsWithThrottledBrokers(rtf)
	// Only broker 1001 should appear, as a follower replica, in both topics.
	expected := topicThrottledReplicas{
		"test_topic":  throttled{"followers": brokerIDs{"0:1001"}},
		"test_topic2": throttled{"followers": brokerIDs{"0:1001"}},
	}
	if len(topicThrottledBrokers) != len(expected) {
		t.Fatalf("Expected len %d, got %d", len(expected), len(topicThrottledBrokers))
	}
	for topic := range expected {
		output, exist := topicThrottledBrokers[topic]
		if !exist {
			t.Fatalf("Expected topic '%s' in output", topic)
		}
		got := output["followers"][0]
		expectedOut := expected[topic]["followers"][0]
		if got != expectedOut {
			t.Errorf("Expected followers '%s', got '%s'", expectedOut, got)
		}
	}
}
explode_data.jsonl/39257
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 594 }
[ 2830, 3393, 1949, 45003, 2354, 1001, 46689, 832, 26272, 26177, 1155, 353, 8840, 836, 8, 341, 55060, 69, 1669, 609, 18327, 1693, 1001, 27535, 84905, 515, 197, 20832, 74, 25, 609, 74, 2577, 74, 1370, 74, 24664, 38837, 197, 630, 197, 322...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// Test_Problem707 exercises the linked-list Constructor (LeetCode problem
// 707, "Design Linked List") by replaying two recorded operation sequences
// and printing the list state (via ML2s) after each step.
// NOTE(review): this test makes no assertions — it only prints with
// fmt.Printf — so it can only fail by panicking. Consider asserting the
// Get results and list contents.
func Test_Problem707(t *testing.T) {
	obj := Constructor()
	fmt.Printf("obj = %v\n", ML2s(&obj))
	param1 := obj.Get(1)
	fmt.Printf("param_1 = %v obj = %v\n", param1, ML2s(&obj))
	obj.AddAtHead(38)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	obj.AddAtHead(45)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	obj.DeleteAtIndex(2)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	obj.AddAtIndex(1, 24)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	obj.AddAtTail(36)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	obj.AddAtIndex(3, 72)
	obj.AddAtTail(76)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	obj.AddAtHead(7)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	obj.AddAtHead(36)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	obj.AddAtHead(34)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	obj.AddAtTail(91)
	fmt.Printf("obj = %v\n", ML2s(&obj))
	fmt.Printf("\n\n\n")
	// Second scenario: Get on an empty list, then interleaved AddAtIndex/Get.
	obj1 := Constructor()
	fmt.Printf("obj1 = %v\n", ML2s(&obj1))
	param2 := obj1.Get(0)
	fmt.Printf("param_2 = %v obj1 = %v\n", param2, ML2s(&obj1))
	obj1.AddAtIndex(1, 2)
	fmt.Printf("obj1 = %v\n", ML2s(&obj1))
	param2 = obj1.Get(0)
	fmt.Printf("param_2 = %v obj1 = %v\n", param2, ML2s(&obj1))
	param2 = obj1.Get(1)
	fmt.Printf("param_2 = %v obj1 = %v\n", param2, ML2s(&obj1))
	obj1.AddAtIndex(0, 1)
	fmt.Printf("obj1 = %v\n", ML2s(&obj1))
	param2 = obj1.Get(0)
	fmt.Printf("param_1 = %v obj1 = %v\n", param2, ML2s(&obj1))
	param2 = obj1.Get(1)
	fmt.Printf("param_2 = %v obj1 = %v\n", param2, ML2s(&obj1))
}
explode_data.jsonl/39072
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 749 }
[ 2830, 3393, 16670, 10121, 22, 15, 22, 1155, 353, 8840, 836, 8, 341, 22671, 1669, 16786, 741, 11009, 19367, 445, 2295, 284, 1018, 85, 1699, 497, 19614, 17, 82, 2099, 2295, 1171, 36037, 16, 1669, 2839, 2234, 7, 16, 340, 11009, 19367, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestParsePSIData checks parsePSIData on two inputs: a TOT section whose
// trailing CRC32 does not match the computed value (must return the CRC
// mismatch error) and a valid PSI payload that must decode to the expected
// psi fixture.
func TestParsePSIData(t *testing.T) {
	// Invalid CRC32
	w := astibinary.New()
	w.Write(uint8(0))       // Pointer field
	w.Write(uint8(115))     // TOT table ID
	w.Write("1")            // TOT syntax section indicator
	w.Write("1")            // TOT private bit
	w.Write("11")           // TOT reserved
	w.Write("000000001110") // TOT section length
	w.Write(totBytes())     // TOT data
	w.Write(uint32(32))     // TOT CRC32
	_, err := parsePSIData(astibyte.NewIterator(w.Bytes()))
	assert.EqualError(t, err, "astits: parsing PSI table failed: astits: Table CRC32 20 != computed CRC32 6969b13")

	// Valid
	d, err := parsePSIData(astibyte.NewIterator(psiBytes()))
	assert.NoError(t, err)
	assert.Equal(t, d, psi)
}
explode_data.jsonl/25231
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 312 }
[ 2830, 3393, 14463, 5012, 81536, 1155, 353, 8840, 836, 8, 341, 197, 322, 13882, 29395, 18, 17, 198, 6692, 1669, 11763, 579, 3287, 7121, 741, 6692, 4073, 8488, 23, 7, 15, 593, 981, 442, 21635, 2070, 198, 6692, 4073, 8488, 23, 7, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStartKubeletError(t *testing.T) { errMsg := "error has occurred" r := &fakeRunner{ errMsg: errMsg, } kubeletScriptTemplate, err := template.New(StepName).Parse("") output := new(bytes.Buffer) config := &steps.Config{ KubeletConfig: steps.KubeletConfig{}, Runner: r, } j := &Step{ kubeletScriptTemplate, } err = j.Run(context.Background(), output, config) if err == nil { t.Errorf("Error must not be nil") return } if !strings.Contains(err.Error(), errMsg) { t.Errorf("Error message expected to contain %s actual %s", errMsg, err.Error()) } }
explode_data.jsonl/56739
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 229 }
[ 2830, 3393, 3479, 42, 3760, 1149, 1454, 1155, 353, 8840, 836, 8, 341, 9859, 6611, 1669, 330, 841, 702, 10017, 1837, 7000, 1669, 609, 30570, 19486, 515, 197, 9859, 6611, 25, 60078, 345, 197, 630, 16463, 3760, 1149, 5910, 7275, 11, 1848...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestTerragruntEmptyStringCommandHook runs terragrunt apply against a
// fixture whose hook has an empty-string command and expects the run to fail
// with the "non-empty argument" validation error.
func TestTerragruntEmptyStringCommandHook(t *testing.T) {
	t.Parallel()
	cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH)
	// Copy the fixture into a temp dir so the run doesn't dirty the source tree.
	tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH)
	rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH)
	var (
		stdout bytes.Buffer
		stderr bytes.Buffer
	)
	err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
	if err != nil {
		assert.Contains(t, err.Error(), "Need at least one non-empty argument in 'execute'.")
	} else {
		t.Error("Expected an Error with message: 'Need at least one argument'")
	}
}
explode_data.jsonl/10071
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 285 }
[ 2830, 3393, 51402, 68305, 3850, 3522, 703, 4062, 31679, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 1444, 60639, 51, 13886, 627, 13682, 1155, 11, 13602, 42635, 41486, 82251, 50, 36640, 12283, 22723, 7944, 340, 20082, 14359, 182...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestDailyEvery5Days covers a Recurrence that fires every 5 days starting
// 2016-01-01 12:00 Europe/Berlin: the first/second occurrences, behavior
// across the spring DST change, the last occurrence before an end date, and
// an open-ended recurrence.
func TestDailyEvery5Days(t *testing.T) {
	Convey("With a daily event every 5 days", t, func() {
		local, err := time.LoadLocation("Europe/Berlin")
		So(err, ShouldBeNil)
		r := Recurrence{
			Frequence: Daily,
			Interval:  5,
			Location:  local,
			Start:     time.Date(2016, 1, 1, 12, 0, 0, 0, local),
		}
		Convey("which ends 2017", func() {
			r.End = time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)
			Convey("there should be no event 2017", func() {
				nextEvent := r.GetNextDate(time.Date(2017, 1, 1, 1, 0, 0, 0, time.UTC))
				So(nextEvent, ShouldNotHappen)
			})
			Convey("the first event should be on 1st january", func() {
				nextEvent := r.GetNextDate(time.Date(2015, 12, 12, 0, 0, 0, 0, time.UTC))
				So(nextEvent, ShouldHappenOn, time.Date(2016, 1, 1, 12, 0, 0, 0, local))
			})
			Convey("the second event should be on 6th january", func() {
				nextEvent := r.GetNextDate(time.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC))
				So(nextEvent, ShouldHappenOn, time.Date(2016, 1, 6, 12, 0, 0, 0, local))
			})
			Convey("the time after the change to DST should stay the same", func() {
				// Local wall-clock time must remain 12:00 after the DST switch.
				nextEvent := r.GetNextDate(time.Date(2016, 3, 26, 15, 0, 0, 0, time.UTC))
				So(nextEvent, ShouldHappenOn, time.Date(2016, 3, 31, 12, 0, 0, 0, local))
			})
			Convey("the last event should be on 31th december", func() {
				lastEvent := r.GetNextDate(time.Date(2016, 12, 27, 0, 0, 0, 0, time.UTC))
				So(lastEvent, ShouldHappenOn, time.Date(2016, 12, 31, 12, 0, 0, 0, local))
				// The recurrence ends 2017-01-01, so nothing may follow.
				nextEvent := r.GetNextDate(lastEvent)
				So(nextEvent, ShouldNotHappen)
			})
		})
		Convey("which doesn't end", func() {
			// A zero End marks the recurrence as open-ended.
			r.End = time.Time{}
			Convey("there should be an event on 5th january 2017", func() {
				nextEvent := r.GetNextDate(time.Date(2017, 1, 1, 1, 0, 0, 0, time.UTC))
				So(nextEvent, ShouldHappenOn, time.Date(2017, 1, 5, 12, 0, 0, 0, local))
			})
		})
	})
}
explode_data.jsonl/54706
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 815 }
[ 2830, 3393, 43553, 11510, 20, 20557, 1155, 353, 8840, 836, 8, 341, 93070, 5617, 445, 2354, 264, 7298, 1538, 1449, 220, 20, 2849, 497, 259, 11, 2915, 368, 341, 197, 8854, 11, 1848, 1669, 882, 13969, 4707, 445, 30780, 16276, 261, 3732, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestDefaultSessionManager_FindGlobalSession checks that a global session
// added to a default session manager can be retrieved by its XID with all
// compared fields intact, then removes it again.
func TestDefaultSessionManager_FindGlobalSession(t *testing.T) {
	gs := globalSessionProvider(t)
	sessionManager := NewDefaultSessionManager("default")
	sessionManager.AddGlobalSession(gs)
	expected := sessionManager.FindGlobalSession(gs.XID)
	assert.NotNil(t, expected)
	assert.Equal(t, gs.TransactionID, expected.TransactionID)
	assert.Equal(t, gs.ApplicationID, expected.ApplicationID)
	assert.Equal(t, gs.TransactionServiceGroup, expected.TransactionServiceGroup)
	assert.Equal(t, gs.TransactionName, expected.TransactionName)
	assert.Equal(t, gs.Status, expected.Status)
	// Clean up the session added above.
	sessionManager.RemoveGlobalSession(gs)
}
explode_data.jsonl/60393
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 191 }
[ 2830, 3393, 3675, 5283, 2043, 95245, 11646, 5283, 1155, 353, 8840, 836, 8, 341, 3174, 82, 1669, 3644, 5283, 5179, 1155, 340, 25054, 2043, 1669, 1532, 3675, 5283, 2043, 445, 2258, 1138, 25054, 2043, 1904, 11646, 5283, 83394, 340, 42400, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestAddChannelRequestBasicQueryParam verifies that addChannelOpts builds
// the expected REST path, merges caller-supplied query parameters with the
// "add" channel list, and produces an empty request body.
func TestAddChannelRequestBasicQueryParam(t *testing.T) {
	assert := assert.New(t)
	opts := &addChannelOpts{
		Channels:     []string{"ch1", "ch2", "ch3"},
		ChannelGroup: "cg",
		pubnub:       pubnub,
	}
	queryParam := map[string]string{
		"q1": "v1",
		"q2": "v2",
	}
	opts.QueryParam = queryParam

	path, err := opts.buildPath()
	assert.Nil(err)
	u := &url.URL{
		Path: path,
	}
	h.AssertPathsEqual(t, fmt.Sprintf("/v1/channel-registration/sub-key/sub_key/channel-group/cg"), u.EscapedPath(), []int{})

	query, err := opts.buildQuery()
	assert.Nil(err)
	expected := &url.Values{}
	expected.Set("q1", "v1")
	expected.Set("q2", "v2")
	expected.Set("add", "ch1,ch2,ch3")
	// pnsdk and uuid are explicitly excluded from the comparison.
	h.AssertQueriesEqual(t, expected, query, []string{"pnsdk", "uuid"}, []string{})

	body, err := opts.buildBody()
	assert.Nil(err)
	assert.Equal([]byte{}, body)
}
explode_data.jsonl/8262
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 379 }
[ 2830, 3393, 2212, 9629, 1900, 15944, 84085, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 64734, 1669, 609, 718, 9629, 43451, 515, 197, 197, 35925, 25, 257, 3056, 917, 4913, 331, 16, 497, 330, 331, 17, 497, 330, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTuplePqPop(t *testing.T) { st := []struct { name string pq TuplePriorityQueue nodes [][3]int exp [][3]int }{ {"nodes' len eq 1", TuplePriorityQueue{}, [][3]int{[3]int{1, 2, 3}}, [][3]int{[3]int{1, 2, 3}}}, {"nodes' len eq 2", TuplePriorityQueue{}, [][3]int{[3]int{1, 2, 3}, [3]int{2, 2, 2}}, [][3]int{[3]int{1, 2, 3}, [3]int{2, 2, 2}}}, } for _, tt := range st { t.Run(tt.name, func(t *testing.T) { for i := range tt.nodes { heap.Push(&tt.pq, tt.nodes[i]) } var idx int for tt.pq.Len() > 0 { item := heap.Pop(&tt.pq).([3]int) if item != tt.exp[idx] { t.Fatalf("priorityQueue: %v and nodes: %v wanted %d but got %d", tt.pq, tt.nodes, tt.exp[idx], item) } t.Log("pass") idx++ } }) } }
explode_data.jsonl/28731
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 423 }
[ 2830, 3393, 28681, 47, 80, 11598, 1155, 353, 8840, 836, 8, 341, 18388, 1669, 3056, 1235, 341, 197, 11609, 220, 914, 198, 197, 3223, 80, 262, 24622, 20555, 7554, 198, 197, 79756, 508, 1457, 18, 63025, 198, 197, 48558, 256, 508, 1457, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRunLoopDeleteSuccess(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) watcher, fakeWatcher := newFakeWatchServiceInstanceFunc(nil) updater, updated := newFakeUpdateServiceInstanceFunc(nil) getServiceClassFn := refs.NewFakeServiceClassGetterFunc(&data.ServiceClass{}, nil) getServiceBrokerFn := refs.NewFakeServiceBrokerGetterFunc(&data.ServiceBroker{}, nil) deprovisioner := fake.NewDeprovisioner() lifecycler := &fake.Lifecycler{ Deprovisioner: deprovisioner, } errCh := make(chan error) go func() { errCh <- RunLoop(ctx, watcher, updater, getServiceClassFn, getServiceBrokerFn, lifecycler) }() inst := new(data.ServiceInstance) inst.Kind = data.ServiceInstanceKind fakeWatcher.Delete(inst) time.Sleep(100 * time.Millisecond) cancel() fakeWatcher.Stop() err := <-errCh assert.Equal(t, len(deprovisioner.Reqs), 1, "number of deprovision requests") assert.Equal(t, len(*updated), 0, "number of updated service instances") assert.Err(t, ErrCancelled, err) }
explode_data.jsonl/72427
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 363 }
[ 2830, 3393, 80520, 6435, 7188, 1155, 353, 8840, 836, 8, 341, 20985, 11, 9121, 1669, 2266, 26124, 9269, 5378, 19047, 2398, 6692, 28058, 11, 12418, 47248, 1669, 501, 52317, 14247, 1860, 2523, 9626, 27907, 340, 59810, 27463, 11, 6049, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAPIAccessAllowedSourceCIDRsForControllerSG(t *testing.T) { testCases := []struct { conf string cidrs []string }{ { conf: externalDNSNameConfig, cidrs: []string{}, }, { conf: ` apiEndpoints: - name: endpoint-1 dnsName: test-1.staging.core-os.net loadBalancer: type: network recordSetManaged: false apiAccessAllowedSourceCIDRs: [] `, cidrs: []string{}, }, { conf: ` apiEndpoints: - name: endpoint-1 dnsName: test-1.staging.core-os.net loadBalancer: type: network recordSetManaged: false `, cidrs: []string{"0.0.0.0/0"}, }, { conf: ` apiEndpoints: - name: endpoint-1 dnsName: test-1.staging.core-os.net loadBalancer: type: network recordSetManaged: false apiAccessAllowedSourceCIDRs: - 127.0.0.1/32 # Ignores non-network load balancers - name: endpoint-2 dnsName: test-1.staging.core-os.net loadBalancer: recordSetManaged: false apiAccessAllowedSourceCIDRs: - 127.0.0.2/32 # Ignores non-network load balancers - name: endpoint-2 dnsName: test-1.staging.core-os.net loadBalancer: type: classic recordSetManaged: false apiAccessAllowedSourceCIDRs: - 127.0.0.3/32 `, cidrs: []string{"127.0.0.1/32"}, }, { conf: ` apiEndpoints: - name: endpoint-1 dnsName: test-1.staging.core-os.net loadBalancer: type: network recordSetManaged: false apiAccessAllowedSourceCIDRs: - 127.0.0.1/32 - 0.0.0.0/0 - name: endpoint-2 dnsName: test-2.staging.core-os.net loadBalancer: type: network recordSetManaged: false apiAccessAllowedSourceCIDRs: - 127.0.0.1/32 # Duplicated CIDR - 192.168.0.0/24 `, cidrs: []string{"0.0.0.0/0", "127.0.0.1/32", "192.168.0.0/24"}, }, } for _, testCase := range testCases { confBody := availabilityZoneConfig + apiEndpointMinimalConfigYaml + testCase.conf c, err := ClusterFromBytes([]byte(confBody)) if err != nil { t.Errorf("Unexpected error parsing config: %v\n %s", err, confBody) continue } actualCIDRs := c.APIAccessAllowedSourceCIDRsForControllerSG() if !reflect.DeepEqual(actualCIDRs, testCase.cidrs) { t.Errorf( "CIDRs %s do not match actual list %s in config: %s", 
testCase.cidrs, actualCIDRs, confBody, ) } } }
explode_data.jsonl/4365
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1041 }
[ 2830, 3393, 7082, 6054, 35382, 3608, 54146, 42327, 2461, 2051, 7783, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 67850, 220, 914, 198, 197, 1444, 307, 5428, 3056, 917, 198, 197, 59403, 197, 197, 515, 298, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestMatToBytes(t *testing.T) { mat := NewMatWithSize(101, 102, MatTypeCV8U) b := mat.ToBytes() if len(b) != 101*102 { t.Errorf("Mat bytes incorrect length: %v\n", len(b)) } copy, err := NewMatFromBytes(101, 102, MatTypeCV8U, b) if err != nil { t.Error(err.Error()) } if copy.Rows() != 101 { t.Errorf("Mat from bytes incorrect row count: %v\n", copy.Rows()) } if copy.Cols() != 102 { t.Errorf("Mat region incorrect col count: %v\n", copy.Cols()) } mat = NewMatWithSize(101, 102, MatTypeCV16S) b = mat.ToBytes() if len(b) != 101*102*2 { t.Errorf("Mat bytes incorrect length: %v\n", len(b)) } mat = NewMatFromScalar(NewScalar(255.0, 105.0, 180.0, 0.0), MatTypeCV8UC3) b = mat.ToBytes() if len(b) != 3 { t.Errorf("Mat bytes incorrect length: %v\n", len(b)) } if bytes.Compare(b, []byte{255, 105, 180}) != 0 { t.Errorf("Mat bytes unexpected values: %v\n", b) } }
explode_data.jsonl/81692
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 395 }
[ 2830, 3393, 11575, 1249, 7078, 1155, 353, 8840, 836, 8, 341, 59874, 1669, 1532, 11575, 2354, 1695, 7, 16, 15, 16, 11, 220, 16, 15, 17, 11, 6867, 929, 19589, 23, 52, 340, 2233, 1669, 5517, 3274, 7078, 741, 743, 2422, 1883, 8, 961, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { testRejectRequest(t, func(st *serverTester) { st.addLogFilter(`invalid pseudo-header ":unknown_thing"`) st.bodylessReq1(":unknown_thing", "") }) }
explode_data.jsonl/71631
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 86 }
[ 2830, 3393, 5475, 44024, 50693, 583, 1088, 21952, 62, 13790, 1155, 353, 8840, 836, 8, 341, 18185, 78413, 1900, 1155, 11, 2915, 5895, 353, 4030, 58699, 8, 341, 197, 18388, 1364, 2201, 5632, 5809, 11808, 34750, 9351, 13022, 16088, 62, 159...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestTokenLifetime_StableMarshal(t *testing.T) { lifetimeFrom := generateLifetime(10, 20, 30) t.Run("non empty", func(t *testing.T) { wire, err := lifetimeFrom.StableMarshal(nil) require.NoError(t, err) lifetimeTo := new(session.TokenLifetime) require.NoError(t, lifetimeTo.Unmarshal(wire)) require.Equal(t, lifetimeFrom, lifetimeTo) }) }
explode_data.jsonl/79970
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 140 }
[ 2830, 3393, 3323, 74579, 70645, 480, 55438, 1155, 353, 8840, 836, 8, 341, 8810, 28515, 3830, 1669, 6923, 74579, 7, 16, 15, 11, 220, 17, 15, 11, 220, 18, 15, 692, 3244, 16708, 445, 6280, 4287, 497, 2915, 1155, 353, 8840, 836, 8, 34...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNumberReadyStatus(t *testing.T) { for _, strategy := range updateStrategies() { ds := newDaemonSet("foo") ds.Spec.UpdateStrategy = *strategy manager, podControl, clientset, err := newTestController(ds) if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) } var updated *apps.DaemonSet clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { if action.GetSubresource() != "status" { return false, nil, nil } if u, ok := action.(core.UpdateAction); ok { updated = u.GetObject().(*apps.DaemonSet) } return false, nil, nil }) addNodes(manager.nodeStore, 0, 2, simpleNodeLabel) addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) manager.dsStore.Add(ds) syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) if updated.Status.NumberReady != 0 { t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) } selector, _ := metav1.LabelSelectorAsSelector(ds.Spec.Selector) daemonPods, _ := manager.podLister.Pods(ds.Namespace).List(selector) for _, pod := range daemonPods { condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} pod.Status.Conditions = append(pod.Status.Conditions, condition) } syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) if updated.Status.NumberReady != 2 { t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) } } }
explode_data.jsonl/50333
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 604 }
[ 2830, 3393, 2833, 19202, 2522, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 8282, 1669, 2088, 2647, 2580, 69388, 368, 341, 197, 83336, 1669, 501, 89177, 1649, 445, 7975, 1138, 197, 83336, 36473, 16689, 19816, 284, 353, 61914, 198, 197, 922...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestAddsHashToDeploymentSpecForEnvFrom(t *testing.T) { t.Parallel() deploymentSpec := apps_v1.Deployment{ TypeMeta: meta_v1.TypeMeta{ Kind: "Deployment", APIVersion: apps_v1.SchemeGroupVersion.String(), }, ObjectMeta: meta_v1.ObjectMeta{ Namespace: testNs, }, Spec: apps_v1.DeploymentSpec{ Template: core_v1.PodTemplateSpec{ Spec: core_v1.PodSpec{ Containers: []core_v1.Container{ core_v1.Container{}, // empty EnvFrom core_v1.Container{ EnvFrom: []core_v1.EnvFromSource{ core_v1.EnvFromSource{ SecretRef: &core_v1.SecretEnvSource{ LocalObjectReference: core_v1.LocalObjectReference{ Name: "secret1", }, }, }, }, }, core_v1.Container{ EnvFrom: []core_v1.EnvFromSource{ core_v1.EnvFromSource{ SecretRef: &core_v1.SecretEnvSource{ LocalObjectReference: core_v1.LocalObjectReference{ Name: "secret1", }, }, }, core_v1.EnvFromSource{ SecretRef: &core_v1.SecretEnvSource{ LocalObjectReference: core_v1.LocalObjectReference{ Name: "secret2", }, }, }, core_v1.EnvFromSource{ ConfigMapRef: &core_v1.ConfigMapEnvSource{ LocalObjectReference: core_v1.LocalObjectReference{ Name: "configmap1", }, }, }, }, }, }, }, }, }, } spec := runtimeToUnstructured(t, &deploymentSpec) store := speccheckertesting.FakeStore{ Namespace: testNs, Responses: map[string]runtime.Object{ "secret1": &core_v1.Secret{ TypeMeta: meta_v1.TypeMeta{ Kind: "Secret", APIVersion: "v1", }, ObjectMeta: meta_v1.ObjectMeta{ Name: "secret1", Namespace: testNs, }, Data: map[string][]byte{ "parameters": []byte(`{ "secretEnvVars": { "a": "1", "b": "2" } }`), }, }, "secret2": &core_v1.Secret{ TypeMeta: meta_v1.TypeMeta{ Kind: "Secret", APIVersion: "v1", }, ObjectMeta: meta_v1.ObjectMeta{ Name: "secret2", Namespace: testNs, }, Data: map[string][]byte{ "parameters": []byte(`{ "iamRole": "some-role" } }`), }, }, "configmap1": &core_v1.ConfigMap{ TypeMeta: meta_v1.TypeMeta{ Kind: "ConfigMap", APIVersion: "v1", }, ObjectMeta: meta_v1.ObjectMeta{ Name: "configmap1", Namespace: testNs, }, Data: map[string]string{ "a": "b", "c": "d", }, 
}, }, } logger := zaptest.NewLogger(t) defer logger.Sync() // nolint: errcheck updatedSpec, err := deployment{}.BeforeCreate(&specchecker.Context{Logger: logger, Store: store}, spec) require.NoError(t, err) deploymentCheck := updatedSpec.(*apps_v1.Deployment) require.Contains(t, deploymentCheck.Spec.Template.Annotations, EnvRefHashAnnotation) assert.NotEmpty(t, deploymentCheck.Spec.Template.Annotations[EnvRefHashAnnotation]) }
explode_data.jsonl/78677
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1614 }
[ 2830, 3393, 72111, 6370, 1249, 75286, 8327, 2461, 14359, 3830, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 197, 82213, 8327, 1669, 10500, 2273, 16, 34848, 39130, 515, 197, 27725, 12175, 25, 8823, 2273, 16, 10184, 12175, 515, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_Domain_WhenPropertiesConverted_RoundTripsWithoutLoss(t *testing.T) { t.Parallel() parameters := gopter.DefaultTestParameters() parameters.MaxSize = 10 properties := gopter.NewProperties(parameters) properties.Property( "Round trip from Domain to Domain via AssignPropertiesToDomain & AssignPropertiesFromDomain returns original", prop.ForAll(RunPropertyAssignmentTestForDomain, DomainGenerator())) properties.TestingRun(t, gopter.NewFormatedReporter(false, 240, os.Stdout)) }
explode_data.jsonl/39606
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 151 }
[ 2830, 3393, 1557, 3121, 62, 4498, 7903, 61941, 2568, 795, 21884, 1690, 26040, 39838, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 67543, 1669, 728, 73137, 13275, 2271, 9706, 741, 67543, 14535, 1695, 284, 220, 16, 15, 198, 86928...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDisableNameSuffixHash(t *testing.T) { th := kusttest_test.NewKustTestHarness(t, "/whatever") th.WriteK("/whatever/", kustomizationContent) th.WriteF("/whatever/deployment.yaml", deploymentContent) th.WriteF("/whatever/namespace.yaml", namespaceContent) th.WriteF("/whatever/jsonpatch.json", jsonpatchContent) m, err := th.MakeKustTarget().MakeCustomizedResMap() if err != nil { t.Fatalf("unexpected Resources error %v", err) } secret := findSecret(m) if secret == nil { t.Errorf("Expected to find a Secret") } if secret.GetName() != "foo-secret-bar-9btc7bt4kb" { t.Errorf("unexpected secret resource name: %s", secret.GetName()) } th.WriteK("/whatever/", strings.Replace(kustomizationContent, "disableNameSuffixHash: false", "disableNameSuffixHash: true", -1)) m, err = th.MakeKustTarget().MakeCustomizedResMap() if err != nil { t.Fatalf("unexpected Resources error %v", err) } secret = findSecret(m) if secret == nil { t.Errorf("Expected to find a Secret") } if secret.GetName() != "foo-secret-bar" { // No hash at end. t.Errorf("unexpected secret resource name: %s", secret.GetName()) } }
explode_data.jsonl/78143
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 427 }
[ 2830, 3393, 25479, 675, 40177, 6370, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 595, 590, 1944, 4452, 7121, 42, 590, 2271, 74248, 1155, 11, 3521, 68286, 1138, 70479, 4073, 42, 4283, 68286, 28105, 595, 1450, 2022, 2762, 340, 70479, 4073,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestFuncDecodeReturns(t *testing.T) { t.Parallel() tests := []struct { Func core.Func Output []byte Returns []interface{} WantReturns []interface{} }{ { Func: MustNewFunc("test()", "address"), Output: B("0x000000000000000000000000000000000000000000000000000000000000c0fe"), Returns: []interface{}{new(common.Address)}, WantReturns: []interface{}{APtr("0x000000000000000000000000000000000000c0Fe")}, }, { Func: MustNewFunc("test()", "uint256"), Output: B("0x000000000000000000000000000000000000000000000000000000000000002a"), Returns: []interface{}{new(big.Int)}, WantReturns: []interface{}{big.NewInt(42)}, }, { Func: MustNewFunc("test()", "bool"), Output: B("0x0000000000000000000000000000000000000000000000000000000000000001"), Returns: []interface{}{boolPtr(false)}, WantReturns: []interface{}{boolPtr(true)}, }, { Func: MustNewFunc("test()", "bytes32"), Output: B("0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"), Returns: []interface{}{&[32]byte{}}, WantReturns: []interface{}{&[32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}}, }, { Func: MustNewFunc("test()", "bytes32"), Output: B("0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"), Returns: []interface{}{new(common.Hash)}, WantReturns: []interface{}{hashPtr(H("0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"))}, }, { Func: MustNewFunc("test()", "bytes"), Output: B("0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000030102030000000000000000000000000000000000000000000000000000000000"), Returns: []interface{}{&[]byte{}}, WantReturns: []interface{}{&[]byte{1, 2, 3}}, }, } for i, test := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { if err := test.Func.DecodeReturns(test.Output, test.Returns...); err != nil { t.Fatalf("Failed to decode returns: %v", err) } if diff := cmp.Diff(test.WantReturns, 
test.Returns, cmp.AllowUnexported(big.Int{})); diff != "" { t.Fatalf("(-want, +got)\n%s", diff) } }) } }
explode_data.jsonl/68056
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1016 }
[ 2830, 3393, 9626, 32564, 16446, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 78216, 1669, 3056, 1235, 341, 197, 197, 9626, 286, 6200, 69845, 198, 197, 80487, 414, 3056, 3782, 198, 197, 76086, 257, 3056, 4970, 16094, 197, 17300...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetPodContainerID(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "foo1", Image: "busybox", }, { Name: "foo2", Image: "busybox", }, }, }, } // Set fake sandbox and fake containers to fakeRuntime. fakeSandbox, _ := makeAndSetFakePod(t, m, fakeRuntime, pod) // Convert fakeSandbox to kubecontainer.Container sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{ Id: fakeSandbox.Id, Metadata: fakeSandbox.Metadata, State: fakeSandbox.State, CreatedAt: fakeSandbox.CreatedAt, Labels: fakeSandbox.Labels, }) assert.NoError(t, err) expectedPod := &kubecontainer.Pod{ ID: pod.UID, Name: pod.Name, Namespace: pod.Namespace, Containers: []*kubecontainer.Container{}, Sandboxes: []*kubecontainer.Container{sandbox}, } actual, err := m.GetPodContainerID(expectedPod) assert.Equal(t, fakeSandbox.Id, actual.ID) }
explode_data.jsonl/14237
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 520 }
[ 2830, 3393, 1949, 23527, 4502, 915, 1155, 353, 8840, 836, 8, 341, 1166, 726, 15123, 11, 8358, 296, 11, 1848, 1669, 1855, 2271, 15123, 2043, 741, 6948, 35699, 1155, 11, 1848, 692, 3223, 347, 1669, 609, 85, 16, 88823, 515, 197, 23816, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetBadSignature(t *testing.T) { mb, err := mailbox.Create("get.badtoken") if err != nil { t.Fatal(err) } mb.PutMessage("TEST") req := api.GetMessageRequest{Mailbox: mb.Id} var resp api.GetMessageResponse code := doRequest(t, req, &resp, "get") if code == 200 { t.Fatal("Bad token should respond with an error") } }
explode_data.jsonl/15710
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 131 }
[ 2830, 3393, 1949, 17082, 25088, 1155, 353, 8840, 836, 8, 341, 2109, 65, 11, 1848, 1669, 45742, 7251, 445, 455, 31563, 5839, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, 2109, 65, 39825, 2052, 445, 10033, 113...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestBenchlistMaxStake(t *testing.T) { vdrs := validators.NewSet() vdr0 := validators.GenerateRandomValidator(1000) vdr1 := validators.GenerateRandomValidator(1000) vdr2 := validators.GenerateRandomValidator(1000) vdr3 := validators.GenerateRandomValidator(2000) vdr4 := validators.GenerateRandomValidator(100) // Total weight is 5100 errs := wrappers.Errs{} errs.Add( vdrs.AddWeight(vdr0.ID(), vdr0.Weight()), vdrs.AddWeight(vdr1.ID(), vdr1.Weight()), vdrs.AddWeight(vdr2.ID(), vdr2.Weight()), vdrs.AddWeight(vdr3.ID(), vdr3.Weight()), vdrs.AddWeight(vdr4.ID(), vdr4.Weight()), ) if errs.Errored() { t.Fatal(errs.Err) } threshold := 3 duration := 1 * time.Hour // Shouldn't bench more than 2550 (5100/2) maxPortion := 0.5 benchIntf, err := NewBenchlist( logging.NoLog{}, vdrs, threshold, minimumFailingDuration, duration, maxPortion, "", prometheus.NewRegistry(), ) if err != nil { t.Fatal(err) } b := benchIntf.(*benchlist) defer b.timer.Stop() now := time.Now() b.clock.Set(now) // Register [threshold-1] failures for 3 validators for _, vdr := range []validators.Validator{vdr0, vdr1, vdr2} { for i := 0; i < threshold-1; i++ { b.RegisterFailure(vdr.ID()) } } // Advance the time to past the minimum failing duration newTime := now.Add(minimumFailingDuration).Add(time.Second) b.lock.Lock() b.clock.Set(newTime) b.lock.Unlock() // Register another failure for all three for _, vdr := range []validators.Validator{vdr0, vdr1, vdr2} { b.RegisterFailure(vdr.ID()) } // Only vdr0 and vdr1 should be benched (total weight 2000) // Benching vdr2 (weight 1000) would cause the amount benched // to exceed the maximum b.lock.Lock() assert.True(t, b.isBenched(vdr0.ID())) assert.True(t, b.isBenched(vdr1.ID())) assert.False(t, b.isBenched(vdr2.ID())) assert.Equal(t, b.benchedQueue.Len(), 2) assert.Equal(t, b.benchlistSet.Len(), 2) assert.Len(t, b.failureStreaks, 1) fs := b.failureStreaks[vdr2.ID()] fs.consecutive = threshold fs.firstFailure = now b.lock.Unlock() // Register threshold - 1 
failures for vdr4 for i := 0; i < threshold-1; i++ { b.RegisterFailure(vdr4.ID()) } // Advance the time past min failing duration newTime2 := newTime.Add(minimumFailingDuration).Add(time.Second) b.lock.Lock() b.clock.Set(newTime2) b.lock.Unlock() // Register another failure for vdr4 b.RegisterFailure(vdr4.ID()) // vdr4 should be benched now b.lock.Lock() assert.True(t, b.isBenched(vdr0.ID())) assert.True(t, b.isBenched(vdr1.ID())) assert.True(t, b.isBenched(vdr4.ID())) assert.Equal(t, 3, b.benchedQueue.Len()) assert.Equal(t, 3, b.benchlistSet.Len()) assert.Contains(t, b.benchlistSet, vdr0.ID()) assert.Contains(t, b.benchlistSet, vdr1.ID()) assert.Contains(t, b.benchlistSet, vdr4.ID()) assert.Len(t, b.failureStreaks, 1) // for vdr2 b.lock.Unlock() // More failures for vdr2 shouldn't add it to the bench // because the max bench amount would be exceeded for i := 0; i < threshold-1; i++ { b.RegisterFailure(vdr2.ID()) } b.lock.Lock() assert.True(t, b.isBenched(vdr0.ID())) assert.True(t, b.isBenched(vdr1.ID())) assert.True(t, b.isBenched(vdr4.ID())) assert.False(t, b.isBenched(vdr2.ID())) assert.Equal(t, 3, b.benchedQueue.Len()) assert.Equal(t, 3, b.benchlistSet.Len()) assert.Len(t, b.failureStreaks, 1) assert.Contains(t, b.failureStreaks, vdr2.ID()) // Ensure the benched queue root has the min end time minEndTime := b.benchedQueue[0].benchedUntil benchedIDs := []ids.ShortID{vdr0.ID(), vdr1.ID(), vdr4.ID()} for _, benchedVdr := range b.benchedQueue { assert.Contains(t, benchedIDs, benchedVdr.validatorID) assert.True(t, !benchedVdr.benchedUntil.Before(minEndTime)) } b.lock.Unlock() }
explode_data.jsonl/25538
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1575 }
[ 2830, 3393, 33, 19762, 1607, 5974, 623, 726, 1155, 353, 8840, 836, 8, 341, 5195, 93246, 1669, 38588, 7121, 1649, 741, 5195, 3612, 15, 1669, 38588, 57582, 13999, 14256, 7, 16, 15, 15, 15, 340, 5195, 3612, 16, 1669, 38588, 57582, 13999,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestInit(t *testing.T) { for _, k := range expectedRegisteredBackends { _, found := backend.Functions[k] if !found { t.Errorf("registered backend expected but not found: '%v'", k) } } }
explode_data.jsonl/36216
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 77 }
[ 2830, 3393, 3803, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 595, 1669, 2088, 3601, 41430, 3707, 1412, 341, 197, 197, 6878, 1730, 1669, 19163, 30547, 82, 6732, 921, 197, 743, 753, 15105, 341, 298, 3244, 13080, 445, 34909, 19163, 3601, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
3
func TestTracedError_Unwrap(t *testing.T) { tracedErrWithoutSubErr := New("error without sub-error") tracedErrWithBuiltInSubErr := Wrap(errors.New("built-in sub-error"), "error with built-in sub-error") tracedErrWithTracedSubErr := Wrap(tracedErrWithBuiltInSubErr, "error with traced sub-error") errs := []*tracedError{ tracedErrWithoutSubErr, tracedErrWithBuiltInSubErr, tracedErrWithTracedSubErr, } for _, err := range errs { unwrapped := err.Unwrap() isEqual := errors.Is(unwrapped, err.subErr) if !isEqual { t.Errorf("\nexpected: %v\nactual: %v", err.subErr, unwrapped) } } }
explode_data.jsonl/42772
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 252 }
[ 2830, 3393, 1282, 4435, 1454, 40687, 10097, 1155, 353, 8840, 836, 8, 341, 25583, 4435, 7747, 26040, 3136, 7747, 1669, 1532, 445, 841, 2041, 1186, 18917, 1138, 25583, 4435, 7747, 2354, 54300, 641, 3136, 7747, 1669, 42187, 38881, 7121, 445,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStrategicMergePatch(t *testing.T) { testStrategicMergePatchWithCustomArgumentsUsingStruct(t, "bad struct", "{}", "{}", []byte("<THIS IS NOT A STRUCT>"), mergepatch.ErrBadArgKind(struct{}{}, []byte{})) mergeItemOpenapiSchema := PatchMetaFromOpenAPI{ Schema: sptest.GetSchemaOrDie(&fakeMergeItemSchema, "mergeItem"), } schemas := []LookupPatchMeta{ mergeItemStructSchema, mergeItemOpenapiSchema, } tc := StrategicMergePatchTestCases{} err := yaml.Unmarshal(createStrategicMergePatchTestCaseData, &tc) if err != nil { t.Errorf("can't unmarshal test cases: %s\n", err) return } for _, schema := range schemas { testStrategicMergePatchWithCustomArguments(t, "bad original", "<THIS IS NOT JSON>", "{}", schema, mergepatch.ErrBadJSONDoc) testStrategicMergePatchWithCustomArguments(t, "bad patch", "{}", "<THIS IS NOT JSON>", schema, mergepatch.ErrBadJSONDoc) testStrategicMergePatchWithCustomArguments(t, "nil struct", "{}", "{}", nil, mergepatch.ErrBadArgKind(struct{}{}, nil)) for _, c := range tc.TestCases { testTwoWayPatch(t, c, schema) testThreeWayPatch(t, c, schema) } // run multiple times to exercise different map traversal orders for i := 0; i < 10; i++ { for _, c := range strategicMergePatchRawTestCases { testTwoWayPatchForRawTestCase(t, c, schema) testThreeWayPatchForRawTestCase(t, c, schema) } } } }
explode_data.jsonl/3510
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 536 }
[ 2830, 3393, 2580, 89367, 52096, 43622, 1155, 353, 8840, 836, 8, 341, 18185, 2580, 89367, 52096, 43622, 2354, 10268, 19139, 16429, 9422, 1155, 11, 330, 13855, 2036, 756, 197, 197, 1, 42351, 35503, 497, 3056, 3782, 9639, 35912, 3424, 4183, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestUnexportedExecuteQuery(t *testing.T) { defer func() { client = nil logger = nil }() Convey("Given a host and query", t, func() { host := "testhost" query := "testquery" Convey("When executeQuery is called with no client or logger established", func() { _, err := executeQuery(host, query) Convey("Then an error should be returned", func() { So(err, ShouldNotBeNil) }) }) }) }
explode_data.jsonl/9667
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 156 }
[ 2830, 3393, 1806, 1533, 291, 17174, 2859, 1155, 353, 8840, 836, 8, 341, 16867, 2915, 368, 341, 197, 25291, 284, 2092, 198, 197, 17060, 284, 2092, 198, 197, 69826, 93070, 5617, 445, 22043, 264, 3468, 323, 3239, 497, 259, 11, 2915, 368,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestSvcStatus_Ask drives svcStatusOpts.Ask through a table of scenarios:
// failure to select an application, failure to select a deployed service,
// and the fully-specified success path.
func TestSvcStatus_Ask(t *testing.T) {
	mockError := errors.New("some error")
	testCases := map[string]struct {
		inputApp         string
		inputSvc         string
		inputEnvironment string
		mockSelector     func(m *mocks.MockdeploySelector)
		wantedError      error
	}{
		"errors if failed to select application": {
			mockSelector: func(m *mocks.MockdeploySelector) {
				m.EXPECT().Application(svcStatusAppNamePrompt, svcStatusAppNameHelpPrompt).Return("", mockError)
			},
			wantedError: fmt.Errorf("select application: some error"),
		},
		"errors if failed to select deployed service": {
			inputApp: "mockApp",
			mockSelector: func(m *mocks.MockdeploySelector) {
				m.EXPECT().DeployedService(svcStatusNamePrompt, svcStatusNameHelpPrompt, "mockApp", gomock.Any(), gomock.Any()).
					Return(nil, mockError)
			},
			wantedError: fmt.Errorf("select deployed services for application mockApp: some error"),
		},
		"success": {
			inputApp:         "mockApp",
			inputSvc:         "mockSvc",
			inputEnvironment: "mockEnv",
			mockSelector: func(m *mocks.MockdeploySelector) {
				m.EXPECT().DeployedService(svcStatusNamePrompt, svcStatusNameHelpPrompt, "mockApp", gomock.Any(), gomock.Any()).
					Return(&selector.DeployedService{
						Env: "mockEnv",
						Svc: "mockSvc",
					}, nil)
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			// GIVEN: a selector mock wired with the case's expectations.
			mockSelector := mocks.NewMockdeploySelector(ctrl)
			tc.mockSelector(mockSelector)
			svcStatus := &svcStatusOpts{
				svcStatusVars: svcStatusVars{
					svcName: tc.inputSvc,
					envName: tc.inputEnvironment,
					appName: tc.inputApp,
				},
				sel: mockSelector,
			}
			// WHEN
			err := svcStatus.Ask()
			// THEN
			if tc.wantedError != nil {
				require.EqualError(t, err, tc.wantedError.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
explode_data.jsonl/47141
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 839 }
[ 2830, 3393, 92766, 2522, 1566, 4886, 1155, 353, 8840, 836, 8, 341, 77333, 1454, 1669, 5975, 7121, 445, 14689, 1465, 1138, 18185, 37302, 1669, 2415, 14032, 60, 1235, 341, 197, 22427, 2164, 260, 914, 198, 197, 22427, 92766, 260, 914, 198,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestCascadingDeletion exercises the garbage collector end to end: a pod
// owned solely by a deleted RC must be collected, while pods that retain at
// least one live owner (or have no owner at all) must survive.
func TestCascadingDeletion(t *testing.T) {
	stopCh := make(chan struct{})
	glog.V(6).Infof("TestCascadingDeletion starts")
	defer glog.V(6).Infof("TestCascadingDeletion ends")
	s, closeFn, gc, clientSet := setup(t, stopCh)
	defer func() {
		// We have to close the stop channel first, so the shared informers can terminate their watches;
		// otherwise closeFn() will hang waiting for active client connections to finish.
		close(stopCh)
		closeFn()
	}()
	ns := framework.CreateTestingNamespace("gc-cascading-deletion", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
	podClient := clientSet.Core().Pods(ns.Name)
	// Two owner RCs: one will be deleted, one remains to keep shared pods alive.
	toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
	if err != nil {
		t.Fatalf("Failed to create replication controller: %v", err)
	}
	remainingRC, err := rcClient.Create(newOwnerRC(remainingRCName, ns.Name))
	if err != nil {
		t.Fatalf("Failed to create replication controller: %v", err)
	}
	rcs, err := rcClient.List(metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to list replication controllers: %v", err)
	}
	if len(rcs.Items) != 2 {
		t.Fatalf("Expect only 2 replication controller")
	}
	// this pod should be cascadingly deleted.
	pod := newPod(garbageCollectedPodName, ns.Name, []metav1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
	_, err = podClient.Create(pod)
	if err != nil {
		t.Fatalf("Failed to create Pod: %v", err)
	}
	// this pod shouldn't be cascadingly deleted, because it has a valid reference.
	pod = newPod(oneValidOwnerPodName, ns.Name, []metav1.OwnerReference{
		{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName},
		{UID: remainingRC.ObjectMeta.UID, Name: remainingRCName},
	})
	_, err = podClient.Create(pod)
	if err != nil {
		t.Fatalf("Failed to create Pod: %v", err)
	}
	// this pod shouldn't be cascadingly deleted, because it doesn't have an owner.
	pod = newPod(independentPodName, ns.Name, []metav1.OwnerReference{})
	_, err = podClient.Create(pod)
	if err != nil {
		t.Fatalf("Failed to create Pod: %v", err)
	}
	// set up watch
	pods, err := podClient.List(metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to list pods: %v", err)
	}
	if len(pods.Items) != 3 {
		t.Fatalf("Expect only 3 pods")
	}
	go gc.Run(5, stopCh)
	// delete one of the replication controller
	if err := rcClient.Delete(toBeDeletedRCName, getNonOrphanOptions()); err != nil {
		t.Fatalf("failed to delete replication controller: %v", err)
	}
	// sometimes the deletion of the RC takes long time to be observed by
	// the gc, so wait for the garbage collector to observe the deletion of
	// the toBeDeletedRC
	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
		return !gc.GraphHasUID([]types.UID{toBeDeletedRC.ObjectMeta.UID}), nil
	}); err != nil {
		t.Fatal(err)
	}
	if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
		t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
	}
	// checks the garbage collect doesn't delete pods it shouldn't delete.
	if _, err := podClient.Get(independentPodName, metav1.GetOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := podClient.Get(oneValidOwnerPodName, metav1.GetOptions{}); err != nil {
		t.Fatal(err)
	}
}
explode_data.jsonl/37641
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1215 }
[ 2830, 3393, 34, 5061, 2228, 1912, 52625, 1155, 353, 8840, 836, 8, 341, 62644, 1143, 1669, 1281, 35190, 2036, 6257, 692, 97130, 5058, 7, 21, 568, 1731, 69, 445, 2271, 34, 5061, 2228, 1912, 52625, 8471, 1138, 16867, 342, 839, 5058, 7, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_waitForTextFile(t *testing.T) { t.Run("Times out for non-existent filename", func(t *testing.T) { bytes, err := waitForTextFile("path/to/non-existent/file", time.After(0)) assert.Error(t, err) assert.Equal(t, err.Error(), "Operation waitForTextFile timed out.") assert.Nil(t, bytes) }) t.Run("Returns bytes for eventually existent filename", func(t *testing.T) { file_to_exist, _ := ioutil.TempFile("", "existent-file") file_to_exist_name := file_to_exist.Name() os.Remove(file_to_exist_name) go func() { ioutil.WriteFile(file_to_exist_name, []byte("some random stuff"), 0600) }() defer os.Remove(file_to_exist_name) bytes, err := waitForTextFile(file_to_exist_name, nil) assert.NoError(t, err) assert.Equal(t, "some random stuff", string(bytes)) }) }
explode_data.jsonl/12767
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 312 }
[ 2830, 3393, 18760, 2461, 1178, 1703, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 18889, 700, 369, 2477, 59828, 3899, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 70326, 11, 1848, 1669, 52223, 1178, 1703, 445, 2343, 32429, 91130, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestMakeStructFieldDescriptorRWSet exercises the __set__ slot of a
// read/write struct-field descriptor: valid assignments, a value of the
// wrong type, and applying the descriptor to an object of the wrong class.
func TestMakeStructFieldDescriptorRWSet(t *testing.T) {
	// Helper builtin: builds a descriptor for field args[1]/args[2] on type
	// args[0] and invokes its __set__ with (args[3], args[4]).
	fun := newBuiltinFunction("TestMakeStructFieldDescriptorRW_set", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
		if raised := checkMethodArgs(f, "TestMakeStructFieldDescriptorRW_set", args, TypeType, StrType, StrType, ObjectType, ObjectType); raised != nil {
			return nil, raised
		}
		t := toTypeUnsafe(args[0])
		desc := makeStructFieldDescriptor(t, toStrUnsafe(args[1]).Value(), toStrUnsafe(args[2]).Value(), fieldDescriptorRW)
		set, raised := GetAttr(f, desc, NewStr("__set__"), nil)
		if raised != nil {
			return nil, raised
		}
		return set.Call(f, wrapArgs(args[3], args[4]), nil)
	}).ToObject()
	cases := []invokeTestCase{
		{args: wrapArgs(FileType, "Softspace", "softspace", newObject(FileType), NewInt(0).ToObject()), want: None},
		{args: wrapArgs(FileType, "Softspace", "softspace", newObject(FileType), NewInt(0)), want: None},
		// Assigning a non-int must raise TypeError.
		{args: wrapArgs(FileType, "Softspace", "softspace", newObject(FileType), "wrong"), wantExc: mustCreateException(TypeErrorType, "an int is required")},
		// Using a file descriptor on an int instance must raise TypeError.
		{args: wrapArgs(FileType, "Softspace", "softspace", 42, NewInt(0)), wantExc: mustCreateException(TypeErrorType, "descriptor 'softspace' for 'file' objects doesn't apply to 'int' objects")},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(fun, &cas); err != "" {
			t.Error(err)
		}
	}
}
explode_data.jsonl/79896
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 494 }
[ 2830, 3393, 8078, 9422, 1877, 11709, 56368, 1649, 1155, 353, 8840, 836, 8, 341, 90126, 1669, 501, 33, 25628, 5152, 445, 2271, 8078, 9422, 1877, 11709, 56368, 2602, 497, 2915, 955, 353, 4369, 11, 2827, 17693, 11, 16494, 71915, 4117, 8, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestCmaEsChol runs every CMA-ES (Cholesky) case three times: once
// serially, a second serial run to detect residual state in the method, and
// once with concurrent function evaluations.
func TestCmaEsChol(t *testing.T) {
	for i, test := range cmaTestCases() {
		// Fixed seed keeps the stochastic method deterministic per case.
		src := rand.New(rand.NewSource(1))
		method := test.method
		method.Src = src
		initX := test.initX
		if initX == nil {
			initX = make([]float64, test.dim)
		}
		// Run and check that the expected termination occurs.
		result, err := Minimize(test.problem, initX, test.settings, method)
		if testErr := test.good(result, err, test.settings.Concurrent); testErr != nil {
			t.Errorf("cas %d: %v", i, testErr)
		}
		// Run a second time to make sure there are no residual effects
		result, err = Minimize(test.problem, initX, test.settings, method)
		if testErr := test.good(result, err, test.settings.Concurrent); testErr != nil {
			t.Errorf("cas %d second: %v", i, testErr)
		}
		// Test the problem in parallel.
		test.settings.Concurrent = 5
		result, err = Minimize(test.problem, initX, test.settings, method)
		if testErr := test.good(result, err, test.settings.Concurrent); testErr != nil {
			t.Errorf("cas %d concurrent: %v", i, testErr)
		}
		// Restore the shared settings for the next iteration.
		test.settings.Concurrent = 0
	}
}
explode_data.jsonl/41758
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 405 }
[ 2830, 3393, 34, 1728, 17360, 1143, 337, 1155, 353, 8840, 836, 8, 341, 2023, 600, 11, 1273, 1669, 2088, 272, 1728, 2271, 37302, 368, 341, 197, 41144, 1669, 10382, 7121, 37595, 7121, 3608, 7, 16, 1171, 197, 42257, 1669, 1273, 12908, 198...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
// TestPVCControlUpdatePVCConflictSuccess verifies that UpdatePVC retries on
// an update conflict and eventually persists the new annotations.
func TestPVCControlUpdatePVCConflictSuccess(t *testing.T) {
	g := NewGomegaWithT(t)
	tc := newTidbCluster()
	pvc := newPVC(tc)
	pvc.Annotations = map[string]string{"a": "b"}
	oldPVC := newPVC(tc)
	fakeClient, pvcLister, pvcIndexer, recorder := newFakeClientAndRecorder()
	pvcIndexer.Add(oldPVC)
	control := NewRealPVCControl(fakeClient, recorder, pvcLister)
	// The reactor returns a Conflict error exactly once, then lets the
	// retried update through.
	conflict := false
	fakeClient.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		if !conflict {
			conflict = true
			return true, oldPVC, apierrors.NewConflict(action.GetResource().GroupResource(), pvc.Name, errors.New("conflict"))
		}
		return true, update.GetObject(), nil
	})
	updatePVC, err := control.UpdatePVC(tc, pvc)
	g.Expect(err).To(Succeed())
	// The annotation set before the conflict must survive the retry.
	g.Expect(updatePVC.Annotations["a"]).To(Equal("b"))
}
explode_data.jsonl/66725
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 333 }
[ 2830, 3393, 47, 11287, 3273, 4289, 47, 11287, 57974, 7188, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 1532, 38, 32696, 2354, 51, 1155, 340, 78255, 1669, 501, 51, 307, 65, 28678, 741, 3223, 7362, 1669, 501, 47, 11287, 44415, 340, 3223, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestWithLSB(t *testing.T) { tests := []struct { byte byte bit bool want byte }{ {byte: 0b00000000, bit: false, want: 0b00000000}, {byte: 0b00000000, bit: true, want: 0b00000001}, {byte: 0b11111111, bit: true, want: 0b11111111}, {byte: 0b11111111, bit: false, want: 0b11111110}, } for _, tt := range tests { name := fmt.Sprintf("setting LSB of %08b to %t should be %08b", tt.byte, tt.bit, tt.want) t.Run(name, func(t *testing.T) { got := WithLSB(tt.byte, tt.bit) assert.Equal(t, tt.want, got, "WithLSB() = %v, want %v", got, tt.want) }) } }
explode_data.jsonl/78675
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 264 }
[ 2830, 3393, 2354, 7268, 33, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 31422, 4922, 198, 197, 79980, 220, 1807, 198, 197, 50780, 4922, 198, 197, 59403, 197, 197, 90, 3782, 25, 220, 15, 65, 15, 15, 15, 15, 15,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestCopyFKCheck checks that COPY honors foreign-key constraints when
// optimizer-driven FK checks are enabled: copying a row whose reference has
// no matching parent must fail when the COPY statement is closed.
func TestCopyFKCheck(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := tests.CreateTestServerParams()
	s, db, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(context.TODO())
	// Keep all statements on one connection so session settings apply.
	db.SetMaxOpenConns(1)
	r := sqlutils.MakeSQLRunner(db)
	r.Exec(t, `
CREATE DATABASE d;
SET DATABASE = d;
CREATE TABLE p (p INT PRIMARY KEY);
CREATE TABLE t (
  a INT PRIMARY KEY,
  p INT REFERENCES p(p)
);
SET experimental_optimizer_foreign_keys = true;
`)
	txn, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = txn.Rollback() }()
	stmt, err := txn.Prepare(pq.CopyIn("t", "a", "p"))
	if err != nil {
		t.Fatal(err)
	}
	// Table p is empty, so the reference p=1 has no parent row.
	_, err = stmt.Exec(1, 1)
	if err != nil {
		t.Fatal(err)
	}
	// The FK violation surfaces when the COPY statement is closed.
	err = stmt.Close()
	if !testutils.IsError(err, "foreign key violation|violates foreign key constraint") {
		t.Fatalf("expected FK error, got: %v", err)
	}
}
explode_data.jsonl/2952
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 391 }
[ 2830, 3393, 12106, 26998, 3973, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 25856, 11, 716, 1669, 7032, 7251, 2271, 5475, 4870, 741, 1903, 11, 2927, 11, 716, 1669, 3538, 6031, 12101, 5475, 1155, 11, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFxVerifyTransfer(t *testing.T) { vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.CLK.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { t.Fatal(err) } if err := fx.VerifyTransfer(nil, nil, nil, nil); err == nil { t.Fatalf("this Fx doesn't support transfers") } }
explode_data.jsonl/56464
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 180 }
[ 2830, 3393, 81856, 32627, 21970, 1155, 353, 8840, 836, 8, 341, 54879, 1669, 511, 4672, 17, 20, 21, 74, 16, 8298, 8787, 11187, 515, 197, 197, 36913, 25, 13482, 34607, 7121, 3675, 3148, 197, 24201, 25, 256, 8392, 16766, 2201, 38837, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestSimpleCaseWithMap(t *testing.T) { obj := &generated.SimpleWithMap{ FieldA: 1, FieldB: "test", FieldC: map[string]string{ "test1": "test1", }, } testSerialization(t, obj) }
explode_data.jsonl/48808
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 85 }
[ 2830, 3393, 16374, 4207, 2354, 2227, 1155, 353, 8840, 836, 8, 341, 22671, 1669, 609, 16187, 24252, 2354, 2227, 515, 197, 94478, 32, 25, 220, 16, 345, 197, 94478, 33, 25, 330, 1944, 756, 197, 94478, 34, 25, 2415, 14032, 30953, 515, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetRequestCurrencyPairFormat(t *testing.T) { cfg := GetConfig() err := cfg.LoadConfig(ConfigTestFile) if err != nil { t.Errorf( "Test failed. TestGetRequestCurrencyPairFormat. LoadConfig Error: %s", err.Error(), ) } _, err = cfg.GetRequestCurrencyPairFormat("asdasdasd") if err == nil { t.Errorf( "Test failed. TestGetRequestCurrencyPairFormat. Non-existent exchange returned nil error", ) } exchFmt, err := cfg.GetRequestCurrencyPairFormat("Liqui") if exchFmt.Uppercase || exchFmt.Delimiter != "_" || exchFmt.Separator != "-" { t.Errorf( "Test failed. TestGetRequestCurrencyPairFormat. Invalid values", ) } }
explode_data.jsonl/21895
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 253 }
[ 2830, 3393, 1949, 1900, 26321, 12443, 4061, 1155, 353, 8840, 836, 8, 341, 50286, 1669, 2126, 2648, 741, 9859, 1669, 13286, 13969, 2648, 33687, 2271, 1703, 340, 743, 1848, 961, 2092, 341, 197, 3244, 13080, 1006, 298, 197, 1, 2271, 4641, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
// TestBlkioSetMultipleWeightDevice verifies that Set writes every configured
// weight device to the blkio cgroup weight-device file, not just the first.
func TestBlkioSetMultipleWeightDevice(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	const (
		weightDeviceBefore = "8:0 400"
	)
	wd1 := configs.NewWeightDevice(8, 0, 500, 0)
	wd2 := configs.NewWeightDevice(8, 16, 500, 0)
	// we cannot actually set and check both because normal ioutil.WriteFile
	// when writing to cgroup file will overwrite the whole file content instead
	// of updating it as the kernel is doing. Just check the second device
	// is present will suffice for the test to ensure multiple writes are done.
	weightDeviceAfter := wd2.WeightString()
	blkio := &BlkioGroup{}
	blkio.detectWeightFilenames(helper.CgroupPath)
	// With no real blkio controller present, the bfq filename is expected.
	if blkio.weightDeviceFilename != "blkio.bfq.weight_device" {
		t.Fatalf("when blkio controller is unavailable, expected to use \"blkio.bfq.weight_device\", tried to use %q", blkio.weightDeviceFilename)
	}
	helper.writeFileContents(map[string]string{
		blkio.weightDeviceFilename: weightDeviceBefore,
	})
	helper.CgroupData.config.Resources.BlkioWeightDevice = []*configs.WeightDevice{wd1, wd2}
	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config.Resources); err != nil {
		t.Fatal(err)
	}
	value, err := fscommon.GetCgroupParamString(helper.CgroupPath, blkio.weightDeviceFilename)
	if err != nil {
		t.Fatal(err)
	}
	if value != weightDeviceAfter {
		t.Fatalf("Got the wrong value, set %s failed.", blkio.weightDeviceFilename)
	}
}
explode_data.jsonl/45835
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 492 }
[ 2830, 3393, 4923, 74, 815, 1649, 32089, 8295, 6985, 1155, 353, 8840, 836, 8, 341, 9598, 2947, 1669, 1532, 34, 4074, 2271, 2742, 445, 34989, 815, 497, 259, 340, 16867, 13137, 87689, 2822, 4777, 2399, 197, 197, 4765, 6985, 10227, 284, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestWithPrometheus checks that the prometheus middleware reports an
// attempt, an outcome (success for 2xx, failure for 4xx/5xx), and an
// observed duration for each wrapped handler invocation.
func TestWithPrometheus(t *testing.T) {
	// prometheusTest records one expected call on the mock reporter.
	type prometheusTest struct {
		method   string
		argument []interface{}
	}
	pt := func(method string, args ...interface{}) prometheusTest {
		return prometheusTest{
			method:   method,
			argument: args,
		}
	}
	testCases := []struct {
		name         string
		actualResult func() (*reporters.MockPrometheus, []prometheusTest)
	}{
		{
			name: "test prometheus middleware for success",
			actualResult: func() (*reporters.MockPrometheus, []prometheusTest) {
				w := httptest.NewRecorder()
				r, err := http.NewRequest(http.MethodGet, "/random", nil)
				require.NoError(t, err)
				th := func(resp http.ResponseWriter, req *http.Request) {
					resp.WriteHeader(http.StatusOK)
				}
				mockPrometheus := &reporters.MockPrometheus{}
				mockPrometheus.On("ReportAttempt", "random")
				mockPrometheus.On("ReportSuccess", "random")
				mockPrometheus.On("Observe", "random", mock.Anything)
				middleware.WithPrometheus(mockPrometheus, "random", th)(w, r)
				return mockPrometheus, []prometheusTest{
					pt("ReportAttempt", "random"),
					pt("ReportSuccess", "random"),
					pt("Observe", "random", mock.Anything),
				}
			},
		},
		{
			name: "test prometheus middleware for 400 error",
			actualResult: func() (*reporters.MockPrometheus, []prometheusTest) {
				w := httptest.NewRecorder()
				r, err := http.NewRequest(http.MethodGet, "/random", nil)
				require.NoError(t, err)
				th := func(resp http.ResponseWriter, req *http.Request) {
					resp.WriteHeader(http.StatusBadRequest)
				}
				mockPrometheus := &reporters.MockPrometheus{}
				mockPrometheus.On("ReportAttempt", "random")
				mockPrometheus.On("ReportFailure", "random")
				mockPrometheus.On("Observe", "random", mock.Anything)
				middleware.WithPrometheus(mockPrometheus, "random", th)(w, r)
				return mockPrometheus, []prometheusTest{
					pt("ReportAttempt", "random"),
					pt("ReportFailure", "random"),
					pt("Observe", "random", mock.Anything),
				}
			},
		},
		{
			name: "test statsd middleware for 500 error",
			actualResult: func() (*reporters.MockPrometheus, []prometheusTest) {
				w := httptest.NewRecorder()
				r, err := http.NewRequest(http.MethodGet, "/random", nil)
				require.NoError(t, err)
				th := func(resp http.ResponseWriter, req *http.Request) {
					resp.WriteHeader(http.StatusInternalServerError)
				}
				mockPrometheus := &reporters.MockPrometheus{}
				mockPrometheus.On("ReportAttempt", "random")
				mockPrometheus.On("ReportFailure", "random")
				mockPrometheus.On("Observe", "random", mock.Anything)
				middleware.WithPrometheus(mockPrometheus, "random", th)(w, r)
				return mockPrometheus, []prometheusTest{
					pt("ReportAttempt", "random"),
					pt("ReportFailure", "random"),
					pt("Observe", "random", mock.Anything),
				}
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			cl, res := testCase.actualResult()
			// Every expected reporter call must have been made.
			for _, r := range res {
				cl.AssertCalled(t, r.method, r.argument...)
			}
		})
	}
}
explode_data.jsonl/57374
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1207 }
[ 2830, 3393, 2354, 35186, 39705, 1155, 353, 8840, 836, 8, 341, 13158, 2706, 39705, 2271, 2036, 341, 197, 42257, 256, 914, 198, 197, 197, 14479, 3056, 4970, 16094, 197, 630, 60796, 1669, 2915, 17262, 914, 11, 2827, 2503, 4970, 28875, 2706...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_ReadUint32(t *testing.T) { buf := make([]byte, 8) PutUint32(buf, 0, 20) PutUint32(buf, 4, 40) assert.Equal(t, uint32(20), ReadUint32(buf, 0)) assert.Equal(t, uint32(40), ReadUint32(buf, 4)) }
explode_data.jsonl/77411
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 100 }
[ 2830, 3393, 38381, 21570, 18, 17, 1155, 353, 8840, 836, 8, 341, 26398, 1669, 1281, 10556, 3782, 11, 220, 23, 340, 10025, 332, 21570, 18, 17, 10731, 11, 220, 15, 11, 220, 17, 15, 340, 10025, 332, 21570, 18, 17, 10731, 11, 220, 19, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRejectTrafficProvidedWithoutCanary(t *testing.T) { g := gomega.NewGomegaWithT(t) kfsvc := makeTestKFService() kfsvc.Spec.CanaryTrafficPercent = 1 g.Expect(kfsvc.ValidateCreate()).Should(gomega.MatchError(TrafficProvidedWithoutCanaryError)) }
explode_data.jsonl/7101
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 100 }
[ 2830, 3393, 78413, 87229, 35819, 291, 26040, 6713, 658, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 342, 32696, 7121, 38, 32696, 2354, 51, 1155, 340, 16463, 69, 58094, 1669, 1281, 2271, 65008, 1860, 741, 16463, 69, 58094, 36473, 53280, 65...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestSeal seals a small payload against PCR 17 at locality 0, unseals it,
// and checks the round trip preserves the data. Runs only when a TPM is
// reachable (openTPMOrSkip skips otherwise).
func TestSeal(t *testing.T) {
	rwc := openTPMOrSkip(t)
	defer rwc.Close()
	// A recognizable non-zero prefix in an otherwise zeroed buffer.
	data := make([]byte, 64)
	data[0] = 137
	data[1] = 138
	data[2] = 139
	srkAuth := getAuth(srkAuthEnvVar)
	sealed, err := Seal(rwc, 0 /* locality 0 */, []int{17} /* PCR 17 */, data, srkAuth[:])
	if err != nil {
		t.Fatal("Couldn't seal the data:", err)
	}
	data2, err := Unseal(rwc, sealed, srkAuth[:])
	if err != nil {
		t.Fatal("Couldn't unseal the data:", err)
	}
	if !bytes.Equal(data2, data) {
		t.Fatal("Unsealed data doesn't match original data")
	}
}
explode_data.jsonl/75348
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 239 }
[ 2830, 3393, 1514, 278, 1155, 353, 8840, 836, 8, 341, 7000, 24028, 1669, 1787, 4239, 44, 2195, 35134, 1155, 340, 16867, 435, 24028, 10421, 2822, 8924, 1669, 1281, 10556, 3782, 11, 220, 21, 19, 340, 8924, 58, 15, 60, 284, 220, 16, 18,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
// TestTimer_AddOnce verifies that AddOnce jobs fire exactly once each, and
// that closing the timer prevents any further scheduling.
func TestTimer_AddOnce(t *testing.T) {
	gtest.Case(t, func() {
		timer := New()
		array := garray.New()
		// Two one-shot jobs at 200ms; after 250ms both must have run once.
		timer.AddOnce(200*time.Millisecond, func() {
			array.Append(1)
		})
		timer.AddOnce(200*time.Millisecond, func() {
			array.Append(1)
		})
		time.Sleep(250 * time.Millisecond)
		gtest.Assert(array.Len(), 2)
		// Another interval passes with no further appends: jobs were one-shot.
		time.Sleep(250 * time.Millisecond)
		gtest.Assert(array.Len(), 2)
		timer.Close()
		time.Sleep(250 * time.Millisecond)
		fixedLength := array.Len()
		// After Close, the length must stay frozen.
		time.Sleep(250 * time.Millisecond)
		gtest.Assert(array.Len(), fixedLength)
	})
}
explode_data.jsonl/3718
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 317 }
[ 2830, 3393, 10105, 21346, 12522, 1155, 353, 8840, 836, 8, 341, 256, 342, 1944, 727, 519, 1155, 11, 2915, 368, 341, 981, 9021, 220, 1669, 1532, 741, 981, 1334, 220, 1669, 342, 1653, 7121, 741, 981, 9021, 1904, 12522, 7, 17, 15, 15, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_resolveAutoTag(t *testing.T) { cases := []struct { typ string name string inputs *Inputs expects []string }{ {RefTypeBranch, "master", &Inputs{AutoTag: false, Tags: []string{}}, []string{}}, {RefTypeBranch, "master", &Inputs{AutoTag: false, Tags: []string{"master"}}, []string{"master"}}, {RefTypeBranch, "master", &Inputs{AutoTag: false, Tags: []string{"a", "b", "c"}}, []string{"a", "b", "c"}}, {RefTypeBranch, "master", &Inputs{AutoTag: true, Tags: []string{"master"}}, []string{"latest"}}, {RefTypeBranch, "master", &Inputs{AutoTag: true, Tags: []string{"feature", "develop"}}, []string{"latest"}}, {RefTypeBranch, "master", &Inputs{AutoTag: true, Tags: []string{}}, []string{"latest"}}, {RefTypeBranch, "develop", &Inputs{AutoTag: true, Tags: []string{}}, []string{"develop"}}, {RefTypeTag, "v1.0.0", &Inputs{AutoTag: true, Tags: []string{}}, []string{"1", "1.0", "1.0.0"}}, {RefTypePull, "master", &Inputs{AutoTag: true, Tags: []string{}}, []string{"pr-master"}}, } for _, c := range cases { resolveAutoTag(c.typ, c.name, c.inputs) if !reflect.DeepEqual(c.expects, c.inputs.Tags) { t.Errorf("expect tag list is %v, actual is %v", c.expects, c.inputs.Tags) } } }
explode_data.jsonl/29141
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 500 }
[ 2830, 3393, 77291, 13253, 5668, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 25314, 257, 914, 198, 197, 11609, 262, 914, 198, 197, 22427, 82, 220, 353, 31946, 198, 197, 8122, 7973, 3056, 917, 198, 197, 59403, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestParseValidPos(t *testing.T) { fname, offset, err := parsePos("foo.go:#123") if fname != "foo.go" { t.Errorf("want foo.go, got %v", fname) } if offset != 123 { t.Errorf("want 123, got %v", 123) } if err != nil { t.Error(err) } }
explode_data.jsonl/28461
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 115 }
[ 2830, 3393, 14463, 4088, 4859, 1155, 353, 8840, 836, 8, 341, 1166, 606, 11, 4347, 11, 1848, 1669, 4715, 4859, 445, 7975, 18002, 14111, 16, 17, 18, 1138, 743, 22548, 961, 330, 7975, 18002, 1, 341, 197, 3244, 13080, 445, 52657, 15229, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestACL_PolicyMerge(t *testing.T) { t.Run("root-ns", func(t *testing.T) { t.Parallel() testACLPolicyMerge(t, namespace.RootNamespace) }) }
explode_data.jsonl/15270
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 66 }
[ 2830, 3393, 55393, 1088, 8018, 52096, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 2888, 12, 4412, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 3244, 41288, 7957, 741, 197, 18185, 1706, 12567, 8018, 52096, 1155, 11, 4473, 45345, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestUnkownStorageURIPrefixFails(t *testing.T) { g := gomega.NewGomegaWithT(t) isvc := makeTestInferenceService() isvc.Spec.Default.Predictor.Tensorflow.StorageURI = "blob://foo/bar" g.Expect(isvc.validate(c)).ShouldNot(gomega.Succeed()) }
explode_data.jsonl/1486
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 104 }
[ 2830, 3393, 1806, 74, 779, 5793, 1511, 3298, 5060, 37, 6209, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 342, 32696, 7121, 38, 32696, 2354, 51, 1155, 340, 19907, 7362, 1669, 1281, 2271, 641, 2202, 1860, 741, 19907, 7362, 36473, 13275, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPool(t *testing.T) { // disable GC so we can control when it happens. defer debug.SetGCPercent(debug.SetGCPercent(-1)) var p Pool if p.Get() != nil { t.Fatal("expected empty") } // Make sure that the goroutine doesn't migrate to another P // between Put and Get calls. Runtime_procPin() p.Put("a") p.Put("b") if g := p.Get(); g != "a" { t.Fatalf("got %#v; want a", g) } if g := p.Get(); g != "b" { t.Fatalf("got %#v; want b", g) } if g := p.Get(); g != nil { t.Fatalf("got %#v; want nil", g) } Runtime_procUnpin() // Put in a large number of objects so they spill into // stealable space. for i := 0; i < 100; i++ { p.Put("c") } // After one GC, the victim cache should keep them alive. runtime.GC() if g := p.Get(); g != "c" { t.Fatalf("got %#v; want c after GC", g) } // A second GC should drop the victim cache. runtime.GC() if g := p.Get(); g != nil { t.Fatalf("got %#v; want nil after second GC", g) } }
explode_data.jsonl/51808
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 395 }
[ 2830, 3393, 10551, 1155, 353, 8840, 836, 8, 341, 197, 322, 11156, 22381, 773, 582, 646, 2524, 979, 432, 8573, 624, 16867, 7390, 4202, 22863, 32010, 42154, 4202, 22863, 32010, 4080, 16, 1171, 2405, 281, 22728, 198, 743, 281, 2234, 368, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestProjectRoleAccess(t *testing.T) { assert := assert.New(t) { ctl := &projecttesting.Controller{} mock.OnAnything(ctl, "Get").Return(public, nil) mock.OnAnything(ctl, "ListRoles").Return([]int{common.RoleProjectAdmin}, nil) user := &models.User{ UserID: 1, Username: "username", } evaluator := NewEvaluator(ctl, NewBuilderForUser(user, ctl)) resorce := NewNamespace(public.ProjectID).Resource(rbac.ResourceRepository) assert.True(evaluator.HasPermission(context.TODO(), resorce, rbac.ActionPush)) } { ctl := &projecttesting.Controller{} mock.OnAnything(ctl, "Get").Return(public, nil) mock.OnAnything(ctl, "ListRoles").Return([]int{common.RoleGuest}, nil) user := &models.User{ UserID: 1, Username: "username", } evaluator := NewEvaluator(ctl, NewBuilderForUser(user, ctl)) resorce := NewNamespace(public.ProjectID).Resource(rbac.ResourceRepository) assert.False(evaluator.HasPermission(context.TODO(), resorce, rbac.ActionPush)) } }
explode_data.jsonl/75361
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 388 }
[ 2830, 3393, 7849, 9030, 6054, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 197, 515, 197, 197, 12373, 1669, 609, 4987, 8840, 29112, 16094, 197, 77333, 8071, 77303, 7, 12373, 11, 330, 1949, 1827, 5598, 31688, 11, 20...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPluginsList(t *testing.T) { var server = mockPluginsServer(t, "testdata/defaults/plugin.yaml", PluginType) defer server.Close() p, err := ContentList(server.URL) if err != nil { t.Fatalf("expected nil but got %v", err) } if 2 != len(p) { t.Fatalf("expected %d but got %v", 2, len(p)) } }
explode_data.jsonl/1911
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 125 }
[ 2830, 3393, 45378, 852, 1155, 353, 8840, 836, 8, 341, 2405, 3538, 284, 7860, 45378, 5475, 1155, 11, 330, 92425, 14, 26756, 51372, 33406, 497, 21245, 929, 340, 16867, 3538, 10421, 741, 3223, 11, 1848, 1669, 8883, 852, 21421, 20893, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestDefaultInspect_UID(t *testing.T) { testDefaultInspectUIDandGID( t, commandIDU, func(inspect Inspect, ctx context.Context, containerID string) (int, error) { return inspect.UID(ctx, containerID) }, ) }
explode_data.jsonl/76037
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 86 }
[ 2830, 3393, 3675, 58533, 69882, 1155, 353, 8840, 836, 8, 341, 18185, 3675, 58533, 6463, 437, 38, 915, 1006, 197, 3244, 345, 197, 45566, 915, 52, 345, 197, 29244, 56337, 987, 9726, 987, 11, 5635, 2266, 9328, 11, 5476, 915, 914, 8, 32...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTransaction_Bytes(t *testing.T) { branchDAG, utxoDAG := setupDependencies(t) defer branchDAG.Shutdown() wallets := createWallets(2) input := generateOutput(utxoDAG, wallets[0].address, 0) tx, _ := singleInputTransaction(utxoDAG, wallets[0], wallets[1], input, false) bytes := tx.Bytes() _tx, _, err := TransactionFromBytes(bytes) assert.NoError(t, err) assert.Equal(t, tx.ID(), _tx.ID()) }
explode_data.jsonl/26248
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 158 }
[ 2830, 3393, 8070, 62, 7078, 1155, 353, 8840, 836, 8, 341, 197, 17940, 35, 1890, 11, 8621, 40822, 35, 1890, 1669, 6505, 48303, 1155, 340, 16867, 8870, 35, 1890, 10849, 18452, 2822, 6692, 7464, 82, 1669, 1855, 38259, 82, 7, 17, 340, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAutoLoopDisabled(t *testing.T) { defer test.Guard(t)() // Set parameters for a channel that will require a swap. channels := []lndclient.ChannelInfo{ channel1, } params := defaultParameters params.ChannelRules = map[lnwire.ShortChannelID]*ThresholdRule{ chanID1: chanRule, } c := newAutoloopTestCtx(t, params, channels) c.start() // We expect a single quote to be required for our swap on channel 1. // We set its quote to have acceptable fees for our current limit. quotes := []quoteRequestResp{ { request: &loop.LoopOutQuoteRequest{ Amount: chan1Rec.Amount, SweepConfTarget: chan1Rec.SweepConfTarget, }, quote: testQuote, }, } // Trigger an autoloop attempt for our test context with no existing // loop in/out swaps. We expect a swap for our channel to be suggested, // but do not expect any swaps to be executed, since autoloop is // disabled by default. c.autoloop(1, chan1Rec.Amount+1, nil, quotes, nil) // Trigger another autoloop, this time setting our server restrictions // to have a minimum swap amount greater than the amount that we need // to swap. In this case we don't even expect to get a quote, because // our suggested swap is beneath the minimum swap size. c.autoloop(chan1Rec.Amount+1, chan1Rec.Amount+2, nil, nil, nil) c.stop() }
explode_data.jsonl/73426
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 441 }
[ 2830, 3393, 13253, 14620, 25907, 1155, 353, 8840, 836, 8, 341, 16867, 1273, 1224, 11034, 1155, 8, 2822, 197, 322, 2573, 5029, 369, 264, 5496, 429, 686, 1373, 264, 14291, 624, 23049, 6680, 1669, 3056, 75, 303, 2972, 38716, 1731, 515, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestResponseRouterBasic(t *testing.T) { receiver := &fakeReceiver{ Responses: []rpcResponse{ {amqpMessageWithCorrelationId("my message id"), nil}, {nil, amqp.ErrLinkClosed}, }, } link := &Link{ responseMap: map[string]chan rpcResponse{ "my message id": make(chan rpcResponse, 1), }, receiver: receiver, } ch := link.responseMap["my message id"] link.startResponseRouter() result := <-ch require.EqualValues(t, result.message.Data[0], []byte("ID was my message id")) require.Empty(t, receiver.Responses) require.Nil(t, link.responseMap, "Response map is nil'd out after we get a closed error") }
explode_data.jsonl/57201
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 234 }
[ 2830, 3393, 2582, 9523, 15944, 1155, 353, 8840, 836, 8, 341, 17200, 12862, 1669, 609, 30570, 25436, 515, 197, 197, 70743, 25, 3056, 29414, 2582, 515, 298, 197, 90, 309, 32763, 2052, 2354, 10580, 22221, 764, 445, 2408, 1943, 877, 3975, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEventAfterCallback(t *testing.T) { addressAfterCheckout := "I'm an address should be set after checkout" OrderStateMachine.Event(OrderEventCheckout).To(OrderStatePaying).From(OrderStateDraft).After(func(order interface{}, tx *gorm.DB) (err error) { order.(*Order).Address = addressAfterCheckout return }) order := &Order{} CreateOrderAndExecuteTransition(order, OrderEventCheckout, t, true) if order.Address != addressAfterCheckout { t.Errorf("After callback not triggered") } }
explode_data.jsonl/44776
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 157 }
[ 2830, 3393, 1556, 6025, 7494, 1155, 353, 8840, 836, 8, 341, 63202, 6025, 55145, 1669, 330, 40, 2776, 458, 2621, 1265, 387, 738, 1283, 27264, 698, 197, 4431, 94666, 6904, 39692, 1556, 55145, 568, 1249, 39692, 1397, 47, 17270, 568, 3830, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestServerLogger(t *testing.T) { cl := &customLogger{} s := &Server{ Handler: func(ctx *RequestCtx) { logger := ctx.Logger() h := &ctx.Request.Header logger.Printf("begin") ctx.Success("text/html", []byte(fmt.Sprintf("requestURI=%s, body=%q, remoteAddr=%s", h.RequestURI(), ctx.Request.Body(), ctx.RemoteAddr()))) logger.Printf("end") }, Logger: cl, } rw := &readWriter{} rw.r.WriteString("GET /foo1 HTTP/1.1\r\nHost: google.com\r\n\r\n") rw.r.WriteString("POST /foo2 HTTP/1.1\r\nHost: aaa.com\r\nContent-Length: 5\r\nContent-Type: aa\r\n\r\nabcde") rwx := &readWriterRemoteAddr{ rw: rw, addr: &net.TCPAddr{ IP: []byte{1, 2, 3, 4}, Port: 8765, }, } globalConnID = 0 ch := make(chan error) go func() { ch <- s.ServeConn(rwx) }() select { case err := <-ch: if err != nil { t.Fatalf("Unexpected error from serveConn: %s", err) } case <-time.After(100 * time.Millisecond): t.Fatalf("timeout") } br := bufio.NewReader(&rw.w) verifyResponse(t, br, 200, "text/html", "requestURI=/foo1, body=\"\", remoteAddr=1.2.3.4:8765") verifyResponse(t, br, 200, "text/html", "requestURI=/foo2, body=\"abcde\", remoteAddr=1.2.3.4:8765") expectedLogOut := `#0000000100000001 - 1.2.3.4:8765<->1.2.3.4:8765 - GET http://google.com/foo1 - begin #0000000100000001 - 1.2.3.4:8765<->1.2.3.4:8765 - GET http://google.com/foo1 - end #0000000100000002 - 1.2.3.4:8765<->1.2.3.4:8765 - POST http://aaa.com/foo2 - begin #0000000100000002 - 1.2.3.4:8765<->1.2.3.4:8765 - POST http://aaa.com/foo2 - end ` if cl.out != expectedLogOut { t.Fatalf("Unexpected logger output: %q. Expected %q", cl.out, expectedLogOut) } }
explode_data.jsonl/73309
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 774 }
[ 2830, 3393, 5475, 7395, 1155, 353, 8840, 836, 8, 341, 39407, 1669, 609, 9163, 7395, 16094, 1903, 1669, 609, 5475, 515, 197, 197, 3050, 25, 2915, 7502, 353, 1900, 23684, 8, 341, 298, 17060, 1669, 5635, 12750, 741, 298, 9598, 1669, 609,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestP224BaseMult(t *testing.T) { p224 := P224() for i, e := range p224BaseMultTests { k, ok := new(big.Int).SetString(e.k, 10) if !ok { t.Errorf("%d: bad value for k: %s", i, e.k) } x, y := p224.ScalarBaseMult(k.Bytes()) if fmt.Sprintf("%x", x) != e.x || fmt.Sprintf("%x", y) != e.y { t.Errorf("%d: bad output for k=%s: got (%x, %x), want (%s, %s)", i, e.k, x, y, e.x, e.y) } if testing.Short() && i > 5 { break } } }
explode_data.jsonl/52841
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 233 }
[ 2830, 3393, 47, 17, 17, 19, 3978, 40404, 1155, 353, 8840, 836, 8, 341, 3223, 17, 17, 19, 1669, 393, 17, 17, 19, 741, 2023, 600, 11, 384, 1669, 2088, 281, 17, 17, 19, 3978, 40404, 18200, 341, 197, 16463, 11, 5394, 1669, 501, 7561...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestCall_GetBlockByNumber_InvalidArgs(t *testing.T) { mockJSONRPC := &mocks.JSONRPC{} mockGraphQL := &mocks.GraphQL{} c := &Client{ c: mockJSONRPC, g: mockGraphQL, traceSemaphore: semaphore.NewWeighted(100), } ctx := context.Background() resp, err := c.Call( ctx, &RosettaTypes.CallRequest{ Method: "eth_getBlockByNumber", Parameters: map[string]interface{}{ "index": "a string", "show_transaction_details": false, }, }, ) assert.Nil(t, resp) assert.True(t, errors.Is(err, ErrCallParametersInvalid)) mockJSONRPC.AssertExpectations(t) mockGraphQL.AssertExpectations(t) }
explode_data.jsonl/55495
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 306 }
[ 2830, 3393, 7220, 13614, 4713, 1359, 2833, 62, 7928, 4117, 1155, 353, 8840, 836, 8, 341, 77333, 5370, 29528, 1669, 609, 16712, 82, 18009, 29528, 16094, 77333, 88637, 1669, 609, 16712, 82, 40237, 3588, 31483, 1444, 1669, 609, 2959, 515, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMultiReverseProxyFromClient(t *testing.T) { p := newMultiHostTestProxy() // This is a full end-end test, so the proxy handler. proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { p.ServeHTTP(w, r) })) defer proxy.Close() // Table tests. var multiProxy = []struct { url string body []byte }{ { "/", upstreamResp1, }, { "/api/", upstreamResp2, }, { "/messages/", upstreamResp1, }, { "/api/messages/?text=cat", upstreamResp2, }, } for _, tt := range multiProxy { // Create client request reqURL := proxy.URL + tt.url req, err := http.NewRequest("GET", reqURL, nil) if err != nil { t.Fatalf("Failed to make request: %v", err) } resp, err := http.DefaultClient.Do(req) if err != nil { t.Fatalf("Failed to make request: %v", err) } body, err := ioutil.ReadAll(resp.Body) resp.Body.Close() if err != nil { t.Fatalf("Failed to read response: %v", err) } if !bytes.Equal(body, tt.body) { t.Errorf("Expected '%s' but got '%s' instead", tt.body, body) } } }
explode_data.jsonl/64238
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 491 }
[ 2830, 3393, 20358, 45695, 16219, 3830, 2959, 1155, 353, 8840, 836, 8, 341, 3223, 1669, 501, 20358, 9296, 2271, 16219, 2822, 197, 322, 1096, 374, 264, 2480, 835, 13068, 1273, 11, 773, 279, 13291, 7013, 624, 197, 22803, 1669, 54320, 70334...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParseCaseStatementMultiple(t *testing.T) { p := createParser(`case 5, 8, 9 {}`) bvmUtils.Assert(t, isCaseStatement(p), "should detect case statement") parseCaseStatement(p) }
explode_data.jsonl/49724
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 66 }
[ 2830, 3393, 14463, 4207, 8636, 32089, 1155, 353, 8840, 836, 8, 341, 3223, 1669, 1855, 6570, 5809, 5638, 220, 20, 11, 220, 23, 11, 220, 24, 4687, 24183, 2233, 7338, 4209, 11711, 1155, 11, 374, 4207, 8636, 1295, 701, 330, 5445, 11140, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestReset(t *testing.T) { cb := circuitbreaker.New(nil) cb.Success() cb.Reset() assert.Equal(t, circuitbreaker.Counters{}, cb.Counters()) cb.Fail() cb.Reset() assert.Equal(t, circuitbreaker.Counters{}, cb.Counters()) }
explode_data.jsonl/8222
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 95 }
[ 2830, 3393, 14828, 1155, 353, 8840, 836, 8, 341, 63810, 1669, 16224, 64121, 7121, 27907, 340, 63810, 33320, 741, 63810, 36660, 741, 6948, 12808, 1155, 11, 16224, 64121, 6134, 388, 22655, 9858, 6134, 388, 12367, 63810, 57243, 741, 63810, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestSetupLoggingFormat(t *testing.T) { c := CaptureLog(t) defer c.Release() EnableWith(log.OffLevel, func() { _, _ = c.(interface { Write(p []byte) (n int, err error) }).Write([]byte("hello")) }) if GetLevel() != log.OffLevel { t.Fatal("wrong level") } log.SetLevel(log.OffLevel) SetupLoggingFormat("any", 1) t.Logf("%v, %v", GetDebugMode(), GetTraceMode()) SetDebugMode(true) SetTraceMode(true) t.Logf("%v, %v, %v", GetDebugMode(), GetTraceMode(), InDebugging()) }
explode_data.jsonl/4688
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 206 }
[ 2830, 3393, 21821, 34575, 4061, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 39885, 2201, 1155, 340, 16867, 272, 58693, 2822, 197, 11084, 2354, 12531, 13, 4596, 4449, 11, 2915, 368, 341, 197, 197, 6878, 716, 284, 272, 12832, 4970, 341, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReadonlyRoot(t *testing.T) { for _, conf := range configs(overlay) { t.Logf("Running test with conf: %+v", conf) spec := testutil.NewSpecWithArgs("/bin/touch", "/foo") spec.Root.Readonly = true rootDir, bundleDir, err := testutil.SetupContainer(spec, conf) if err != nil { t.Fatalf("error setting up container: %v", err) } defer os.RemoveAll(rootDir) defer os.RemoveAll(bundleDir) conf.Overlay = true // Create, start and wait for the container. s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "") if err != nil { t.Fatalf("error creating container: %v", err) } defer s.Destroy() if err := s.Start(conf); err != nil { t.Fatalf("error starting container: %v", err) } ws, err := s.Wait() if err != nil { t.Fatalf("error waiting on container: %v", err) } if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM { t.Fatalf("container failed, waitStatus: %v", ws) } } }
explode_data.jsonl/48927
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 391 }
[ 2830, 3393, 4418, 3243, 8439, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 2335, 1669, 2088, 42309, 7, 21118, 8, 341, 197, 3244, 98954, 445, 18990, 1273, 448, 2335, 25, 68524, 85, 497, 2335, 692, 197, 98100, 1669, 1273, 1314, 7121, 8327,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestLDFlags(t *testing.T) { tmpHub, tmpTag := version.DockerInfo.Hub, version.DockerInfo.Tag defer func() { version.DockerInfo.Hub, version.DockerInfo.Tag = tmpHub, tmpTag }() version.DockerInfo.Hub = "testHub" version.DockerInfo.Tag = "testTag" l := NewLogger(true, os.Stdout, os.Stderr) _, iops, err := GenerateConfig(nil, "", true, nil, l) if err != nil { t.Fatal(err) } if iops.Hub != version.DockerInfo.Hub || iops.Tag != version.DockerInfo.Tag { t.Fatalf("DockerInfoHub, DockerInfoTag got: %s,%s, want: %s, %s", iops.Hub, iops.Tag, version.DockerInfo.Hub, version.DockerInfo.Tag) } }
explode_data.jsonl/55979
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 255 }
[ 2830, 3393, 12335, 9195, 1155, 353, 8840, 836, 8, 341, 20082, 19316, 11, 4174, 5668, 1669, 2319, 909, 13659, 1731, 3839, 392, 11, 2319, 909, 13659, 1731, 23676, 198, 16867, 2915, 368, 341, 197, 74954, 909, 13659, 1731, 3839, 392, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGatherServiceAccounts(t *testing.T) { tests := []struct { name string data []*corev1.ServiceAccount exp string }{ { name: "one account", data: []*corev1.ServiceAccount{&corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "local-storage-operator", Namespace: "default", }, Secrets: []corev1.ObjectReference{corev1.ObjectReference{}}, }}, exp: `{"serviceAccounts":{"TOTAL_COUNT":1,"namespaces":{"default":{"name":"local-storage-operator","secrets":1}}}}`, }, { name: "multiple accounts", data: []*corev1.ServiceAccount{&corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "deployer", Namespace: "openshift", }, Secrets: []corev1.ObjectReference{corev1.ObjectReference{}}, }, &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "openshift-apiserver-sa", Namespace: "openshift-apiserver", }, Secrets: []corev1.ObjectReference{corev1.ObjectReference{}}, }}, exp: `{"serviceAccounts":{"TOTAL_COUNT":2,"namespaces":{"openshift":{"name":"deployer","secrets":1},"openshift-apiserver":{"name":"openshift-apiserver-sa","secrets":1}}}}`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { coreClient := kubefake.NewSimpleClientset() for _, d := range test.data { _, err := coreClient.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: d.Namespace}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create fake ns %s", err) } _, err = coreClient.CoreV1().ServiceAccounts(d.Namespace). 
Create(context.Background(), d, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create fake service account %s", err) } } gatherer := &Gatherer{ctx: context.Background(), coreClient: coreClient.CoreV1()} sa, errs := GatherServiceAccounts(gatherer)() if len(errs) > 0 { t.Fatalf("unexpected errors: %#v", errs) return } bts, err := sa[0].Item.Marshal(context.Background()) if err != nil { t.Fatalf("error marshalling %s", err) } s := string(bts) if test.exp != s { t.Fatalf("serviceaccount test failed. expected: %s got: %s", test.exp, s) } }) } }
explode_data.jsonl/32595
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 975 }
[ 2830, 3393, 38, 1856, 1860, 41369, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 8924, 29838, 98645, 16, 13860, 7365, 198, 197, 48558, 220, 914, 198, 197, 59403, 197, 197, 515, 298, 11609, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestProwJobStatus(t *testing.T) { now := metav1.Now() later := metav1.NewTime(now.Time.Add(1 * time.Hour)) cases := []struct { name string input buildv1alpha1.BuildStatus state prowjobv1.ProwJobState desc string fallback string }{ { name: "empty conditions returns triggered/scheduling", state: prowjobv1.TriggeredState, desc: descScheduling, }, { name: "truly succeeded state returns success", input: buildv1alpha1.BuildStatus{ Conditions: []duckv1alpha1.Condition{ { Type: buildv1alpha1.BuildSucceeded, Status: corev1.ConditionTrue, Message: "fancy", }, }, }, state: prowjobv1.SuccessState, desc: "fancy", fallback: descSucceeded, }, { name: "falsely succeeded state returns failure", input: buildv1alpha1.BuildStatus{ Conditions: []duckv1alpha1.Condition{ { Type: buildv1alpha1.BuildSucceeded, Status: corev1.ConditionFalse, Message: "weird", }, }, }, state: prowjobv1.FailureState, desc: "weird", fallback: descFailed, }, { name: "unstarted job returns triggered/initializing", input: buildv1alpha1.BuildStatus{ Conditions: []duckv1alpha1.Condition{ { Type: buildv1alpha1.BuildSucceeded, Status: corev1.ConditionUnknown, Message: "hola", }, }, }, state: prowjobv1.TriggeredState, desc: "hola", fallback: descInitializing, }, { name: "unfinished job returns running", input: buildv1alpha1.BuildStatus{ StartTime: &now, Conditions: []duckv1alpha1.Condition{ { Type: buildv1alpha1.BuildSucceeded, Status: corev1.ConditionUnknown, Message: "hola", }, }, }, state: prowjobv1.PendingState, desc: "hola", fallback: descRunning, }, { name: "builds with unknown success status are still running", input: buildv1alpha1.BuildStatus{ StartTime: &now, CompletionTime: &later, Conditions: []duckv1alpha1.Condition{ { Type: buildv1alpha1.BuildSucceeded, Status: corev1.ConditionUnknown, Message: "hola", }, }, }, state: prowjobv1.PendingState, desc: "hola", fallback: descRunning, }, { name: "completed builds without a succeeded condition end in error", input: buildv1alpha1.BuildStatus{ StartTime: &now, 
CompletionTime: &later, }, state: prowjobv1.ErrorState, desc: descMissingCondition, }, } for _, tc := range cases { if len(tc.fallback) > 0 { tc.desc = tc.fallback tc.fallback = "" tc.name += " [fallback]" cond := tc.input.Conditions[0] cond.Message = "" tc.input.Conditions = []duckv1alpha1.Condition{cond} cases = append(cases, tc) } } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { state, desc := prowJobStatus(tc.input) if state != tc.state { t.Errorf("state %q != expected %q", state, tc.state) } if desc != tc.desc { t.Errorf("description %q != expected %q", desc, tc.desc) } }) } }
explode_data.jsonl/78936
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1454 }
[ 2830, 3393, 47, 651, 12245, 2522, 1155, 353, 8840, 836, 8, 341, 80922, 1669, 77520, 16, 13244, 741, 8810, 962, 1669, 77520, 16, 7121, 1462, 32263, 16299, 1904, 7, 16, 353, 882, 73550, 1171, 1444, 2264, 1669, 3056, 1235, 341, 197, 1160...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStatefulSetControllerUpdatePodChangeControllerRef(t *testing.T) { ssc, spc, _ := newFakeStatefulSetController() set := newStatefulSet(3) set2 := newStatefulSet(3) set2.Name = "foo2" pod := newStatefulSetPod(set, 0) pod2 := newStatefulSetPod(set2, 0) spc.setsIndexer.Add(set) spc.setsIndexer.Add(set2) clone := *pod clone.OwnerReferences = pod2.OwnerReferences fakeResourceVersion(&clone) ssc.updatePod(&clone, pod) if got, want := ssc.queue.Len(), 2; got != want { t.Errorf("queue.Len() = %v, want %v", got, want) } }
explode_data.jsonl/18618
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 214 }
[ 2830, 3393, 1397, 1262, 1649, 2051, 4289, 23527, 4072, 2051, 3945, 1155, 353, 8840, 836, 8, 341, 34472, 66, 11, 978, 66, 11, 716, 1669, 501, 52317, 1397, 1262, 1649, 2051, 741, 8196, 1669, 36848, 1262, 1649, 7, 18, 340, 8196, 17, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestContextToHTTP(t *testing.T) { tracer := mocktracer.New() tracer.RegisterExtractor( opentracing.HTTPHeaders, &mocktracer.TextMapPropagator{true}, ) contextSpan := tracer.StartSpan("testOp").(*mocktracer.MockSpan) ctx := opentracing.ContextWithSpan(context.Background(), contextSpan) req, _ := http.NewRequest("testmethod", "testurl", nil) ContextToHTTP(ctx, tracer, req) contextSpan.Finish() finishedSpans := tracer.FinishedSpans() assert.Equal(t, 1, len(finishedSpans)) endpointSpan := finishedSpans[0] assert.Equal(t, "testOp", endpointSpan.OperationName) contextContext := contextSpan.Context().(mocktracer.MockSpanContext) endpointContext := endpointSpan.Context().(mocktracer.MockSpanContext) // ...and that the ID is unmodified. assert.Equal(t, contextContext.SpanID, endpointContext.SpanID) assert.Equal(t, 3, len(req.Header)) }
explode_data.jsonl/28901
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 317 }
[ 2830, 3393, 1972, 1249, 9230, 1155, 353, 8840, 836, 8, 341, 25583, 9584, 1669, 7860, 94941, 7121, 741, 25583, 9584, 19983, 56118, 1006, 197, 39703, 23745, 4527, 27358, 10574, 11, 609, 16712, 94941, 1979, 2227, 2008, 351, 850, 90, 1866, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { st := newServerTester(t, nil) defer st.Close() st.greet() if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil { t.Fatal(err) } gf := st.wantGoAway() if gf.ErrCode != ErrCodeFlowControl { t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl) } if gf.LastStreamID != 0 { t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0) } }
explode_data.jsonl/71642
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 202 }
[ 2830, 3393, 5475, 46267, 2646, 78, 78284, 1566, 1046, 1668, 538, 355, 60649, 4289, 1155, 353, 8840, 836, 8, 341, 18388, 1669, 501, 5475, 58699, 1155, 11, 2092, 340, 16867, 357, 10421, 741, 18388, 1302, 3744, 741, 743, 1848, 1669, 357, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestTop002(t *testing.T) { stats := "ert ert ert ert ert hgf hgf hgf hgf asf asf asf asd asd qwe" want := []string{"ert", "hgf", "asf", "asd", "qwe"} got := Top10(stats) if !equalSlice(got, want) { t.Errorf("Top10(stats) == %q, want %q", got, want) } }
explode_data.jsonl/59203
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 129 }
[ 2830, 3393, 5366, 15, 15, 17, 1155, 353, 8840, 836, 8, 341, 79659, 1669, 330, 529, 220, 529, 220, 529, 220, 529, 220, 529, 305, 45124, 305, 45124, 305, 45124, 305, 45124, 438, 69, 438, 69, 438, 69, 438, 67, 438, 67, 2804, 896, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestIntegerFilterArrayCursor(t *testing.T) { var i int expr := MockExpression{ EvalBoolFunc: func(v Valuer) bool { i++ return i%2 == 0 }, } var resultN int ac := MockIntegerArrayCursor{ CloseFunc: func() {}, ErrFunc: func() error { return nil }, StatsFunc: func() cursors.CursorStats { return cursors.CursorStats{} }, NextFunc: func() *cursors.IntegerArray { resultN++ if resultN == 4 { return cursors.NewIntegerArrayLen(0) } return cursors.NewIntegerArrayLen(900) }, } c := newIntegerFilterArrayCursor(&expr) c.reset(&ac) if got, want := len(c.Next().Timestamps), 1000; got != want { t.Fatalf("len(Next())=%d, want %d", got, want) } else if got, want := len(c.Next().Timestamps), 350; got != want { t.Fatalf("len(Next())=%d, want %d", got, want) } }
explode_data.jsonl/40970
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 338 }
[ 2830, 3393, 3486, 5632, 1857, 14543, 1155, 353, 8840, 836, 8, 341, 2405, 600, 526, 198, 8122, 649, 1669, 14563, 9595, 515, 197, 22784, 831, 11233, 9626, 25, 2915, 3747, 4104, 8801, 8, 1807, 341, 298, 8230, 22940, 298, 853, 600, 4, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1