text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestScope(t *testing.T) { tests := [][]string{ { "https://www.googleapis.com/auth/somescope", "SomescopeScope", }, { "https://mail.google.com/somescope", "MailGoogleComSomescopeScope", }, { "https://mail.google.com/", "MailGoogleComScope", }, } for _, test := range tests { if got := scopeIdentifierFromURL(test[0]); got != test[1] { t.Errorf("scopeIdentifierFromURL(%q) got %q, want %q", test[0], got, test[1]) } } }
explode_data.jsonl/77377
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 214 }
[ 2830, 3393, 10803, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 52931, 917, 515, 197, 197, 515, 298, 197, 57557, 1110, 2136, 19758, 905, 17369, 2687, 20347, 2417, 756, 298, 197, 66310, 20347, 2417, 10803, 756, 197, 197, 1583, 197, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStatusCache(t *testing.T) { protocolVersion := 755 cooldown := time.Minute statusPacket := mc.SimpleStatus{ Name: "ultraviolet", Protocol: protocolVersion, Description: "some random motd text", }.Marshal() t.Run("normal flow", func(t *testing.T) { errCh := make(chan error) answerCh := make(chan ultraviolet.ProcessAnswer) c1, c2 := net.Pipe() connCreator := statusCacheConnCreator{conn: c1} statusCache := ultraviolet.NewStatusCache(protocolVersion, cooldown, connCreator) simulator := serverSimulator{} go func() { err := simulator.simulateServerStatus(c2, statusPacket) if err != nil { errCh <- err } }() go func() { answer, err := statusCache.Status() if err != nil { errCh <- err } answerCh <- answer }() var answer ultraviolet.ProcessAnswer select { case answer = <-answerCh: t.Log("worker has successfully responded") case err := <-errCh: t.Fatalf("didnt expect an error but got: %v", err) case <-time.After(defaultChTimeout): t.Fatal("timed out") } if answer.Action() != ultraviolet.SEND_STATUS { t.Errorf("expected %v but got %v instead", ultraviolet.SEND_STATUS, answer.Action()) } if !samePK(statusPacket, answer.Response()) { t.Error("received different packet than we expected!") t.Logf("expected: %v", statusPacket) t.Logf("received: %v", answer.Response()) } if !(answer.Latency() > 0) { t.Errorf("expected a latency greater than 0 but got %v", answer.Latency()) } if simulator.callAmount != 1 { t.Errorf("expected backend to be called 1 time but got called %v time(s)", simulator.callAmount) } }) t.Run("doesnt call again while in cooldown", func(t *testing.T) { errCh := make(chan error) targetAddr := testAddr() connCreator := ultraviolet.BasicConnCreator(targetAddr, net.Dialer{}) statusCache := ultraviolet.NewStatusCache(protocolVersion, cooldown, connCreator) simulator := serverSimulator{} go func() { listener, err := net.Listen("tcp", targetAddr) if err != nil { errCh <- err return } for { conn, err := listener.Accept() if err != nil { errCh <- err return } err = 
simulator.simulateServerStatus(conn, statusPacket) if err != nil { errCh <- err } } }() statusCall_TestError(t, &statusCache, errCh) statusCall_TestError(t, &statusCache, errCh) if simulator.callAmount != 1 { t.Errorf("expected backend to be called 1 time but got called %v time(s)", simulator.callAmount) } }) t.Run("does call again after cooldown", func(t *testing.T) { cooldown = time.Microsecond errCh := make(chan error) targetAddr := testAddr() connCreator := ultraviolet.BasicConnCreator(targetAddr, net.Dialer{}) statusCache := ultraviolet.NewStatusCache(protocolVersion, cooldown, connCreator) simulator := serverSimulator{} go func() { listener, err := net.Listen("tcp", targetAddr) if err != nil { errCh <- err return } for { conn, err := listener.Accept() if err != nil { errCh <- err return } err = simulator.simulateServerStatus(conn, statusPacket) if err != nil { errCh <- err } } }() statusCall_TestError(t, &statusCache, errCh) time.Sleep(cooldown) statusCall_TestError(t, &statusCache, errCh) if simulator.callAmount != 2 { t.Errorf("expected backend to be called 2 time but got called %v time(s)", simulator.callAmount) } }) t.Run("returns with error when connCreator returns error ", func(t *testing.T) { t.Run("with conn being nil", func(t *testing.T) { usedError := errors.New("cant create connection") connCreator := statusCacheConnCreator{err: usedError, conn: nil} statusCache := ultraviolet.NewStatusCache(protocolVersion, cooldown, connCreator) _, err := statusCache.Status() if !errors.Is(err, usedError) { t.Errorf("expected an error but something else: %v", err) } }) t.Run("with conn being an connection", func(t *testing.T) { usedError := errors.New("cant create connection") connCreator := statusCacheConnCreator{err: usedError, conn: &net.TCPConn{}} statusCache := ultraviolet.NewStatusCache(protocolVersion, cooldown, connCreator) _, err := statusCache.Status() if !errors.Is(err, usedError) { t.Errorf("expected an error but something else: %v", err) } }) }) 
t.Run("test closing connection early", func(t *testing.T) { tt := []struct { matchStatus bool shouldReturnError bool closeConnByStep int }{ { matchStatus: false, shouldReturnError: true, closeConnByStep: 1, }, { matchStatus: false, shouldReturnError: true, closeConnByStep: 2, }, { matchStatus: false, shouldReturnError: true, closeConnByStep: 3, }, { matchStatus: true, shouldReturnError: false, closeConnByStep: 4, }, { matchStatus: true, shouldReturnError: false, closeConnByStep: 5, }, } for _, tc := range tt { name := fmt.Sprintf("closeConnBy:%v", tc.closeConnByStep) t.Run(name, func(t *testing.T) { errCh := make(chan error) answerCh := make(chan ultraviolet.ProcessAnswer) c1, c2 := net.Pipe() connCreator := statusCacheConnCreator{conn: c1} statusCache := ultraviolet.NewStatusCache(protocolVersion, cooldown, connCreator) simulator := serverSimulator{ closeConnByStep: tc.closeConnByStep, } go func() { err := simulator.simulateServerStatus(c2, statusPacket) if err != nil { errCh <- err } }() go func() { answer, err := statusCache.Status() if err != nil { errCh <- err } answerCh <- answer }() var answer ultraviolet.ProcessAnswer var err error select { case answer = <-answerCh: t.Log("worker has successfully responded") case err = <-errCh: if !tc.shouldReturnError { t.Fatalf("didnt expect an error but got: %v", err) } case <-time.After(defaultChTimeout): t.Fatal("timed out") } if err == nil && tc.shouldReturnError { t.Fatal("expected an error but got nothing") } if tc.matchStatus && !samePK(statusPacket, answer.Response()) { t.Error("received different packet than we expected!") t.Logf("expected: %v", statusPacket) t.Logf("received: %v", answer.Response()) } }) } }) }
explode_data.jsonl/45423
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2588 }
[ 2830, 3393, 2522, 8233, 1155, 353, 8840, 836, 8, 341, 197, 17014, 5637, 1669, 220, 22, 20, 20, 198, 197, 90396, 1669, 882, 75770, 198, 23847, 16679, 1669, 19223, 24252, 2522, 515, 197, 21297, 25, 286, 330, 360, 376, 84211, 756, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRefreshTokenRenewWrongIssueTimed(t *testing.T) { s := newTestService(t) ctx := context.Background() clientID := datastore.NameKey(kindClient, newRandomID(), nil) rt := &hubauth.RefreshToken{ ClientID: clientID.Encode(), CodeID: datastore.NameKey(kindCode, newRandomID(), clientID).Encode(), UserID: "foo@example.com", IssueTime: time.Now().Add(-5 * time.Minute), ExpiryTime: time.Now().Add(time.Minute), } id, err := s.CreateRefreshToken(ctx, rt) require.NoError(t, err) now := time.Now() _, err = s.RenewRefreshToken(ctx, rt.ClientID, id, now, now) require.Truef(t, errors.Is(err, hubauth.ErrRefreshTokenVersionMismatch), "wrong err %v", err) _, err = s.GetRefreshToken(ctx, id) require.Truef(t, errors.Is(err, hubauth.ErrNotFound), "wrong err %v", err) }
explode_data.jsonl/56427
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 325 }
[ 2830, 3393, 14567, 3323, 34625, 365, 29185, 42006, 20217, 291, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 501, 2271, 1860, 1155, 340, 20985, 1669, 2266, 19047, 2822, 25291, 915, 1669, 64986, 2967, 1592, 62697, 2959, 11, 501, 13999, 915, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreateJob_ThroughWorkflowSpec(t *testing.T) { store, _, job := initWithJob(t) defer store.Close() expectedJob := &model.Job{ UUID: "123", DisplayName: "j1", Name: "j1", Namespace: "default", Enabled: true, CreatedAtInSec: 2, UpdatedAtInSec: 2, Conditions: "NO_STATUS", PipelineSpec: model.PipelineSpec{ WorkflowSpecManifest: testWorkflow.ToStringForStore(), }, ResourceReferences: []*model.ResourceReference{ { ResourceUUID: "123", ResourceType: common.Job, ReferenceUUID: DefaultFakeUUID, ReferenceType: common.Experiment, Relationship: common.Owner, }, }, } assert.Equal(t, expectedJob, job) }
explode_data.jsonl/28375
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 314 }
[ 2830, 3393, 4021, 12245, 62, 23857, 62768, 8327, 1155, 353, 8840, 836, 8, 341, 57279, 11, 8358, 2618, 1669, 13864, 12245, 1155, 340, 16867, 3553, 10421, 741, 42400, 12245, 1669, 609, 2528, 45293, 515, 197, 15980, 6463, 25, 1843, 330, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAllGrpcClientSettings(t *testing.T) { tt, err := obsreporttest.SetupTelemetry() require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) gcs := &GRPCClientSettings{ Headers: map[string]string{ "test": "test", }, Endpoint: "localhost:1234", Compression: "gzip", TLSSetting: configtls.TLSClientSetting{ Insecure: false, }, Keepalive: &KeepaliveClientConfig{ Time: time.Second, Timeout: time.Second, PermitWithoutStream: true, }, ReadBufferSize: 1024, WriteBufferSize: 1024, WaitForReady: true, BalancerName: "round_robin", Auth: &configauth.Authentication{AuthenticatorID: config.NewComponentID("testauth")}, } host := &mockHost{ ext: map[config.ComponentID]component.Extension{ config.NewComponentID("testauth"): &configauth.MockClientAuthenticator{}, }, } opts, err := gcs.ToDialOptions(host, tt.TelemetrySettings) assert.NoError(t, err) assert.Len(t, opts, 9) }
explode_data.jsonl/80324
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 430 }
[ 2830, 3393, 2403, 6464, 3992, 2959, 6086, 1155, 353, 8840, 836, 8, 341, 3244, 83, 11, 1848, 1669, 7448, 11736, 1944, 39820, 6639, 35958, 741, 17957, 35699, 1155, 11, 1848, 340, 3244, 727, 60639, 18552, 368, 314, 1373, 35699, 1155, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestThresholdBreakerResets(t *testing.T) { called := 0 success := false circuit := func() error { if called == 0 { called++ return fmt.Errorf("error") } success = true return nil } c := clock.NewMock() cb := NewThresholdBreaker(1) cb.Clock = c err := cb.Call(circuit, 0) if err == nil { t.Fatal("Expected cb to return an error") } c.Add(cb.nextBackOff + 1) for i := 0; i < 4; i++ { err = cb.Call(circuit, 0) if err != nil { t.Fatal("Expected cb to be successful") } if !success { t.Fatal("Expected cb to have been reset") } } }
explode_data.jsonl/60804
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 255 }
[ 2830, 3393, 37841, 22524, 261, 1061, 1415, 1155, 353, 8840, 836, 8, 341, 1444, 4736, 1669, 220, 15, 198, 30553, 1669, 895, 198, 1444, 37268, 1669, 2915, 368, 1465, 341, 197, 743, 2598, 621, 220, 15, 341, 298, 1444, 4736, 22940, 298, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestOnClusterSetAdd(t *testing.T) { cases := []struct { name string obj interface{} initObjs []runtime.Object queuedKeys []string }{ { name: "invalid object type", obj: "invalid object type", }, { name: "clusterset", obj: testinghelpers.NewClusterSet("clusterset1"), initObjs: []runtime.Object{ testinghelpers.NewClusterSetBinding("ns1", "clusterset1"), testinghelpers.NewPlacement("ns1", "placement1").Build(), }, queuedKeys: []string{ "ns1/placement1", }, }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) clusterInformerFactory := testinghelpers.NewClusterInformerFactory(clusterClient, c.initObjs...) queuedKeys := sets.NewString() handler := &clusterSetEventHandler{ clusterSetBindingLister: clusterInformerFactory.Cluster().V1beta1().ManagedClusterSetBindings().Lister(), placementLister: clusterInformerFactory.Cluster().V1alpha1().Placements().Lister(), enqueuePlacementFunc: func(namespace, name string) { queuedKeys.Insert(fmt.Sprintf("%s/%s", namespace, name)) }, } handler.OnAdd(c.obj) expectedQueuedKeys := sets.NewString(c.queuedKeys...) if !queuedKeys.Equal(expectedQueuedKeys) { t.Errorf("expected queued placements %q, but got %s", strings.Join(expectedQueuedKeys.List(), ","), strings.Join(queuedKeys.List(), ",")) } }) } }
explode_data.jsonl/45617
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 601 }
[ 2830, 3393, 1925, 28678, 1649, 2212, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 11609, 981, 914, 198, 197, 22671, 286, 3749, 16094, 197, 28248, 4121, 2519, 256, 3056, 22255, 8348, 198, 197, 197, 66547, 8850, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCheckDuplicates(t *testing.T) { g := goblin.Goblin(t) g.Describe("Check duplicates", func() { var arr []int g.BeforeEach(func() { arr = []int{1, 2, 3, 4, 2, 11, 27, 1, 65, 65, 3} }) g.It("Should return expected map of duplicates", func() { dups := s.CheckDuplicates(arr) g.Assert(dups[1]).IsTrue() g.Assert(dups[2]).IsTrue() g.Assert(dups[3]).IsTrue() g.Assert(dups[65]).IsTrue() g.Assert(dups[27]).IsFalse() }) }) }
explode_data.jsonl/82304
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 221 }
[ 2830, 3393, 3973, 76851, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 342, 47061, 1224, 47061, 1155, 692, 3174, 23548, 3114, 445, 3973, 42328, 497, 2915, 368, 1476, 197, 2405, 2890, 3056, 396, 271, 197, 3174, 31153, 4854, 18552, 368, 341, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAccAWSDBInstance_basic(t *testing.T) { var v rds.DBInstance resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSDBInstanceDestroy, Steps: []resource.TestStep{ { Config: testAccAWSDBInstanceConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), testAccCheckAWSDBInstanceAttributes(&v), resource.TestCheckResourceAttr( "aws_db_instance.bar", "allocated_storage", "10"), resource.TestCheckResourceAttr( "aws_db_instance.bar", "engine", "mysql"), resource.TestCheckResourceAttr( "aws_db_instance.bar", "license_model", "general-public-license"), resource.TestCheckResourceAttr( "aws_db_instance.bar", "instance_class", "db.t1.micro"), resource.TestCheckResourceAttr( "aws_db_instance.bar", "name", "baz"), resource.TestCheckResourceAttr( "aws_db_instance.bar", "username", "foo"), resource.TestCheckResourceAttr( "aws_db_instance.bar", "parameter_group_name", "default.mysql5.6"), resource.TestCheckResourceAttrSet("aws_db_instance.bar", "hosted_zone_id"), ), }, }, }) }
explode_data.jsonl/33915
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 513 }
[ 2830, 3393, 14603, 36136, 3506, 2523, 34729, 1155, 353, 8840, 836, 8, 341, 2405, 348, 435, 5356, 22537, 2523, 271, 50346, 8787, 1155, 11, 5101, 31363, 515, 197, 197, 4703, 3973, 25, 257, 2915, 368, 314, 1273, 14603, 4703, 3973, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTopologyBuilder_BuildIgnoresNamespaces(t *testing.T) { selectorAppA := map[string]string{"app": "app-a"} selectorAppB := map[string]string{"app": "app-b"} annotations := map[string]string{ "maesh.containo.us/traffic-type": "http", "maesh.containo.us/ratelimit-average": "100", "maesh.containo.us/ratelimit-burst": "200", } svcbPorts := []corev1.ServicePort{svcPort("port-8080", 8080, 8080)} svccPorts := []corev1.ServicePort{svcPort("port-9091", 9091, 9091)} svcdPorts := []corev1.ServicePort{svcPort("port-9092", 9092, 9092)} saA := createServiceAccount("ignored-ns", "service-account-a") podA := createPod("ignored-ns", "app-a", saA, selectorAppA, "10.10.1.1") saB := createServiceAccount("ignored-ns", "service-account-b") svcB := createService("ignored-ns", "svc-b", annotations, svcbPorts, selectorAppB, "10.10.1.16") podB := createPod("ignored-ns", "app-b", saB, svcB.Spec.Selector, "10.10.2.1") svcC := createService("ignored-ns", "svc-c", annotations, svccPorts, selectorAppA, "10.10.1.17") svcD := createService("ignored-ns", "svc-d", annotations, svcdPorts, selectorAppA, "10.10.1.18") apiMatch := createHTTPMatch("api", []string{"GET", "POST"}, "/api") metricMatch := createHTTPMatch("metric", []string{"GET"}, "/metric") rtGrp := createHTTPRouteGroup("ignored-ns", "http-rt-grp-ignored", []spec.HTTPMatch{apiMatch, metricMatch}) tt := createTrafficTarget("ignored-ns", "tt", saB, "8080", []*corev1.ServiceAccount{saA}, rtGrp, []string{}) ts := createTrafficSplit("ignored-ns", "ts", svcB, svcC, svcD) k8sClient := fake.NewSimpleClientset(saA, podA, saB, svcB, podB, svcC, svcD) smiAccessClient := accessfake.NewSimpleClientset(tt) smiSplitClient := splitfake.NewSimpleClientset(ts) smiSpecClient := specfake.NewSimpleClientset(rtGrp) builder, err := createBuilder(k8sClient, smiAccessClient, smiSpecClient, smiSplitClient) require.NoError(t, err) ignoredResources := mk8s.NewIgnored() ignoredResources.AddIgnoredNamespace("ignored-ns") got, err := 
builder.Build(ignoredResources) require.NoError(t, err) want := &topology.Topology{ Services: make(map[topology.Key]*topology.Service), Pods: make(map[topology.Key]*topology.Pod), ServiceTrafficTargets: make(map[topology.ServiceTrafficTargetKey]*topology.ServiceTrafficTarget), TrafficSplits: make(map[topology.Key]*topology.TrafficSplit), } assert.Equal(t, want, got) }
explode_data.jsonl/48673
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 993 }
[ 2830, 3393, 60954, 3297, 96686, 40, 70, 2152, 416, 7980, 27338, 1155, 353, 8840, 836, 8, 341, 197, 8925, 2164, 32, 1669, 2415, 14032, 30953, 4913, 676, 788, 330, 676, 7409, 16707, 197, 8925, 2164, 33, 1669, 2415, 14032, 30953, 4913, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetBestLocalAddress(t *testing.T) { localAddrs := []wire.NetAddress{ {IP: net.ParseIP("192.168.0.100")}, {IP: net.ParseIP("::1")}, {IP: net.ParseIP("fe80::1")}, {IP: net.ParseIP("2001:470::1")}, } var tests = []struct { remoteAddr wire.NetAddress want0 wire.NetAddress want1 wire.NetAddress want2 wire.NetAddress want3 wire.NetAddress }{ { // Remote connection from public IPv4 wire.NetAddress{IP: net.ParseIP("204.124.8.1")}, wire.NetAddress{IP: net.IPv4zero}, wire.NetAddress{IP: net.IPv4zero}, wire.NetAddress{IP: net.ParseIP("204.124.8.100")}, wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}, }, { // Remote connection from private IPv4 wire.NetAddress{IP: net.ParseIP("172.16.0.254")}, wire.NetAddress{IP: net.IPv4zero}, wire.NetAddress{IP: net.IPv4zero}, wire.NetAddress{IP: net.IPv4zero}, wire.NetAddress{IP: net.IPv4zero}, }, { // Remote connection from public IPv6 wire.NetAddress{IP: net.ParseIP("2602:100:abcd::102")}, wire.NetAddress{IP: net.IPv6zero}, wire.NetAddress{IP: net.ParseIP("2001:470::1")}, wire.NetAddress{IP: net.ParseIP("2001:470::1")}, wire.NetAddress{IP: net.ParseIP("2001:470::1")}, }, /* XXX { // Remote connection from Tor wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43::100")}, wire.NetAddress{IP: net.IPv4zero}, wire.NetAddress{IP: net.ParseIP("204.124.8.100")}, wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}, }, */ } amgr := New("testgetbestlocaladdress", nil) // Test against default when there's no address for x, test := range tests { got := amgr.GetBestLocalAddress(&test.remoteAddr) if !test.want0.IP.Equal(got.IP) { t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s", x, test.remoteAddr.IP, test.want1.IP, got.IP) continue } } for _, localAddr := range localAddrs { amgr.AddLocalAddress(&localAddr, InterfacePrio) } // Test against want1 for x, test := range tests { got := amgr.GetBestLocalAddress(&test.remoteAddr) if !test.want1.IP.Equal(got.IP) { t.Errorf("TestGetBestLocalAddress test1 
#%d failed for remote address %s: want %s got %s", x, test.remoteAddr.IP, test.want1.IP, got.IP) continue } } // Add a public IP to the list of local addresses. localAddr := wire.NetAddress{IP: net.ParseIP("204.124.8.100")} amgr.AddLocalAddress(&localAddr, InterfacePrio) // Test against want2 for x, test := range tests { got := amgr.GetBestLocalAddress(&test.remoteAddr) if !test.want2.IP.Equal(got.IP) { t.Errorf("TestGetBestLocalAddress test2 #%d failed for remote address %s: want %s got %s", x, test.remoteAddr.IP, test.want2.IP, got.IP) continue } } /* // Add a Tor generated IP address localAddr = wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")} amgr.AddLocalAddress(&localAddr, ManualPrio) // Test against want3 for x, test := range tests { got := amgr.GetBestLocalAddress(&test.remoteAddr) if !test.want3.IP.Equal(got.IP) { t.Errorf("TestGetBestLocalAddress test3 #%d failed for remote address %s: want %s got %s", x, test.remoteAddr.IP, test.want3.IP, got.IP) continue } } */ }
explode_data.jsonl/26483
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1410 }
[ 2830, 3393, 1949, 14470, 7319, 4286, 1155, 353, 8840, 836, 8, 341, 8854, 2212, 5428, 1669, 3056, 35531, 16993, 4286, 515, 197, 197, 90, 3298, 25, 4179, 8937, 3298, 445, 16, 24, 17, 13, 16, 21, 23, 13, 15, 13, 16, 15, 15, 79583, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestSignAndVerify(t *testing.T) { priv := PrivateKey{ PublicKey: PublicKey{ Parameters: Parameters{ P: fromHex("A9B5B793FB4785793D246BAE77E8FF63CA52F442DA763C440259919FE1BC1D6065A9350637A04F75A2F039401D49F08E066C4D275A5A65DA5684BC563C14289D7AB8A67163BFBF79D85972619AD2CFF55AB0EE77A9002B0EF96293BDD0F42685EBB2C66C327079F6C98000FBCB79AACDE1BC6F9D5C7B1A97E3D9D54ED7951FEF"), Q: fromHex("E1D3391245933D68A0714ED34BBCB7A1F422B9C1"), G: fromHex("634364FC25248933D01D1993ECABD0657CC0CB2CEED7ED2E3E8AECDFCDC4A25C3B15E9E3B163ACA2984B5539181F3EFF1A5E8903D71D5B95DA4F27202B77D2C44B430BB53741A8D59A8F86887525C9F2A6A5980A195EAA7F2FF910064301DEF89D3AA213E1FAC7768D89365318E370AF54A112EFBA9246D9158386BA1B4EEFDA"), }, Y: fromHex("32969E5780CFE1C849A1C276D7AEB4F38A23B591739AA2FE197349AEEBD31366AEE5EB7E6C6DDB7C57D02432B30DB5AA66D9884299FAA72568944E4EEDC92EA3FBC6F39F53412FBCC563208F7C15B737AC8910DBC2D9C9B8C001E72FDC40EB694AB1F06A5A2DBD18D9E36C66F31F566742F11EC0A52E9F7B89355C02FB5D32D2"), }, X: fromHexNat("5078D4D29795CBE76D3AACFE48C9AF0BCDBEE91A"), } testSignAndVerify(t, 0, &priv) }
explode_data.jsonl/23308
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 653 }
[ 2830, 3393, 7264, 3036, 32627, 1155, 353, 8840, 836, 8, 341, 71170, 1669, 9679, 1592, 515, 197, 73146, 1592, 25, 70280, 515, 298, 197, 9706, 25, 13522, 515, 571, 10025, 25, 504, 20335, 445, 32, 24, 33, 20, 33, 22, 24, 18, 16208, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRead_single(t *testing.T) { e, err := Read(strings.NewReader(testSingleText)) if err != nil { t.Fatalf("Read() = %v", err) } b, err := ioutil.ReadAll(e.Body) if err != nil { t.Fatalf("ioutil.ReadAll() = %v", err) } expected := "Message body" if string(b) != expected { t.Fatalf("Expected body to be %q, got %q", expected, string(b)) } }
explode_data.jsonl/30551
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 153 }
[ 2830, 3393, 4418, 19487, 1155, 353, 8840, 836, 8, 341, 7727, 11, 1848, 1669, 4457, 51442, 68587, 8623, 10888, 1178, 1171, 743, 1848, 961, 2092, 341, 197, 3244, 30762, 445, 4418, 368, 284, 1018, 85, 497, 1848, 340, 197, 630, 2233, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestStoreBolt(t *testing.T) { tmp := path.Join(os.TempDir(), "drandtest") require.NoError(t, os.MkdirAll(tmp, 0755)) path := tmp defer os.RemoveAll(tmp) var sig1 = []byte{0x01, 0x02, 0x03} var sig2 = []byte{0x02, 0x03, 0x04} store, err := NewBoltStore(path, nil) require.NoError(t, err) require.Equal(t, 0, store.Len()) b1 := &Beacon{ PreviousSig: sig1, Round: 145, Signature: sig2, } b2 := &Beacon{ PreviousSig: sig2, Round: 146, Signature: sig1, } require.NoError(t, store.Put(b1)) require.Equal(t, 1, store.Len()) require.NoError(t, store.Put(b1)) require.Equal(t, 1, store.Len()) require.NoError(t, store.Put(b2)) require.Equal(t, 2, store.Len()) received, err := store.Last() require.NoError(t, err) require.Equal(t, b2, received) store.Close() store, err = NewBoltStore(path, nil) require.NoError(t, err) require.NoError(t, store.Put(b1)) doneCh := make(chan bool) callback := func(b *Beacon) { require.Equal(t, b1, b) doneCh <- true } store = NewCallbackStore(store, callback) go store.Put(b1) select { case <-doneCh: return case <-time.After(50 * time.Millisecond): t.Fail() } store, err = NewBoltStore(path, nil) require.NoError(t, err) store.Put(b1) store.Put(b2) store.Cursor(func(c Cursor) { expecteds := []*Beacon{b1, b2} i := 0 for b := c.First(); b != nil; b = c.Next() { require.True(t, expecteds[i].Equal(b)) i++ } }) }
explode_data.jsonl/60225
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 667 }
[ 2830, 3393, 6093, 33, 6181, 1155, 353, 8840, 836, 8, 341, 20082, 1669, 1815, 22363, 9638, 65009, 6184, 1507, 330, 3612, 437, 1944, 1138, 17957, 35699, 1155, 11, 2643, 1321, 12438, 2403, 10368, 11, 220, 15, 22, 20, 20, 1171, 26781, 166...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_FindProcessInstanceById_ComfortFunction_ReturnsNilIfNoInstanceFound(t *testing.T) { engine := New("name") instanceInfo := engine.FindProcessInstanceById(1234) then.AssertThat(t, instanceInfo, is.Nil()) }
explode_data.jsonl/71754
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 70 }
[ 2830, 3393, 95245, 7423, 2523, 2720, 16946, 3969, 5152, 53316, 82, 19064, 2679, 2753, 2523, 6650, 1155, 353, 8840, 836, 8, 341, 80118, 1669, 1532, 445, 606, 1138, 56256, 1731, 1669, 4712, 9998, 7423, 2523, 2720, 7, 16, 17, 18, 19, 340...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestSliceProducesElementsOfSpecifiedSliceOnePerLine(t *testing.T) { t.Parallel() want := "1\n2\n3\n" got, err := script.Slice([]string{"1", "2", "3"}).String() if err != nil { t.Fatal(err) } if !cmp.Equal(want, got) { t.Error(cmp.Diff(want, got)) } }
explode_data.jsonl/51516
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 125 }
[ 2830, 3393, 33236, 49112, 11868, 2124, 8327, 1870, 33236, 3966, 3889, 2460, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 50780, 1669, 330, 16, 1699, 17, 1699, 18, 1699, 698, 3174, 354, 11, 1848, 1669, 5316, 95495, 10556, 917, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestLog_RemoveService_200(t *testing.T) { // setup context gin.SetMode(gin.TestMode) s := httptest.NewServer(server.FakeHandler()) c, _ := NewClient(s.URL, "", nil) // run test _, resp, err := c.Log.RemoveService("github", "octocat", 1, 1) if err != nil { t.Errorf("New returned err: %v", err) } if resp.StatusCode != http.StatusOK { t.Errorf("Log returned %v, want %v", resp.StatusCode, http.StatusOK) } }
explode_data.jsonl/8764
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 169 }
[ 2830, 3393, 2201, 66843, 1860, 62, 17, 15, 15, 1155, 353, 8840, 836, 8, 341, 197, 322, 6505, 2266, 198, 3174, 258, 4202, 3636, 3268, 258, 8787, 3636, 692, 1903, 1669, 54320, 70334, 7121, 5475, 21421, 991, 726, 3050, 2398, 1444, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestExpandPublicNiftyModifyAddressAttributeInput(t *testing.T) { rd := schema.TestResourceDataRaw(t, newSchema(), map[string]interface{}{ "ip_type": false, "public_ip": "192.0.2.1", "description": "test_description", }) rd.SetId("192.0.2.1") tests := []struct { name string args *schema.ResourceData want *computing.NiftyModifyAddressAttributeInput }{ { name: "expands the resource data", args: rd, want: &computing.NiftyModifyAddressAttributeInput{ PublicIp: nifcloud.String("192.0.2.1"), Attribute: "description", Value: nifcloud.String("test_description"), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := expandNiftyModifyAddressAttributeInput(tt.args) assert.Equal(t, tt.want, got) }) } }
explode_data.jsonl/35515
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 334 }
[ 2830, 3393, 38946, 12676, 45, 38624, 44427, 4286, 3907, 2505, 1155, 353, 8840, 836, 8, 341, 92356, 1669, 10802, 8787, 4783, 1043, 20015, 1155, 11, 501, 8632, 1507, 2415, 14032, 31344, 67066, 197, 197, 1, 573, 1819, 788, 257, 895, 345, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLoadMultipleNetworks(t *testing.T) { base := map[string]interface{}{ "services": map[string]interface{}{ "foo": map[string]interface{}{ "image": "baz", }, }, "volumes": map[string]interface{}{}, "networks": map[string]interface{}{ "hostnet": map[string]interface{}{ "driver": "overlay", "ipam": map[string]interface{}{ "driver": "default", "config": []interface{}{ map[string]interface{}{ "subnet": "10.0.0.0/20", }, }, }, }, }, "secrets": map[string]interface{}{}, "configs": map[string]interface{}{}, } override := map[string]interface{}{ "services": map[string]interface{}{}, "volumes": map[string]interface{}{}, "networks": map[string]interface{}{ "hostnet": map[string]interface{}{ "external": map[string]interface{}{ "name": "host", }, }, }, "secrets": map[string]interface{}{}, "configs": map[string]interface{}{}, } configDetails := types.ConfigDetails{ ConfigFiles: []types.ConfigFile{ {Filename: "base.yml", Config: base}, {Filename: "override.yml", Config: override}, }, } config, err := loadTestProject(configDetails) assert.NilError(t, err) assert.DeepEqual(t, &types.Project{ Name: "", WorkingDir: "", Services: []types.ServiceConfig{ { Name: "foo", Image: "baz", Environment: types.MappingWithEquals{}, Scale: 1, }}, Networks: map[string]types.NetworkConfig{ "hostnet": { Name: "host", External: types.External{ External: true, }, }, }, Volumes: types.Volumes{}, Secrets: types.Secrets{}, Configs: types.Configs{}, Extensions: types.Extensions{}, }, config) }
explode_data.jsonl/59636
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 762 }
[ 2830, 3393, 5879, 32089, 12320, 82, 1155, 353, 8840, 836, 8, 341, 24195, 1669, 2415, 14032, 31344, 67066, 197, 197, 1, 12779, 788, 2415, 14032, 31344, 67066, 298, 197, 1, 7975, 788, 2415, 14032, 31344, 67066, 571, 197, 1, 1805, 788, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMInsert(t *testing.T) { animals := map[string]interface{}{ "elephant": Animal{"elephant"}, "monkey": Animal{"monkey"}, } m := NewSharedMap() m.MStore(animals) count := 0 m.Range(func(key, value interface{}) bool { count++ return true }) if count != 2 { t.Error("map should contain exactly two elements.") } }
explode_data.jsonl/77837
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 130 }
[ 2830, 3393, 44, 13780, 1155, 353, 8840, 836, 8, 341, 197, 76796, 1669, 2415, 14032, 31344, 67066, 197, 197, 1, 10068, 26924, 788, 21292, 4913, 10068, 26924, 7115, 197, 197, 1, 96016, 788, 256, 21292, 4913, 96016, 7115, 197, 532, 2109, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidatingRefreshable_SubscriptionRaceCondition(t *testing.T) { r := &updateImmediatelyRefreshable{r: refreshable.NewDefaultRefreshable(1), newValue: 2} vr, err := refreshable.NewValidatingRefreshable(r, func(i interface{}) error { return nil }) require.NoError(t, err) // If this returns 1, it is likely because the VR contains a stale value assert.Equal(t, 2, vr.Current()) }
explode_data.jsonl/30066
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 126 }
[ 2830, 3393, 4088, 1095, 14567, 480, 36359, 12124, 55991, 10547, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 609, 2386, 95693, 14567, 480, 90, 81, 25, 10408, 480, 7121, 3675, 14567, 480, 7, 16, 701, 24174, 25, 220, 17, 532, 5195, 81, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEncoder_Encode_symetric(t *testing.T) { symetricEncode := func(s string) func(t *testing.T) { return func(t *testing.T) { d := Decoder{} run, err := d.Decode(strings.NewReader(s)) require.NoError(t, err) e := Encoder{} var buf bytes.Buffer require.NoError(t, e.Encode(&buf, run)) require.Equal(t, s, buf.String()) } } t.Run("case=empty", symetricEncode("")) t.Run("case=readme", symetricEncode(`commit: 7cd9055 BenchmarkDecode/text=digits/level=speed/size=1e4-8 100 154125 ns/op 64.88 MB/s 40418 B/op 7 allocs/op `)) t.Run("case=nokeys", symetricEncode(`BenchmarkDecode/text=digits/level=speed/size=1e4-8 100 154125 ns/op 64.88 MB/s 40418 B/op 7 allocs/op `)) t.Run("case=changekeys", symetricEncode(`commit: 7cd9055 BenchmarkDecode/text=digits/level=speed/size=1e4-8 100 154125 ns/op 64.88 MB/s 40418 B/op 7 allocs/op commit: 7cd9056 BenchmarkDecode/text=digits/level=speed/size=1e4-8 100 154125 ns/op 64.88 MB/s 40418 B/op 8 allocs/op `)) }
explode_data.jsonl/39871
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 436 }
[ 2830, 3393, 19921, 93529, 534, 26825, 16340, 1155, 353, 8840, 836, 8, 341, 1903, 1600, 16340, 32535, 1669, 2915, 1141, 914, 8, 2915, 1155, 353, 8840, 836, 8, 341, 197, 853, 2915, 1155, 353, 8840, 836, 8, 341, 298, 2698, 1669, 50472, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClient_UpdateAggregator(t *testing.T) { ctx := context.Background() asUser := userClient(t) withToken := withToken(t, asUser) created, err := withToken.CreateAggregator(ctx, types.CreateAggregator{ Name: "test-aggregator", AddHealthCheckPipeline: true, }) wantEqual(t, err, nil) err = withToken.UpdateAggregator(ctx, created.ID, types.UpdateAggregator{ Name: ptrStr("test-aggregator-updated"), }) wantEqual(t, err, nil) t.Run("version", func(t *testing.T) { err = withToken.UpdateAggregator(ctx, created.ID, types.UpdateAggregator{ Version: ptrStr(types.DefaultAggregatorVersion), }) wantEqual(t, err, nil) err = withToken.UpdateAggregator(ctx, created.ID, types.UpdateAggregator{ Version: ptrStr("non-semver-version"), }) wantErrMsg(t, err, "invalid aggregator version") err = withToken.UpdateAggregator(ctx, created.ID, types.UpdateAggregator{ Version: ptrStr(""), }) wantErrMsg(t, err, "invalid aggregator version") }) }
explode_data.jsonl/30419
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 389 }
[ 2830, 3393, 2959, 47393, 9042, 58131, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 60451, 1474, 1669, 1196, 2959, 1155, 340, 46948, 3323, 1669, 448, 3323, 1155, 11, 438, 1474, 692, 197, 7120, 11, 1848, 1669, 448, 3323, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidPOSTURL(t *testing.T) { wfe, _ := setupWFE(t) // A JWS and HTTP request with no extra headers noHeadersJWS, noHeadersJWSBody := signExtraHeaders(t, nil, wfe.nonceService) noHeadersRequest := makePostRequestWithPath("test-path", noHeadersJWSBody) // A JWS and HTTP request with extra headers, but no "url" extra header noURLHeaders := map[jose.HeaderKey]interface{}{ "nifty": "swell", } noURLHeaderJWS, noURLHeaderJWSBody := signExtraHeaders(t, noURLHeaders, wfe.nonceService) noURLHeaderRequest := makePostRequestWithPath("test-path", noURLHeaderJWSBody) // A JWS and HTTP request with a mismatched HTTP URL to JWS "url" header wrongURLHeaders := map[jose.HeaderKey]interface{}{ "url": "foobar", } wrongURLHeaderJWS, wrongURLHeaderJWSBody := signExtraHeaders(t, wrongURLHeaders, wfe.nonceService) wrongURLHeaderRequest := makePostRequestWithPath("test-path", wrongURLHeaderJWSBody) correctURLHeaderJWS, _, correctURLHeaderJWSBody := signRequestEmbed(t, nil, "http://localhost/test-path", "", wfe.nonceService) correctURLHeaderRequest := makePostRequestWithPath("test-path", correctURLHeaderJWSBody) testCases := []struct { Name string JWS *jose.JSONWebSignature Request *http.Request ExpectedResult *probs.ProblemDetails ErrorStatType string }{ { Name: "No extra headers in JWS", JWS: noHeadersJWS, Request: noHeadersRequest, ExpectedResult: &probs.ProblemDetails{ Type: probs.MalformedProblem, Detail: "JWS header parameter 'url' required", HTTPStatus: http.StatusBadRequest, }, ErrorStatType: "JWSNoExtraHeaders", }, { Name: "No URL header in JWS", JWS: noURLHeaderJWS, Request: noURLHeaderRequest, ExpectedResult: &probs.ProblemDetails{ Type: probs.MalformedProblem, Detail: "JWS header parameter 'url' required", HTTPStatus: http.StatusBadRequest, }, ErrorStatType: "JWSMissingURL", }, { Name: "Wrong URL header in JWS", JWS: wrongURLHeaderJWS, Request: wrongURLHeaderRequest, ExpectedResult: &probs.ProblemDetails{ Type: probs.MalformedProblem, Detail: "JWS header parameter 'url' 
incorrect. Expected \"http://localhost/test-path\" got \"foobar\"", HTTPStatus: http.StatusBadRequest, }, ErrorStatType: "JWSMismatchedURL", }, { Name: "Correct URL header in JWS", JWS: correctURLHeaderJWS, Request: correctURLHeaderRequest, ExpectedResult: nil, }, } for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { wfe.stats.joseErrorCount.Reset() prob := wfe.validPOSTURL(tc.Request, tc.JWS) if tc.ExpectedResult == nil && prob != nil { t.Fatal(fmt.Sprintf("Expected nil result, got %#v", prob)) } else { test.AssertMarshaledEquals(t, prob, tc.ExpectedResult) } if tc.ErrorStatType != "" { test.AssertEquals(t, test.CountCounterVec( "type", tc.ErrorStatType, wfe.stats.joseErrorCount), 1) } }) } }
explode_data.jsonl/15352
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1245 }
[ 2830, 3393, 4088, 2946, 3144, 1155, 353, 8840, 836, 8, 341, 6692, 1859, 11, 716, 1669, 6505, 54, 11419, 1155, 692, 197, 322, 362, 619, 7433, 323, 10130, 1681, 448, 902, 4960, 7102, 198, 72104, 10574, 41, 7433, 11, 902, 10574, 41, 74...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestCompileShouldSucceedWithValidRules(t *testing.T) { rules := []ProcessingRule{{Pattern: "[[:alnum:]]{5}", Type: IncludeAtMatch}} config := &LogsConfig{ProcessingRules: rules} err := config.Compile() assert.Nil(t, err) assert.NotNil(t, rules[0].Reg) assert.True(t, rules[0].Reg.MatchString("abcde")) }
explode_data.jsonl/41173
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 120 }
[ 2830, 3393, 46126, 14996, 50, 29264, 2354, 4088, 26008, 1155, 353, 8840, 836, 8, 341, 7000, 2425, 1669, 3056, 28892, 11337, 2979, 15760, 25, 10545, 3447, 93362, 25, 5053, 90, 20, 9545, 3990, 25, 29734, 1655, 8331, 11248, 25873, 1669, 60...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNodeID_distcmpEqual(t *testing.T) { base := common.Hash{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} x := common.Hash{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0} if distcmp(base, x, x) != 0 { t.Errorf("distcmp(base, x, x) != 0") } }
explode_data.jsonl/49020
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 137 }
[ 2830, 3393, 1955, 915, 16031, 7293, 2993, 1155, 353, 8840, 836, 8, 341, 24195, 1669, 4185, 15103, 90, 15, 11, 220, 16, 11, 220, 17, 11, 220, 18, 11, 220, 19, 11, 220, 20, 11, 220, 21, 11, 220, 22, 11, 220, 23, 11, 220, 24, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMemoryOverheadStats(t *testing.T) { if testing.Short() { return } var ( shardNumbers = []int{1, 32, 256} quantities = []int{1000, 10 * 1000, 100 * 1000, 1000 * 1000} valueSizes = []int{32, 512, 2048, 8192} expirations = []time.Duration{0, time.Hour} ) for _, expiration := range expirations { for _, shardNumber := range shardNumbers { for _, quantity := range quantities { for _, valueSize := range valueSizes { var ( c, _ = New(&Config{ ShardNumber: shardNumber, CleanInterval: time.Hour, }) memStats runtime.MemStats ) runtime.ReadMemStats(&memStats) before := memStats.Alloc for i := 0; i < quantity; i++ { c.ESet(getStr(16), make([]byte, valueSize), expiration) } runtime.ReadMemStats(&memStats) after := memStats.Alloc c.Close() runtime.GC() // NOTE: Can't skip this op. total, payload := after-before, uint64(quantity*(16+valueSize)) t.Logf("expiration(%v) shard-number(%d) quantity(%d) value-size(%s) total(%s) payload(%s) overhead(%s) ratio(%.2f%%)\n", expiration != 0, shardNumber, quantity, sizeReadable(uint64(valueSize)), sizeReadable(total), sizeReadable(payload), sizeReadable(total-payload), float64(payload)/float64(total)*100, ) } } } } }
explode_data.jsonl/5402
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 579 }
[ 2830, 3393, 10642, 1918, 1983, 16635, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 853, 198, 197, 630, 2405, 2399, 197, 36196, 567, 27237, 284, 3056, 396, 90, 16, 11, 220, 18, 17, 11, 220, 17, 20, 21, 532, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestSpecIntOpt(t *testing.T) { var f *int init := func(c *Cmd) { f = c.IntOpt("f", -1, "") } spec := "-f" cases := [][]string{ {"-f42"}, {"-f", "42"}, {"-f=42"}, } for _, args := range cases { okCmd(t, spec, init, args) require.Equal(t, 42, *f) } badCases := [][]string{ {}, {"-f", "x"}, {"-g"}, {"-f", "-g"}, {"-g", "-f"}, {"-f", "xxx"}, {"xxx", "-f"}, } for _, args := range badCases { failCmd(t, spec, init, args) } }
explode_data.jsonl/23919
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 244 }
[ 2830, 3393, 8327, 1072, 21367, 1155, 353, 8840, 836, 8, 341, 2405, 282, 353, 396, 198, 28248, 1669, 2915, 1337, 353, 15613, 8, 341, 197, 1166, 284, 272, 7371, 21367, 445, 69, 497, 481, 16, 11, 14676, 197, 630, 98100, 1669, 6523, 69,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_NewMessageDispatcher(t *testing.T) { testCases := []struct { name string input api.Message expectedMessageFixture string }{ { "session event sends as expected and does not include socket ids", api.Message{ Type: "SESSION_UPDATED", Body: session.CompleteSessionView{ SessionID: "123", VotesShown: true, FacilitatorSessionKey: "123345", Facilitator: session.User{ UserID: "a", Name: "b", Handle: "c", CurrentVote: aws.String("123"), SocketID: "123", }, FacilitatorPoints: false, Participants: []session.User{ { UserID: "f", Name: "g", Handle: "h", CurrentVote: aws.String("521"), SocketID: "987", }, }, }, }, "fixture/sessionUpdate.json", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { asserter := assert.New(t) inputCtx := testutil.NewTestContext() inputConnectionID := "weeee" expectedMessageContent, err := ioutil.ReadFile(tc.expectedMessageFixture) if !asserter.NoError(err) { return } expectedError := "stuff went wrong" poster := &MockConnectionPoster{} poster.On("PostToConnectionWithContext", inputCtx, &apigatewaymanagementapi.PostToConnectionInput{ ConnectionId: aws.String(inputConnectionID), Data: expectedMessageContent, }, emptyOpts).Return(nil, errors.New(expectedError)) err = api.NewMessageDispatcher(poster)(inputCtx, inputConnectionID, tc.input) asserter.EqualError(err, expectedError) }) } }
explode_data.jsonl/19893
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 778 }
[ 2830, 3393, 39582, 2052, 21839, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 4293, 914, 198, 197, 22427, 1698, 6330, 8472, 198, 197, 42400, 2052, 18930, 914, 198, 197, 59403, 197, 197, 515, 298, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestEncodeSingleCharUnquoted(t *testing.T) { encoded := Encode("GET A") expected := "*2\r\n$3\r\nGET\r\n$1\r\nA\r\n" if encoded != expected { t.Fatal("Encode should handle one char unquoted args") } }
explode_data.jsonl/71594
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 93 }
[ 2830, 3393, 32535, 10888, 4768, 1806, 63725, 1155, 353, 8840, 836, 8, 341, 197, 19329, 1669, 56562, 445, 3806, 362, 1138, 42400, 1669, 15630, 17, 12016, 1699, 3, 18, 12016, 1699, 3806, 12016, 1699, 3, 16, 12016, 1699, 32, 12016, 1699, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestValidateIntegerInRange(t *testing.T) { validIntegers := []int{-259, 0, 1, 5, 999} min := -259 max := 999 for _, v := range validIntegers { _, errors := validateIntegerInRange(min, max)(v, "name") if len(errors) != 0 { t.Fatalf("%q should be an integer in range (%d, %d): %q", v, min, max, errors) } } invalidIntegers := []int{-260, -99999, 1000, 25678} for _, v := range invalidIntegers { _, errors := validateIntegerInRange(min, max)(v, "name") if len(errors) == 0 { t.Fatalf("%q should be an integer outside range (%d, %d)", v, min, max) } } }
explode_data.jsonl/78581
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 235 }
[ 2830, 3393, 17926, 3486, 76059, 1155, 353, 8840, 836, 8, 341, 56322, 1072, 67592, 1669, 3056, 396, 19999, 17, 20, 24, 11, 220, 15, 11, 220, 16, 11, 220, 20, 11, 220, 24, 24, 24, 532, 25320, 1669, 481, 17, 20, 24, 198, 22543, 166...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestJetStreamSubscribe_AckDup(t *testing.T) { s := RunBasicJetStreamServer() defer s.Shutdown() if config := s.JetStreamConfig(); config != nil { defer os.RemoveAll(config.StoreDir) } nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() js, err := nc.JetStream() if err != nil { t.Fatalf("Unexpected error: %v", err) } // Create the stream using our client API. _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } js.Publish("foo", []byte("hello")) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) defer cancel() pings := make(chan struct{}, 6) nc.Subscribe("$JS.ACK.TEST.>", func(msg *nats.Msg) { pings <- struct{}{} }) nc.Flush() ch := make(chan error, 6) _, err = js.Subscribe("foo", func(m *nats.Msg) { // Only first ack will be sent, auto ack that will occur after // this won't be sent either. ch <- m.Ack() // Any following acks should fail. ch <- m.Ack() ch <- m.Nak() ch <- m.AckSync() ch <- m.Term() ch <- m.InProgress() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } <-ctx.Done() ackErr1 := <-ch if ackErr1 != nil { t.Errorf("Unexpected error: %v", ackErr1) } for i := 0; i < 5; i++ { e := <-ch if e != nats.ErrInvalidJSAck { t.Errorf("Expected error: %v", e) } } if len(pings) != 1 { t.Logf("Expected to receive a single ack, got: %v", len(pings)) } }
explode_data.jsonl/29173
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 666 }
[ 2830, 3393, 35641, 3027, 28573, 1566, 377, 85713, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 6452, 15944, 35641, 3027, 5475, 741, 16867, 274, 10849, 18452, 2822, 743, 2193, 1669, 274, 3503, 295, 3027, 2648, 2129, 2193, 961, 2092, 341, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReaderMaxOffset(t *testing.T) { ctx := context.Background() flags.Set(t, "auth.enable_anonymous_usage", "true") te := getTestEnv(t, emptyUserMap) ctx, err := prefix.AttachUserPrefixToContext(ctx, te) if err != nil { t.Errorf("error attaching user prefix: %v", err) } peer := fmt.Sprintf("localhost:%d", app.FreePort(t)) c := cacheproxy.NewCacheProxy(te, te.GetCache(), peer) if err := c.StartListening(); err != nil { t.Fatalf("Error setting up cacheproxy: %s", err) } waitUntilServerIsAlive(peer) randomSrc := &randomDataMaker{rand.NewSource(time.Now().Unix())} // Read some random bytes. buf := new(bytes.Buffer) io.CopyN(buf, randomSrc, 100) readSeeker := bytes.NewReader(buf.Bytes()) // Compute a digest for the random bytes. d, err := digest.Compute(readSeeker) if err != nil { t.Fatal(err) } readSeeker.Seek(0, 0) instanceName := "foo" isolation := &dcpb.Isolation{ RemoteInstanceName: instanceName, CacheType: dcpb.Isolation_CAS_CACHE, } // Set the random bytes in the cache (with a prefix) cache, err := te.GetCache().WithIsolation(ctx, interfaces.CASCacheType, instanceName) require.NoError(t, err) err = cache.Set(ctx, d, buf.Bytes()) if err != nil { t.Fatal(err) } // Remote-read the random bytes back. r, err := c.RemoteReader(ctx, peer, isolation, d, d.GetSizeBytes()) if err != nil { t.Fatal(err) } d2 := testdigest.ReadDigestAndClose(t, r) emptyHash := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" if emptyHash != d2.GetHash() { t.Fatalf("Digest uploaded %q != %q downloaded", emptyHash, d2.GetHash()) } }
explode_data.jsonl/69638
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 642 }
[ 2830, 3393, 5062, 5974, 6446, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 59516, 4202, 1155, 11, 330, 3242, 28697, 12008, 9757, 31507, 497, 330, 1866, 1138, 197, 665, 1669, 633, 2271, 14359, 1155, 11, 4287, 1474, 2227, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestTile38(t *testing.T) { if os.Getenv("TEST_TILE38_URI") == "" { t.Skipf("TEST_TILE38_URI not set - skipping") } for _, isTile38 := range []bool{true, false} { e, _ := NewRedisExporter(os.Getenv("TEST_TILE38_URI"), Options{Namespace: "test", IsTile38: isTile38}) chM := make(chan prometheus.Metric) go func() { e.Collect(chM) close(chM) }() found := false want := "tile38_threads_total" for m := range chM { if strings.Contains(m.Desc().String(), want) { found = true } } if isTile38 && !found { t.Errorf("%s was *not* found in tile38 metrics but expected", want) } else if !isTile38 && found { t.Errorf("%s was *found* in tile38 metrics but *not* expected", want) } } }
explode_data.jsonl/46980
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 315 }
[ 2830, 3393, 15628, 18, 23, 1155, 353, 8840, 836, 8, 341, 743, 2643, 64883, 445, 10033, 75810, 18, 23, 23116, 899, 621, 1591, 341, 197, 3244, 57776, 69, 445, 10033, 75810, 18, 23, 23116, 537, 738, 481, 42659, 1138, 197, 630, 2023, 83...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOrdersData(t *testing.T) { if checkSkipTest(t) { return } indexesById := map[string]*Index{} initIndexer := func(indexer *Indexer) (*Indexer, errors.Error) { if indexer.IndexesById == nil { indexer.IndexesById = initIndexesById(t, map[string]*Index{ "ftsIdx": { SourceName: "orders", Parent: indexer, IdStr: "ftsIdx", NameStr: "ftsIdx", IndexMapping: &mapping.IndexMappingImpl{ DefaultAnalyzer: "keyword", DefaultDateTimeParser: "disabled", DefaultMapping: &mapping.DocumentMapping{ Enabled: true, Properties: map[string]*mapping.DocumentMapping{ "custId": { Enabled: true, Fields: []*mapping.FieldMapping{ { Name: "custId", Type: "text", Index: true, }, }, }, "orderlines": { Enabled: true, Properties: map[string]*mapping.DocumentMapping{ "productId": { Enabled: true, Fields: []*mapping.FieldMapping{ { Name: "productId", Type: "text", Index: true, }, }, }, }, }, }, }, }, }, }) for id, v := range indexer.IndexesById { indexesById[id] = v } } return indexer, nil } c := MakeWrapCallbacksForIndexType(datastore.IndexType("FTS"), initIndexer) s, err := NewServer("./", c) if err != nil { t.Fatalf("did not expect err: %v", err) } testOrdersData(t, s, indexesById, []testOrdersDataCase{ { `SELECT * FROM data:orders as o UNNEST o.orderlines as orderline WHERE orderline.productId = "sugar22"`, 3, flex.FieldTracks{ flex.FieldTrack("orderlines.productId"): 1, }, true, `{"field":"orderlines.productId","term":"sugar22"}`, }, { `SELECT * FROM data:orders as o UNNEST o.orderlines as orderline WHERE orderline.productId = "sugar22" AND (o.custId = "ccc" OR o.custId = "abc")`, 3, flex.FieldTracks{ flex.FieldTrack("orderlines.productId"): 1, flex.FieldTrack("custId"): 2, }, true, `{"conjuncts":[{"field":"orderlines.productId","term":"sugar22"},{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}]}`, }, { `SELECT * FROM data:orders as o UNNEST orderlines as orderline LEFT OUTER JOIN [] as o2 ON o.id = o2.id WHERE o.custId = "ccc" OR o.custId = "abc"`, 6, 
flex.FieldTracks{ flex.FieldTrack("custId"): 2, }, true, `{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}`, }, { `SELECT * FROM data:orders as o LEFT OUTER JOIN [] as o2 ON o.id = o2.id UNNEST o.orderlines as orderline LET c = o.custId WHERE c = "ccc" OR c = "abc"`, 6, flex.FieldTracks{ flex.FieldTrack("custId"): 2, }, true, `{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}`, }, }) }
explode_data.jsonl/45730
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1678 }
[ 2830, 3393, 24898, 1043, 1155, 353, 8840, 836, 8, 341, 743, 1779, 35134, 2271, 1155, 8, 341, 197, 853, 198, 197, 630, 26327, 288, 2720, 1669, 2415, 14032, 8465, 1552, 31483, 28248, 1552, 261, 1669, 2915, 7195, 261, 353, 1552, 261, 8, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStrIntToBinary(t *testing.T) { cases := []struct { num int32 nums string order string length int signed bool }{ {0, "0", "LittleEndian", 4, true}, {10, "10", "LittleEndian", 4, true}, {-10, "-10", "LittleEndian", 4, true}, {-111, "-111", "LittleEndian", 4, true}, {2147483647, "2147483647", "LittleEndian", 0, true}, {-2147483648, "-2147483648", "LittleEndian", 0, true}, {-2147483648, "2147483648", "LittleEndian", 0, false}, {0, "0", "BigEndian", 4, true}, {10, "10", "BigEndian", 4, true}, {-10, "-10", "BigEndian", 4, true}, {-111, "-111", "BigEndian", 4, true}, {2147483647, "2147483647", "BigEndian", 0, true}, {-2147483648, "-2147483648", "BigEndian", 0, true}, {-2147483648, "2147483648", "BigEndian", 0, false}, } for _, c := range cases { buf := new(bytes.Buffer) if c.order == "LittleEndian" { binary.Write(buf, binary.LittleEndian, c.num) } else { binary.Write(buf, binary.BigEndian, c.num) } expect := string(buf.Bytes()) res := StrIntToBinary(c.nums, c.order, c.length, c.signed) if res != expect { t.Errorf("StrIntToBinary error %b, expect %b, get %b", c.num, []byte(expect), []byte(res)) } } }
explode_data.jsonl/29862
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 552 }
[ 2830, 3393, 2580, 1072, 1249, 21338, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 22431, 262, 526, 18, 17, 198, 197, 22431, 82, 256, 914, 198, 197, 42245, 220, 914, 198, 197, 49046, 526, 198, 197, 1903, 1542...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestGetOrCreateIbFromRootIb_InvalidChild(t *testing.T) { im := NewIfdMapping() err := LoadStandardIfds(im) log.PanicIf(err) ti := NewTagIndex() rootIb := NewIfdBuilder(im, ti, exifcommon.IfdStandardIfdIdentity, exifcommon.TestDefaultByteOrder) _, err = GetOrCreateIbFromRootIb(rootIb, "IFD/Invalid") if err == nil { t.Fatalf("Expected failure for invalid IFD child in IB get-or-create.") } else if err.Error() != "ifd child with name [Invalid] not registered: [IFD/Invalid]" { log.Panic(err) } }
explode_data.jsonl/36650
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 198 }
[ 2830, 3393, 1949, 57111, 40, 65, 3830, 8439, 40, 65, 62, 7928, 3652, 1155, 353, 8840, 836, 8, 341, 54892, 1669, 1532, 2679, 67, 6807, 2822, 9859, 1669, 8893, 19781, 2679, 5356, 25107, 340, 6725, 1069, 31270, 2679, 3964, 692, 72859, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestInitCmd_dry(t *testing.T) { var buf bytes.Buffer cmd := &initCmd{ out: &buf, fs: afero.NewMemMapFs(), dryRun: true, } if err := cmd.run(); err != nil { t.Errorf("expected error: %v", err) } expected := "" if !strings.Contains(buf.String(), expected) { t.Errorf("expected %q, got %q", expected, buf.String()) } }
explode_data.jsonl/53638
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 149 }
[ 2830, 3393, 3803, 15613, 814, 884, 1155, 353, 8840, 836, 8, 1476, 2405, 6607, 5820, 22622, 271, 25920, 1669, 609, 2327, 15613, 515, 197, 13967, 25, 262, 609, 5909, 345, 197, 53584, 25, 257, 264, 802, 78, 7121, 18816, 2227, 48300, 3148...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestDevSyncAPITrigger is an integration test: it deploys the file-sync
// example with auto-sync disabled, mutates a synced file, then triggers a
// manual sync through the skaffold dev RPC API and waits for the sync
// events to confirm completion.
func TestDevSyncAPITrigger(t *testing.T) {
	MarkIntegrationTest(t, CanRunWithoutGcp)
	ns, client := SetupNamespace(t)

	skaffold.Build().InDir("testdata/file-sync").WithConfig("skaffold-manual.yaml").InNs(ns.Name).RunOrFail(t)

	// Dev loop runs in the background with auto-sync off so only the
	// explicit RPC intent below can trigger the sync.
	rpcAddr := randomPort()
	skaffold.Dev("--auto-sync=false", "--rpc-port", rpcAddr).InDir("testdata/file-sync").WithConfig("skaffold-manual.yaml").InNs(ns.Name).RunBackground(t)

	rpcClient, entries := apiEvents(t, rpcAddr)

	// throw away first 5 entries of log (from first run of dev loop)
	for i := 0; i < 5; i++ {
		<-entries
	}

	client.WaitForPodsReady("test-file-sync")

	// Touch the synced file; restore it afterwards so reruns start clean.
	// NOTE(review): the WriteFile/Truncate errors are ignored — presumably
	// acceptable for a test fixture, but worth confirming.
	ioutil.WriteFile("testdata/file-sync/foo", []byte("foo"), 0644)
	defer func() { os.Truncate("testdata/file-sync/foo", 0) }()

	// Fire the manual sync intent over RPC.
	rpcClient.Execute(context.Background(), &proto.UserIntentRequest{
		Intent: &proto.Intent{
			Sync: true,
		},
	})

	verifySyncCompletedWithEvents(t, entries, ns.Name, "foo")
}
explode_data.jsonl/4866
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 359 }
[ 2830, 3393, 14592, 12154, 2537, 952, 81, 4500, 1155, 353, 8840, 836, 8, 341, 197, 8949, 52464, 2271, 1155, 11, 2980, 6727, 26040, 38, 4672, 692, 84041, 11, 2943, 1669, 18626, 22699, 1155, 692, 1903, 74, 2649, 813, 25212, 1005, 641, 61...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestServer_ListBlocks_Errors(t *testing.T) { db, _ := dbTest.SetupDB(t) ctx := context.Background() bs := &Server{BeaconDB: db} exceedsMax := int32(cmd.Get().MaxRPCPageSize + 1) wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, cmd.Get().MaxRPCPageSize) req := &ethpb.ListBlocksRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax} _, err := bs.ListBlocks(ctx, req) assert.ErrorContains(t, wanted, err) wanted = "Must specify a filter criteria for fetching" req = &ethpb.ListBlocksRequest{} _, err = bs.ListBlocks(ctx, req) assert.ErrorContains(t, wanted, err) req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 0}} res, err := bs.ListBlocks(ctx, req) require.NoError(t, err) assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list") assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0") req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{}} res, err = bs.ListBlocks(ctx, req) require.NoError(t, err) assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list") assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0") req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}} res, err = bs.ListBlocks(ctx, req) require.NoError(t, err) assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list") assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0") req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}} res, err = bs.ListBlocks(ctx, req) require.NoError(t, err) assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list") assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0") }
explode_data.jsonl/36476
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 667 }
[ 2830, 3393, 5475, 27104, 29804, 93623, 1087, 1155, 353, 8840, 836, 8, 341, 20939, 11, 716, 1669, 2927, 2271, 39820, 3506, 1155, 340, 20985, 1669, 2266, 19047, 2822, 93801, 1669, 609, 5475, 90, 3430, 22379, 3506, 25, 2927, 532, 8122, 463...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPathEscape(t *testing.T) { for _, tt := range pathEscapeTests { actual := PathEscape(tt.in) if tt.out != actual { t.Errorf("PathEscape(%q) = %q, want %q", tt.in, actual, tt.out) } // for bonus points, verify that escape:unescape is an identity. roundtrip, err := PathUnescape(actual) if roundtrip != tt.in || err != nil { t.Errorf("PathUnescape(%q) = %q, %s; want %q, %s", actual, roundtrip, err, tt.in, "[no error]") } } }
explode_data.jsonl/71723
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 198 }
[ 2830, 3393, 1820, 48124, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17853, 1669, 2088, 1815, 48124, 18200, 341, 197, 88814, 1669, 7933, 48124, 47152, 1858, 340, 197, 743, 17853, 2532, 961, 5042, 341, 298, 3244, 13080, 445, 1820, 48124, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestStrPathBackwardsCompat(t *testing.T) { for i, tc := range []struct { path *pb.Path str string }{{ path: &pb.Path{ Element: p("foo[a=1][b=2]", "bar"), }, str: "/foo[a=1][b=2]/bar", }} { got := StrPath(tc.path) if got != tc.str { t.Errorf("[%d] want %q, got %q", i, tc.str, got) } } }
explode_data.jsonl/78333
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 163 }
[ 2830, 3393, 2580, 1820, 3707, 4014, 8712, 1155, 353, 8840, 836, 8, 341, 2023, 600, 11, 17130, 1669, 2088, 3056, 1235, 341, 197, 26781, 353, 16650, 17474, 198, 197, 11355, 220, 914, 198, 197, 15170, 515, 197, 26781, 25, 609, 16650, 174...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMemCacheRetrieve(t *testing.T) { ip := net.ParseIP("127.0.0.1") mCache := GetCache("cache.file") d := NewDecoder(ip, tpl) d.Decode(mCache) v, ok := mCache.retrieve(256, ip, 33792) if !ok { t.Error("expected mCache retrieve status true, got", ok) } if v.TemplateID != 256 { t.Error("expected template id#:256, got", v.TemplateID) } }
explode_data.jsonl/7805
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 149 }
[ 2830, 3393, 18816, 8233, 87665, 1155, 353, 8840, 836, 8, 341, 46531, 1669, 4179, 8937, 3298, 445, 16, 17, 22, 13, 15, 13, 15, 13, 16, 1138, 2109, 8233, 1669, 2126, 8233, 445, 9360, 9715, 1138, 2698, 1669, 1532, 20732, 23443, 11, 609...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestNextPanicAndDirectCall(t *testing.T) { // Next should not step into a deferred function if it is called // directly, only if it is called through a panic or a deferreturn. // Here we test the case where the function is called by a panic if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) { testseq("defercall", contNext, []nextTest{ {15, 16}, {16, 17}, {17, 18}, {18, 6}}, "main.callAndPanic2", t) } else { testseq("defercall", contNext, []nextTest{ {15, 16}, {16, 17}, {17, 18}, {18, 5}}, "main.callAndPanic2", t) } }
explode_data.jsonl/56259
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 226 }
[ 2830, 3393, 5847, 47, 31270, 3036, 16027, 7220, 1155, 353, 8840, 836, 8, 341, 197, 322, 9295, 1265, 537, 3019, 1119, 264, 26239, 729, 421, 432, 374, 2598, 198, 197, 322, 5961, 11, 1172, 421, 432, 374, 2598, 1526, 264, 21975, 476, 26...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestVRF evaluates the VRF over 100 random hashes and checks that the
// produced index respects the configured max, and that Verify accepts the
// proof and reproduces the same index.
func TestVRF(t *testing.T) {
	_, pk, pv := crypto.GenerateTestKeyPair()
	signer := crypto.NewSigner(pv)
	for i := 0; i < 100; i++ {
		h := crypto.GenerateTestHash()
		vrf := NewVRF(signer)
		// NOTE(review): `1*1000` binds tighter than `+`, so this is
		// i+1000, not (i+1)*1000 — confirm which bound was intended.
		max := int64(i + 1*1000)
		vrf.SetMax(max)
		// Evaluate must yield an index within [0, max].
		index, proof := vrf.Evaluate(h)
		//fmt.Printf("index is : %v \n", index)
		assert.LessOrEqual(t, index, max)

		// Verify must accept the proof and derive the identical index.
		index2, result := vrf.Verify(h, pk, proof)
		assert.Equal(t, result, true)
		assert.Equal(t, index, index2)
	}
}
explode_data.jsonl/2406
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 216 }
[ 2830, 3393, 53, 17612, 1155, 353, 8840, 836, 8, 341, 197, 6878, 22458, 11, 33491, 1669, 19028, 57582, 2271, 1592, 12443, 741, 69054, 261, 1669, 19028, 7121, 7264, 261, 1295, 85, 340, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestDiskQueueReaderResetConfirmed exercises ResetReadToConfirmed: once
// reads have advanced past the confirmed position, a reset must rewind
// the read position back to the last confirmed offset so the same
// messages can be read again.
func TestDiskQueueReaderResetConfirmed(t *testing.T) {
	dqName := "test_disk_queue" + strconv.Itoa(int(time.Now().Unix()))
	tmpDir, err := ioutil.TempDir("", fmt.Sprintf("nsq-test-%d", time.Now().UnixNano()))
	test.Nil(t, err)
	defer os.RemoveAll(tmpDir)

	// Write 1000 messages and flush so the reader sees a queue end.
	queue, _ := NewDiskQueueWriter(dqName, tmpDir, 1024, 4, 1<<10, 1)
	dqWriter := queue.(*diskQueueWriter)
	defer dqWriter.Close()
	test.NotNil(t, dqWriter)
	msg := []byte("test")
	msgNum := 1000
	for i := 0; i < msgNum; i++ {
		dqWriter.Put(msg)
	}
	dqWriter.Flush()
	end := dqWriter.GetQueueWriteEnd()
	test.Nil(t, err)

	dqReader := newDiskQueueReader(dqName, dqName, tmpDir, 1024, 4, 1<<10, 1, 2*time.Second, nil, true)
	dqReader.UpdateQueueEnd(end, false)

	// Read one message: the read position advances past it.
	msgOut, _ := dqReader.TryReadOne()
	equal(t, msgOut.Data, msg)
	test.Equal(t, msgOut.Offset+BackendOffset(msgOut.MovedSize), dqReader.(*diskQueueReader).readQueueInfo.Offset())
	test.Equal(t, msgOut.CurCnt, dqReader.(*diskQueueReader).readQueueInfo.TotalMsgCnt())
	defer dqReader.Close()

	// Nothing confirmed yet: resetting lands back on the initial
	// confirmed position and the same message is readable again.
	oldConfirm := dqReader.GetQueueConfirmed()
	curConfirmed, err := dqReader.ResetReadToConfirmed()
	test.Nil(t, err)
	test.Equal(t, oldConfirm, curConfirmed)
	test.Equal(t, oldConfirm.(*diskQueueEndInfo), &dqReader.(*diskQueueReader).readQueueInfo)
	msgOut2, _ := dqReader.TryReadOne()
	test.Equal(t, msgOut, msgOut2)

	// Confirm the first message: the confirmed offset moves past it and
	// a reset now lands exactly there.
	err = dqReader.ConfirmRead(msgOut.Offset+BackendOffset(msgOut.MovedSize), msgOut.CurCnt)
	test.Nil(t, err)
	oldConfirm = dqReader.GetQueueConfirmed()
	test.Equal(t, msgOut.Offset+BackendOffset(msgOut.MovedSize), oldConfirm.Offset())
	curConfirmed, err = dqReader.ResetReadToConfirmed()
	test.Nil(t, err)
	test.Equal(t, oldConfirm, curConfirmed)
	test.Equal(t, oldConfirm.(*diskQueueEndInfo), &dqReader.(*diskQueueReader).readQueueInfo)

	// Read half the queue, remembering one message from the first
	// quarter to confirm later.
	var confirmMsg ReadResult
	for i := 0; i < msgNum/2; i++ {
		msgOut, _ = dqReader.TryReadOne()
		equal(t, msgOut.Data, msg)
		if i == msgNum/4 {
			confirmMsg = msgOut
		}
	}
	// Read position is now ahead of the confirmed position.
	curRead := dqReader.(*diskQueueReader).readQueueInfo
	test.Equal(t, true, curRead.Offset() > dqReader.GetQueueConfirmed().Offset())

	// Confirm up to the remembered message; the read position stays
	// ahead of the new confirmed position.
	err = dqReader.ConfirmRead(confirmMsg.Offset+BackendOffset(confirmMsg.MovedSize), confirmMsg.CurCnt)
	test.Nil(t, err)
	test.Equal(t, true, curRead.Offset() > dqReader.GetQueueConfirmed().Offset())

	// Reset rewinds the read position to the confirmed point, so the
	// next read is the message immediately after confirmMsg.
	curConfirmed, err = dqReader.ResetReadToConfirmed()
	test.Nil(t, err)
	test.Equal(t, curConfirmed, dqReader.GetQueueConfirmed())
	test.Equal(t, curConfirmed.(*diskQueueEndInfo), &dqReader.(*diskQueueReader).readQueueInfo)
	msgOut2, _ = dqReader.TryReadOne()
	test.Equal(t, confirmMsg.CurCnt+1, msgOut2.CurCnt)
	test.Equal(t, confirmMsg.Offset+confirmMsg.MovedSize, msgOut2.Offset)
}
explode_data.jsonl/58218
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1064 }
[ 2830, 3393, 47583, 7554, 5062, 14828, 47948, 1155, 353, 8840, 836, 8, 1476, 2698, 80, 675, 1669, 330, 1944, 41687, 10841, 1, 488, 33317, 64109, 1548, 9730, 13244, 1005, 55832, 12145, 20082, 6184, 11, 1848, 1669, 43144, 65009, 6184, 19814,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestDataReset(t *testing.T) { var ( buffers1 = make([]*memory.Buffer, 0, 3) buffers2 = make([]*memory.Buffer, 0, 3) ) for i := 0; i < cap(buffers1); i++ { buffers1 = append(buffers1, memory.NewBufferBytes([]byte("some-bytes1"))) buffers2 = append(buffers2, memory.NewBufferBytes([]byte("some-bytes2"))) } data := NewData(&arrow.StringType{}, 10, buffers1, nil, 0, 0) data.Reset(&arrow.Int64Type{}, 5, buffers2, nil, 1, 2) for i := 0; i < 2; i++ { assert.Equal(t, buffers2, data.Buffers()) assert.Equal(t, &arrow.Int64Type{}, data.DataType()) assert.Equal(t, 1, data.NullN()) assert.Equal(t, 2, data.Offset()) assert.Equal(t, 5, data.Len()) // Make sure it works when resetting the data with its own buffers (new buffers are retained // before old ones are released.) data.Reset(&arrow.Int64Type{}, 5, data.Buffers(), nil, 1, 2) } }
explode_data.jsonl/47202
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 353 }
[ 2830, 93200, 14828, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 2233, 20342, 16, 284, 1281, 85288, 17269, 22622, 11, 220, 15, 11, 220, 18, 340, 197, 2233, 20342, 17, 284, 1281, 85288, 17269, 22622, 11, 220, 15, 11, 220, 18, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestAgent_RegisterCheck_Passing(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") defer a.Shutdown() args := &structs.CheckDefinition{ Name: "test", TTL: 15 * time.Second, Status: api.HealthPassing, } req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args)) obj, err := a.srv.AgentRegisterCheck(nil, req) if err != nil { t.Fatalf("err: %v", err) } if obj != nil { t.Fatalf("bad: %v", obj) } // Ensure we have a check mapping checkID := types.CheckID("test") if _, ok := a.State.Checks()[checkID]; !ok { t.Fatalf("missing test check") } if _, ok := a.checkTTLs[checkID]; !ok { t.Fatalf("missing test check ttl") } state := a.State.Checks()[checkID] if state.Status != api.HealthPassing { t.Fatalf("bad: %v", state) } }
explode_data.jsonl/33612
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 336 }
[ 2830, 3393, 16810, 73124, 3973, 1088, 72832, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 11323, 1669, 1532, 2271, 16810, 1155, 2967, 1507, 14676, 16867, 264, 10849, 18452, 2822, 31215, 1669, 609, 1235, 82, 10600, 10398, 515, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestCachedChartsChartVersionsFromRepo(t *testing.T) { charts, err := chartsImplementation.ChartVersionsFromRepo(testutil.RepoName, testutil.ChartName) assert.NoErr(t, err) assert.True(t, len(charts) > 0, "returned charts") noCharts, err := chartsImplementation.ChartVersionsFromRepo(testutil.BogusRepo, testutil.ChartName) assert.ExistsErr(t, err, "sent bogus repo name to GetChartsInRepo") assert.True(t, len(noCharts) == 0, "empty charts slice") }
explode_data.jsonl/37968
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 165 }
[ 2830, 3393, 70293, 64878, 14488, 69015, 3830, 25243, 1155, 353, 8840, 836, 8, 341, 23049, 7038, 11, 1848, 1669, 26131, 36850, 42667, 69015, 3830, 25243, 8623, 1314, 2817, 5368, 675, 11, 1273, 1314, 42667, 675, 340, 6948, 16766, 7747, 1155...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReconcileSecurityGroupNewInternalServiceAddsPort(t *testing.T) { az := getTestCloud() getTestSecurityGroup(az) svc1 := getInternalTestService("serviceea", 80) addTestSubnet(t, az, &svc1) clusterResources := getClusterResources(az, 1, 1) lb, _ := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true) lbStatus, _ := az.getServiceLoadBalancerStatus(&svc1, lb) sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, &lbStatus.Ingress[0].IP, true /* wantLb */) if err != nil { t.Errorf("Unexpected error: %q", err) } validateSecurityGroup(t, sg, svc1) }
explode_data.jsonl/50400
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 231 }
[ 2830, 3393, 693, 40446, 457, 15352, 2808, 3564, 11569, 1860, 72111, 7084, 1155, 353, 8840, 836, 8, 341, 197, 1370, 1669, 633, 2271, 16055, 741, 10366, 2271, 15352, 2808, 7, 1370, 340, 1903, 7362, 16, 1669, 633, 11569, 2271, 1860, 445, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestEditorDimensions(t *testing.T) { e := new(Editor) tq := &testQueue{ events: []event.Event{ key.EditEvent{Text: "A"}, }, } gtx := layout.Context{ Ops: new(op.Ops), Constraints: layout.Constraints{Max: image.Pt(100, 100)}, Queue: tq, } cache := text.NewCache(gofont.Collection()) fontSize := unit.Px(10) font := text.Font{} dims := e.Layout(gtx, cache, font, fontSize, nil) if dims.Size.X == 0 { t.Errorf("EditEvent was not reflected in Editor width") } }
explode_data.jsonl/27262
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 222 }
[ 2830, 3393, 9410, 21351, 1155, 353, 8840, 836, 8, 341, 7727, 1669, 501, 87136, 340, 3244, 80, 1669, 609, 1944, 7554, 515, 197, 90873, 25, 3056, 3087, 6904, 515, 298, 23634, 35823, 1556, 90, 1178, 25, 330, 32, 7115, 197, 197, 1583, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestHistogramDataPointSlice(t *testing.T) { es := NewHistogramDataPointSlice() assert.EqualValues(t, 0, es.Len()) es = newHistogramDataPointSlice(&[]*otlpmetrics.HistogramDataPoint{}) assert.EqualValues(t, 0, es.Len()) es.EnsureCapacity(7) emptyVal := newHistogramDataPoint(&otlpmetrics.HistogramDataPoint{}) testVal := generateTestHistogramDataPoint() assert.EqualValues(t, 7, cap(*es.orig)) for i := 0; i < es.Len(); i++ { el := es.AppendEmpty() assert.EqualValues(t, emptyVal, el) fillTestHistogramDataPoint(el) assert.EqualValues(t, testVal, el) } }
explode_data.jsonl/32728
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 224 }
[ 2830, 3393, 77210, 1043, 2609, 33236, 1155, 353, 8840, 836, 8, 341, 78966, 1669, 1532, 77210, 1043, 2609, 33236, 741, 6948, 12808, 6227, 1155, 11, 220, 15, 11, 1531, 65819, 2398, 78966, 284, 501, 77210, 1043, 2609, 33236, 2099, 1294, 9,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestAccCollection_ClusteringKey is a Terraform acceptance test: it
// creates a Rockset collection configured with an AUTO clustering key on
// the "population" field and verifies the resulting resource attributes,
// clustering key and retention.
func TestAccCollection_ClusteringKey(t *testing.T) {
	var collection openapi.Collection

	resource.Test(t, resource.TestCase{
		PreCheck:          func() { testAccPreCheck(t) },
		ProviderFactories: testAccProviderFactories,
		CheckDestroy:      testAccCheckRocksetCollectionDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccCheckCollectionClusteringKeyAuto(),
				Check: resource.ComposeTestCheckFunc(
					// Populates `collection` for the *Matches checks below.
					testAccCheckRocksetCollectionExists("rockset_collection.test", &collection),
					resource.TestCheckResourceAttr("rockset_collection.test", "name", testCollectionNameClustering),
					resource.TestCheckResourceAttr("rockset_collection.test", "workspace", testCollectionWorkspace),
					resource.TestCheckResourceAttr("rockset_collection.test", "description", testCollectionDescription),
					testAccCheckClusteringKeyMatches(&collection, "population", "AUTO", []string{}),
					testAccCheckRetentionSecsMatches(&collection, 60),
				),
				ExpectNonEmptyPlan: false,
			},
		},
	})
}
explode_data.jsonl/7138
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 349 }
[ 2830, 3393, 14603, 6482, 85110, 36694, 1592, 1155, 353, 8840, 836, 8, 341, 2405, 4426, 1787, 2068, 28629, 271, 50346, 8787, 1155, 11, 5101, 31363, 515, 197, 197, 4703, 3973, 25, 688, 2915, 368, 314, 1273, 14603, 4703, 3973, 1155, 8, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBlockByHash_RPCBlock_0x00(t *testing.T) { t.Parallel() srv := rpctest.NewFileServer(t, "testdata/get_block_by_hash__0x00.golden") defer srv.Close() client := w3.MustDial(srv.URL()) defer client.Close() var ( block = new(eth.RPCBlock) wantErr = fmt.Errorf("w3: response handling failed: not found") ) if gotErr := client.Call( eth.BlockByHash(common.Hash{}).ReturnsRAW(block), ); wantErr.Error() != gotErr.Error() { t.Fatalf("want %v, got %v", wantErr, gotErr) } }
explode_data.jsonl/62506
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 216 }
[ 2830, 3393, 4713, 1359, 6370, 76022, 4713, 62, 15, 87, 15, 15, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 1903, 10553, 1669, 33109, 67880, 7121, 1703, 5475, 1155, 11, 330, 92425, 23302, 7113, 3710, 8950, 563, 15, 87, 15, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLintCharset(t *testing.T) { ctx := context.TODO() for _, f := range []string{"latin1", "utf8"} { for _, err := range eclint.Lint(ctx, fmt.Sprintf("./testdata/charset/%s.txt", f)) { if err != nil { t.Errorf("no errors where expected, got %s", err) } } } }
explode_data.jsonl/82392
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 129 }
[ 2830, 3393, 47556, 78172, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 90988, 2822, 2023, 8358, 282, 1669, 2088, 3056, 917, 4913, 60085, 16, 497, 330, 4762, 23, 9207, 341, 197, 2023, 8358, 1848, 1669, 2088, 59958, 396, 1214, 396, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestSecureNamingSAN(t *testing.T) { pod := &coreV1.Pod{} pod.Annotations = make(map[string]string) ns := "anything" sa := "foo" pod.Namespace = ns pod.Spec.ServiceAccountName = sa san := SecureNamingSAN(pod) expectedSAN := fmt.Sprintf("spiffe://%v/ns/%v/sa/%v", spiffe.GetTrustDomain(), ns, sa) if san != expectedSAN { t.Fatalf("SAN match failed, SAN:%v expectedSAN:%v", san, expectedSAN) } }
explode_data.jsonl/73809
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 163 }
[ 2830, 3393, 49813, 85410, 68691, 1155, 353, 8840, 836, 8, 341, 3223, 347, 1669, 609, 2153, 53, 16, 88823, 31483, 3223, 347, 91172, 284, 1281, 9147, 14032, 30953, 692, 84041, 1669, 330, 72154, 698, 1903, 64, 1669, 330, 7975, 698, 3223, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_APHash64(t *testing.T) { var x uint64 = 2531023058543352243 gtest.C(t, func(t *gtest.T) { j := ghash.APHash64(strBasic) t.Assert(j, x) }) }
explode_data.jsonl/60241
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 80 }
[ 2830, 3393, 21899, 6370, 21, 19, 1155, 353, 8840, 836, 8, 341, 2405, 856, 2622, 21, 19, 284, 220, 17, 20, 18, 16, 15, 17, 18, 15, 20, 23, 20, 19, 18, 18, 20, 17, 17, 19, 18, 198, 3174, 1944, 727, 1155, 11, 2915, 1155, 353, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDeadCallbackNoEntity(t *testing.T) { // Make sure the dead callback doesn't crash when entity is missing messageBus, err := messaging.NewWizardBus(messaging.WizardBusConfig{ RingGetter: &mockring.Getter{}, }) if err != nil { t.Fatal(err) } if err := messageBus.Start(); err != nil { t.Fatal(err) } tsub := testSubscriber{ ch: make(chan interface{}, 1), } if _, err := messageBus.Subscribe(messaging.TopicEvent, "testSubscriber", tsub); err != nil { t.Fatal(err) } store := &mockstore.MockStore{} keepalived, err := New(Config{Store: store, Bus: messageBus, LivenessFactory: fakeFactory}) if err != nil { t.Fatal(err) } store.On("GetEntityByName", mock.Anything, mock.Anything).Return((*corev2.Entity)(nil), nil) // Smoke test - just want to make sure there is no panic keepalived.dead("default/testSubscriber", liveness.Alive, true) }
explode_data.jsonl/45981
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 321 }
[ 2830, 3393, 28320, 7494, 2753, 3030, 1155, 353, 8840, 836, 8, 341, 197, 322, 7405, 2704, 279, 5593, 4822, 3171, 944, 9920, 979, 5387, 374, 7402, 198, 24753, 15073, 11, 1848, 1669, 29522, 7121, 63094, 15073, 1255, 32140, 1175, 13722, 150...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestFileHelper_IsPathFileString_11(t *testing.T) { fh := FileHelper{} pathFile := "" _, _, err := fh.IsPathFileString(pathFile) if err == nil { t.Error("Expected an error return from fh.IsPathFileString(pathFile) " + "because 'pathFile' is an empty string. However, NO ERROR WAS RETURNED! ") } }
explode_data.jsonl/14500
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 121 }
[ 2830, 3393, 1703, 5511, 31879, 1820, 1703, 703, 62, 16, 16, 1155, 353, 8840, 836, 8, 1476, 220, 36075, 1669, 2887, 5511, 16094, 220, 1815, 1703, 1669, 35829, 220, 8358, 8358, 1848, 1669, 36075, 4506, 1820, 1703, 703, 5581, 1703, 692, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCheckErrorOrdering(t *testing.T) { mod := MustParseModule(` package test q = true p { data.test.q = 1 } # type error: bool = number p { data.test.q = 2 } # type error: bool = number `) input := make([]util.T, len(mod.Rules)) inputReversed := make([]util.T, len(mod.Rules)) for i := range input { input[i] = mod.Rules[i] inputReversed[i] = mod.Rules[i] } tmp := inputReversed[1] inputReversed[1] = inputReversed[2] inputReversed[2] = tmp _, errs1 := newTypeChecker().CheckTypes(nil, input, nil) _, errs2 := newTypeChecker().CheckTypes(nil, inputReversed, nil) if errs1.Error() != errs2.Error() { t.Fatalf("Expected error slices to be equal. errs1:\n\n%v\n\nerrs2:\n\n%v\n\n", errs1, errs2) } }
explode_data.jsonl/14572
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 318 }
[ 2830, 3393, 3973, 1454, 4431, 287, 1155, 353, 8840, 836, 8, 1476, 42228, 1669, 15465, 14463, 3332, 61528, 197, 197, 1722, 1273, 271, 197, 18534, 284, 830, 271, 197, 3223, 314, 821, 5958, 11354, 284, 220, 16, 335, 220, 671, 943, 1465, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestValidatorBulkLoadingOfCache(t *testing.T) { testDBEnv := testEnvs[couchDBtestEnvName] testDBEnv.Init(t) defer testDBEnv.Cleanup() db := testDBEnv.GetDBHandle("testdb") testValidator := &validator{db: db, hashFunc: testHashFunc} //populate db with initial data batch := privacyenabledstate.NewUpdateBatch() // Create two public KV pairs pubKV1 := keyValue{namespace: "ns1", key: "key1", value: []byte("value1"), version: version.NewHeight(1, 0)} pubKV2 := keyValue{namespace: "ns1", key: "key2", value: []byte("value2"), version: version.NewHeight(1, 1)} // Create two hashed KV pairs hashedKV1 := keyValue{namespace: "ns2", collection: "col1", key: "hashedPvtKey1", keyHash: util.ComputeStringHash("hashedPvtKey1"), value: []byte("value1"), version: version.NewHeight(1, 2)} hashedKV2 := keyValue{namespace: "ns2", collection: "col2", key: "hashedPvtKey2", keyHash: util.ComputeStringHash("hashedPvtKey2"), value: []byte("value2"), version: version.NewHeight(1, 3)} // Store the public and hashed KV pairs to DB batch.PubUpdates.Put(pubKV1.namespace, pubKV1.key, pubKV1.value, pubKV1.version) batch.PubUpdates.Put(pubKV2.namespace, pubKV2.key, pubKV2.value, pubKV2.version) batch.HashUpdates.Put(hashedKV1.namespace, hashedKV1.collection, hashedKV1.keyHash, hashedKV1.value, hashedKV1.version) batch.HashUpdates.Put(hashedKV2.namespace, hashedKV2.collection, hashedKV2.keyHash, hashedKV2.value, hashedKV2.version) db.ApplyPrivacyAwareUpdates(batch, version.NewHeight(1, 4)) // Construct read set for transaction 1. It contains two public KV pairs (pubKV1, pubKV2) and two // hashed KV pairs (hashedKV1, hashedKV2). 
rwsetBuilder1 := rwsetutil.NewRWSetBuilder() rwsetBuilder1.AddToReadSet(pubKV1.namespace, pubKV1.key, pubKV1.version) rwsetBuilder1.AddToReadSet(pubKV2.namespace, pubKV2.key, pubKV2.version) rwsetBuilder1.AddToHashedReadSet(hashedKV1.namespace, hashedKV1.collection, hashedKV1.key, hashedKV1.version) rwsetBuilder1.AddToHashedReadSet(hashedKV2.namespace, hashedKV2.collection, hashedKV2.key, hashedKV2.version) // Construct read set for transaction 1. It contains KV pairs which are not in the state db. rwsetBuilder2 := rwsetutil.NewRWSetBuilder() rwsetBuilder2.AddToReadSet("ns3", "key1", nil) rwsetBuilder2.AddToHashedReadSet("ns3", "col1", "hashedPvtKey1", nil) // Construct internal block transRWSets := getTestPubSimulationRWSet(t, rwsetBuilder1, rwsetBuilder2) var trans []*transaction for i, tranRWSet := range transRWSets { tx := &transaction{ id: fmt.Sprintf("txid-%d", i), indexInBlock: i, validationCode: peer.TxValidationCode_VALID, rwset: tranRWSet, } trans = append(trans, tx) } blk := &block{num: 1, txs: trans} if testValidator.db.IsBulkOptimizable() { db := testValidator.db bulkOptimizable, _ := db.VersionedDB.(statedb.BulkOptimizable) // Clear cache loaded during ApplyPrivacyAwareUpdates() testValidator.db.ClearCachedVersions() testValidator.preLoadCommittedVersionOfRSet(blk) // pubKV1 should be found in cache version, keyFound := bulkOptimizable.GetCachedVersion(pubKV1.namespace, pubKV1.key) require.True(t, keyFound) require.Equal(t, pubKV1.version, version) // pubKV2 should be found in cache version, keyFound = bulkOptimizable.GetCachedVersion(pubKV2.namespace, pubKV2.key) require.True(t, keyFound) require.Equal(t, pubKV2.version, version) // [ns3, key1] should be found in cache as it was in the readset of transaction 1 though it is // not in the state db but the version would be nil version, keyFound = bulkOptimizable.GetCachedVersion("ns3", "key1") require.True(t, keyFound) require.Nil(t, version) // [ns4, key1] should not be found in cache as it was not 
loaded version, keyFound = bulkOptimizable.GetCachedVersion("ns4", "key1") require.False(t, keyFound) require.Nil(t, version) // hashedKV1 should be found in cache version, keyFound = testValidator.db.GetCachedKeyHashVersion(hashedKV1.namespace, hashedKV1.collection, hashedKV1.keyHash) require.True(t, keyFound) require.Equal(t, hashedKV1.version, version) // hashedKV2 should be found in cache version, keyFound = testValidator.db.GetCachedKeyHashVersion(hashedKV2.namespace, hashedKV2.collection, hashedKV2.keyHash) require.True(t, keyFound) require.Equal(t, hashedKV2.version, version) // [ns3, col1, hashedPvtKey1] should be found in cache as it was in the readset of transaction 2 though it is // not in the state db version, keyFound = testValidator.db.GetCachedKeyHashVersion("ns3", "col1", util.ComputeStringHash("hashedPvtKey1")) require.True(t, keyFound) require.Nil(t, version) // [ns4, col, key1] should not be found in cache as it was not loaded version, keyFound = testValidator.db.GetCachedKeyHashVersion("ns4", "col1", util.ComputeStringHash("key1")) require.False(t, keyFound) require.Nil(t, version) // Clear cache testValidator.db.ClearCachedVersions() // pubKV1 should not be found in cache as cahce got emptied version, keyFound = bulkOptimizable.GetCachedVersion(pubKV1.namespace, pubKV1.key) require.False(t, keyFound) require.Nil(t, version) // [ns3, col1, key1] should not be found in cache as cahce got emptied version, keyFound = testValidator.db.GetCachedKeyHashVersion("ns3", "col1", util.ComputeStringHash("hashedPvtKey1")) require.False(t, keyFound) require.Nil(t, version) } }
explode_data.jsonl/65026
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2104 }
[ 2830, 3393, 14256, 88194, 8578, 2124, 8233, 1155, 353, 8840, 836, 8, 341, 18185, 3506, 14359, 1669, 1273, 1702, 11562, 12447, 3026, 3506, 1944, 14359, 675, 921, 18185, 3506, 14359, 26849, 1155, 340, 16867, 1273, 3506, 14359, 727, 60639, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestBadReplicaValues(t *testing.T) { g := gomega.NewGomegaWithT(t) kfsvc := makeTestKFService() kfsvc.Spec.Default.Predictor.MinReplicas = -1 g.Expect(kfsvc.ValidateCreate()).Should(gomega.MatchError(MinReplicasLowerBoundExceededError)) kfsvc.Spec.Default.Predictor.MinReplicas = 1 kfsvc.Spec.Default.Predictor.MaxReplicas = -1 g.Expect(kfsvc.ValidateCreate()).Should(gomega.MatchError(MaxReplicasLowerBoundExceededError)) kfsvc.Spec.Default.Predictor.MinReplicas = 2 kfsvc.Spec.Default.Predictor.MaxReplicas = 1 g.Expect(kfsvc.ValidateCreate()).Should(gomega.MatchError(MinReplicasShouldBeLessThanMaxError)) }
explode_data.jsonl/7102
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 249 }
[ 2830, 3393, 17082, 18327, 15317, 6227, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 342, 32696, 7121, 38, 32696, 2354, 51, 1155, 340, 16463, 69, 58094, 1669, 1281, 2271, 65008, 1860, 741, 16463, 69, 58094, 36473, 13275, 1069, 8861, 269, 17...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNodeInitialized(t *testing.T) { fnh := &testutil.FakeNodeHandler{ Existing: []*v1.Node{ { ObjectMeta: metav1.ObjectMeta{ Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: v1.NodeStatus{ Conditions: []v1.NodeCondition{ { Type: v1.NodeReady, Status: v1.ConditionUnknown, LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, Spec: v1.NodeSpec{ Taints: []v1.Taint{ { Key: algorithm.TaintExternalCloudProvider, Value: "true", Effect: v1.TaintEffectNoSchedule, }, }, }, }, }, Clientset: fake.NewSimpleClientset(&v1.PodList{}), DeleteWaitChan: make(chan struct{}), } factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc()) fakeCloud := &fakecloud.FakeCloud{ InstanceTypes: map[types.NodeName]string{ types.NodeName("node0"): "t1.micro", }, Addresses: []v1.NodeAddress{ { Type: v1.NodeHostName, Address: "node0.cloud.internal", }, { Type: v1.NodeInternalIP, Address: "10.0.0.1", }, { Type: v1.NodeExternalIP, Address: "132.143.154.163", }, }, Err: nil, } eventBroadcaster := record.NewBroadcaster() cloudNodeController := &CloudNodeController{ kubeClient: fnh, nodeInformer: factory.Core().V1().Nodes(), cloud: fakeCloud, nodeMonitorPeriod: 1 * time.Second, recorder: eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}), nodeStatusUpdateFrequency: 1 * time.Second, } eventBroadcaster.StartLogging(glog.Infof) cloudNodeController.AddCloudNode(fnh.Existing[0]) if len(fnh.UpdatedNodes) != 1 || fnh.UpdatedNodes[0].Name != "node0" { t.Errorf("Node was not updated") } if len(fnh.UpdatedNodes[0].Spec.Taints) != 0 { t.Errorf("Node Taint was not removed after cloud init") } }
explode_data.jsonl/20607
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1042 }
[ 2830, 3393, 1955, 22495, 1155, 353, 8840, 836, 8, 341, 1166, 16719, 1669, 609, 1944, 1314, 991, 726, 1955, 3050, 515, 197, 197, 53067, 25, 29838, 85, 16, 21714, 515, 298, 197, 515, 571, 23816, 12175, 25, 77520, 16, 80222, 515, 464, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestAddImport(t *testing.T) { for _, test := range addTests { file := parse(t, test.name, test.in) var before bytes.Buffer ast.Fprint(&before, fset, file, nil) AddNamedImport(fset, file, test.renamedPkg, test.pkg) if got := print(t, test.name, file); got != test.out { if test.broken { t.Logf("%s is known broken:\ngot: %s\nwant: %s", test.name, got, test.out) } else { t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out) } var after bytes.Buffer ast.Fprint(&after, fset, file, nil) t.Logf("AST before:\n%s\nAST after:\n%s\n", before.String(), after.String()) } } }
explode_data.jsonl/5959
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 276 }
[ 2830, 3393, 2212, 11511, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 1273, 1669, 2088, 912, 18200, 341, 197, 17661, 1669, 4715, 1155, 11, 1273, 2644, 11, 1273, 1858, 340, 197, 2405, 1573, 5820, 22622, 198, 197, 88836, 991, 1350, 2099, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestWALSegmentSizeOptions(t *testing.T) { tests := map[int]func(dbdir string, segmentSize int){ // Default Wal Size. 0: func(dbDir string, segmentSize int) { files, err := ioutil.ReadDir(filepath.Join(dbDir, "wal")) testutil.Ok(t, err) for _, f := range files[:len(files)-1] { testutil.Equals(t, int64(DefaultOptions.WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name()) } lastFile := files[len(files)-1] testutil.Assert(t, int64(DefaultOptions.WALSegmentSize) > lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name()) }, // Custom Wal Size. 2 * 32 * 1024: func(dbDir string, segmentSize int) { files, err := ioutil.ReadDir(filepath.Join(dbDir, "wal")) testutil.Assert(t, len(files) > 1, "current WALSegmentSize should result in more than a single WAL file.") testutil.Ok(t, err) for _, f := range files[:len(files)-1] { testutil.Equals(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name()) } lastFile := files[len(files)-1] testutil.Assert(t, int64(segmentSize) > lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name()) }, // Wal disabled. -1: func(dbDir string, segmentSize int) { if _, err := os.Stat(filepath.Join(dbDir, "wal")); !os.IsNotExist(err) { t.Fatal("wal directory is present when the wal is disabled") } }, } for segmentSize, testFunc := range tests { t.Run(fmt.Sprintf("WALSegmentSize %d test", segmentSize), func(t *testing.T) { options := *DefaultOptions options.WALSegmentSize = segmentSize db, delete := openTestDB(t, &options) defer delete() app := db.Appender() for i := int64(0); i < 155; i++ { _, err := app.Add(labels.Labels{labels.Label{Name: "wal", Value: "size"}}, i, rand.Float64()) testutil.Ok(t, err) testutil.Ok(t, app.Commit()) } dbDir := db.Dir() db.Close() testFunc(dbDir, options.WALSegmentSize) }) } }
explode_data.jsonl/64374
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 843 }
[ 2830, 3393, 54, 969, 21086, 1695, 3798, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 2415, 18640, 60, 2830, 9791, 3741, 914, 11, 10238, 1695, 526, 1264, 197, 197, 322, 7899, 14574, 8478, 624, 197, 197, 15, 25, 2915, 9791, 6184, 914, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestScanCmd_Invalid(t *testing.T) { cases := []struct { args []string expected string }{ {args: []string{"scan", "test"}, expected: `unknown command "test" for "root scan"`}, {args: []string{"scan", "-e"}, expected: `unknown shorthand flag: 'e' in -e`}, {args: []string{"scan", "--error"}, expected: `unknown flag: --error`}, {args: []string{"scan", "-t"}, expected: `flag needs an argument: 't' in -t`}, {args: []string{"scan", "-t", "glou"}, expected: "unsupported cloud provider 'glou'\nValid values are: aws+tf,github+tf"}, {args: []string{"scan", "--to"}, expected: `flag needs an argument: --to`}, {args: []string{"scan", "--to", "glou"}, expected: "unsupported cloud provider 'glou'\nValid values are: aws+tf,github+tf"}, {args: []string{"scan", "-f"}, expected: `flag needs an argument: 'f' in -f`}, {args: []string{"scan", "--from"}, expected: `flag needs an argument: --from`}, {args: []string{"scan", "--from"}, expected: `flag needs an argument: --from`}, {args: []string{"scan", "--from", "tosdgjhgsdhgkjs"}, expected: "Unable to parse from flag 'tosdgjhgsdhgkjs': \nAccepted schemes are: tfstate://,tfstate+s3://,tfstate+http://,tfstate+https://,tfstate+tfcloud://"}, {args: []string{"scan", "--from", "://"}, expected: "Unable to parse from flag '://': \nAccepted schemes are: tfstate://,tfstate+s3://,tfstate+http://,tfstate+https://,tfstate+tfcloud://"}, {args: []string{"scan", "--from", "://test"}, expected: "Unable to parse from flag '://test': \nAccepted schemes are: tfstate://,tfstate+s3://,tfstate+http://,tfstate+https://,tfstate+tfcloud://"}, {args: []string{"scan", "--from", "tosdgjhgsdhgkjs://"}, expected: "Unable to parse from flag 'tosdgjhgsdhgkjs://': \nAccepted schemes are: tfstate://,tfstate+s3://,tfstate+http://,tfstate+https://,tfstate+tfcloud://"}, {args: []string{"scan", "--from", "terraform+foo+bar://test"}, expected: "Unable to parse from scheme 'terraform+foo+bar': \nAccepted schemes are: 
tfstate://,tfstate+s3://,tfstate+http://,tfstate+https://,tfstate+tfcloud://"}, {args: []string{"scan", "--from", "unsupported://test"}, expected: "Unsupported IaC source 'unsupported': \nAccepted values are: tfstate"}, {args: []string{"scan", "--from", "tfstate+foobar://test"}, expected: "Unsupported IaC backend 'foobar': \nAccepted values are: s3,http,https,tfcloud"}, {args: []string{"scan", "--from", "tfstate:///tmp/test", "--from", "tfstate+toto://test"}, expected: "Unsupported IaC backend 'toto': \nAccepted values are: s3,http,https,tfcloud"}, {args: []string{"scan", "--filter", "Type='test'"}, expected: "unable to parse filter expression: SyntaxError: Expected tRbracket, received: tUnknown"}, {args: []string{"scan", "--filter", "Type='test'", "--filter", "Type='test2'"}, expected: "Filter flag should be specified only once"}, {args: []string{"scan", "--tf-provider-version", ".30.2"}, expected: "Invalid version argument .30.2, expected a valid semver string (e.g. 2.13.4)"}, {args: []string{"scan", "--tf-provider-version", "foo"}, expected: "Invalid version argument foo, expected a valid semver string (e.g. 2.13.4)"}, {args: []string{"scan", "--driftignore"}, expected: "flag needs an argument: --driftignore"}, } for _, tt := range cases { rootCmd := &cobra.Command{Use: "root"} rootCmd.AddCommand(NewScanCmd()) _, err := test.Execute(rootCmd, tt.args...) if err == nil { t.Errorf("Invalid arg should generate error") } if err.Error() != tt.expected { t.Errorf("Expected '%v', got '%v'", tt.expected, err) } } }
explode_data.jsonl/60887
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1276 }
[ 2830, 3393, 26570, 15613, 62, 7928, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 31215, 257, 3056, 917, 198, 197, 42400, 914, 198, 197, 59403, 197, 197, 90, 2116, 25, 3056, 917, 4913, 16405, 497, 330, 1944, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestApplyToColumn4(t *testing.T) { ts := common.NextGlobalSeqNum() chain := updates.MockColumnUpdateChain() node := updates.NewCommittedColumnNode(ts, ts, nil, nil) node.AttachTo(chain) node.UpdateLocked(3, int32(8)) vec := &gvec.Vector{} vec.Typ.Oid = types.T_int32 vec.Col = []int32{1, 2, 3, 4} fmt.Printf("%v\n->\n", vec.Col) res := node.ApplyToColumn(vec, nil) fmt.Printf("%v\n", res.Col) }
explode_data.jsonl/14649
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 179 }
[ 2830, 3393, 28497, 1249, 2933, 19, 1155, 353, 8840, 836, 8, 341, 57441, 1669, 4185, 18501, 11646, 20183, 4651, 741, 197, 8819, 1669, 8837, 24664, 2933, 4289, 18837, 741, 20831, 1669, 8837, 7121, 1092, 5483, 2933, 1955, 35864, 11, 10591, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAction_GetRepoLink(t *testing.T) { assert.NoError(t, PrepareTestDatabase()) repo := AssertExistsAndLoadBean(t, &Repository{}).(*Repository) owner := AssertExistsAndLoadBean(t, &User{ID: repo.OwnerID}).(*User) action := &Action{RepoID: repo.ID} setting.AppSubURL = "/suburl/" expected := path.Join(setting.AppSubURL, owner.Name, repo.Name) assert.Equal(t, expected, action.GetRepoLink()) }
explode_data.jsonl/74204
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 153 }
[ 2830, 3393, 2512, 13614, 25243, 3939, 1155, 353, 8840, 836, 8, 341, 6948, 35699, 1155, 11, 31166, 2271, 5988, 2398, 17200, 5368, 1669, 5319, 15575, 3036, 5879, 10437, 1155, 11, 609, 4624, 6257, 568, 4071, 4624, 340, 197, 8118, 1669, 531...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMultistoreLoadWithUpgrade(t *testing.T) { var db dbm.DB = dbm.NewMemDB() store := newMultiStoreWithMounts(db, types.PruneNothing) err := store.LoadLatestVersion() require.Nil(t, err) // write some data in all stores k1, v1 := []byte("first"), []byte("store") s1, _ := store.getStoreByName("store1").(types.KVStore) require.NotNil(t, s1) s1.Set(k1, v1) k2, v2 := []byte("second"), []byte("restore") s2, _ := store.getStoreByName("store2").(types.KVStore) require.NotNil(t, s2) s2.Set(k2, v2) k3, v3 := []byte("third"), []byte("dropped") s3, _ := store.getStoreByName("store3").(types.KVStore) require.NotNil(t, s3) s3.Set(k3, v3) s4, _ := store.getStoreByName("store4").(types.KVStore) require.Nil(t, s4) // do one commit commitID := store.Commit() expectedCommitID := getExpectedCommitID(store, 1) checkStore(t, store, expectedCommitID, commitID) ci, err := getCommitInfo(db, 1) require.NoError(t, err) require.Equal(t, int64(1), ci.Version) require.Equal(t, 3, len(ci.StoreInfos)) checkContains(t, ci.StoreInfos, []string{"store1", "store2", "store3"}) // Load without changes and make sure it is sensible store = newMultiStoreWithMounts(db, types.PruneNothing) err = store.LoadLatestVersion() require.Nil(t, err) commitID = getExpectedCommitID(store, 1) checkStore(t, store, commitID, commitID) // let's query data to see it was saved properly s2, _ = store.getStoreByName("store2").(types.KVStore) require.NotNil(t, s2) require.Equal(t, v2, s2.Get(k2)) // now, let's load with upgrades... 
restore, upgrades := newMultiStoreWithModifiedMounts(db, types.PruneNothing) err = restore.LoadLatestVersionAndUpgrade(upgrades) require.Nil(t, err) // s1 was not changed s1, _ = restore.getStoreByName("store1").(types.KVStore) require.NotNil(t, s1) require.Equal(t, v1, s1.Get(k1)) // store3 is mounted, but data deleted are gone s3, _ = restore.getStoreByName("store3").(types.KVStore) require.NotNil(t, s3) require.Nil(t, s3.Get(k3)) // data was deleted // store4 is mounted, with empty data s4, _ = restore.getStoreByName("store4").(types.KVStore) require.NotNil(t, s4) iterator := s4.Iterator(nil, nil) values := 0 for ; iterator.Valid(); iterator.Next() { values += 1 } require.Zero(t, values) require.NoError(t, iterator.Close()) // write something inside store4 k4, v4 := []byte("fourth"), []byte("created") s4.Set(k4, v4) // store2 is no longer mounted st2 := restore.getStoreByName("store2") require.Nil(t, st2) // restore2 has the old data rs2, _ := restore.getStoreByName("restore2").(types.KVStore) require.NotNil(t, rs2) require.Equal(t, v2, rs2.Get(k2)) // store this migrated data, and load it again without migrations migratedID := restore.Commit() require.Equal(t, migratedID.Version, int64(2)) reload, _ := newMultiStoreWithModifiedMounts(db, types.PruneNothing) err = reload.LoadLatestVersion() require.Nil(t, err) require.Equal(t, migratedID, reload.LastCommitID()) // query this new store rl1, _ := reload.getStoreByName("store1").(types.KVStore) require.NotNil(t, rl1) require.Equal(t, v1, rl1.Get(k1)) rl2, _ := reload.getStoreByName("restore2").(types.KVStore) require.NotNil(t, rl2) require.Equal(t, v2, rl2.Get(k2)) rl4, _ := reload.getStoreByName("store4").(types.KVStore) require.NotNil(t, rl4) require.Equal(t, v4, rl4.Get(k4)) // check commitInfo in storage ci, err = getCommitInfo(db, 2) require.NoError(t, err) require.Equal(t, int64(2), ci.Version) require.Equal(t, 4, len(ci.StoreInfos), ci.StoreInfos) checkContains(t, ci.StoreInfos, []string{"store1", 
"restore2", "store3", "store4"}) }
explode_data.jsonl/44868
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1481 }
[ 2830, 3393, 40404, 380, 460, 5879, 2354, 43861, 1155, 353, 8840, 836, 8, 341, 2405, 2927, 2927, 76, 22537, 284, 2927, 76, 7121, 18816, 3506, 741, 57279, 1669, 501, 20358, 6093, 2354, 16284, 82, 9791, 11, 4494, 17947, 2886, 23780, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUpdateRepo(t *testing.T) { expectedArgs := []string{"repo", "update"} helm, runner := createHelm(t, nil, "") err := helm.UpdateRepo() assert.NoError(t, err, "should update helm repo without any error") verifyArgs(t, helm, runner, expectedArgs...) }
explode_data.jsonl/4645
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 91 }
[ 2830, 3393, 4289, 25243, 1155, 353, 8840, 836, 8, 341, 42400, 4117, 1669, 3056, 917, 4913, 23476, 497, 330, 2386, 16707, 9598, 23162, 11, 22259, 1669, 1855, 39, 23162, 1155, 11, 2092, 11, 85617, 9859, 1669, 33765, 16689, 25243, 2822, 69...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAllowsReferencedImagePullSecrets(t *testing.T) { ns := "myns" admit := NewServiceAccount() informerFactory := informers.NewSharedInformerFactory(nil, controller.NoResyncPeriodFunc()) admit.SetExternalKubeInformerFactory(informerFactory) admit.LimitSecretReferences = true admit.RequireAPIToken = false // Add the default service account for the ns with a secret reference into the cache informerFactory.Core().V1().ServiceAccounts().Informer().GetStore().Add(&corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: DefaultServiceAccountName, Namespace: ns, }, ImagePullSecrets: []corev1.LocalObjectReference{ {Name: "foo"}, }, }) pod := &api.Pod{ Spec: api.PodSpec{ ImagePullSecrets: []api.LocalObjectReference{{Name: "foo"}}, }, } attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil) err := admit.Admit(attrs) if err != nil { t.Errorf("Unexpected error: %v", err) } }
explode_data.jsonl/61352
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 369 }
[ 2830, 3393, 79595, 47447, 5767, 1906, 36068, 19773, 82, 1155, 353, 8840, 836, 8, 341, 84041, 1669, 330, 76, 1872, 82, 1837, 98780, 1763, 1669, 1532, 1860, 7365, 741, 17430, 34527, 4153, 1669, 6051, 388, 7121, 16997, 641, 34527, 4153, 27...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestHbdmSwap_GetFutureDepth(t *testing.T) { dep, err := swap.GetFutureDepth(goex.BTC_USD, goex.SWAP_CONTRACT, 5) t.Log(err) t.Log(dep.AskList) t.Log(dep.BidList) }
explode_data.jsonl/43250
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 83 }
[ 2830, 3393, 39, 8940, 76, 46179, 13614, 24206, 19776, 1155, 353, 8840, 836, 8, 341, 197, 14891, 11, 1848, 1669, 14291, 2234, 24206, 19776, 47415, 327, 1785, 7749, 13467, 35, 11, 728, 327, 808, 54, 2537, 4307, 41105, 11, 220, 20, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestListIssues(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { t.Errorf("Bad method: %s", r.Method) } if r.URL.Path == "/repos/k8s/kuber/issues" { ics := []Issue{{Number: 1}} b, err := json.Marshal(ics) if err != nil { t.Fatalf("Didn't expect error: %v", err) } w.Header().Set("Link", fmt.Sprintf(`<blorp>; rel="first", <https://%s/someotherpath>; rel="next"`, r.Host)) fmt.Fprint(w, string(b)) } else if r.URL.Path == "/someotherpath" { ics := []Issue{{Number: 2}} b, err := json.Marshal(ics) if err != nil { t.Fatalf("Didn't expect error: %v", err) } fmt.Fprint(w, string(b)) } else { t.Errorf("Bad request path: %s", r.URL.Path) } })) defer ts.Close() c := getClient(ts.URL) ics, err := c.ListOpenIssues("k8s", "kuber") if err != nil { t.Errorf("Didn't expect error: %v", err) } else if len(ics) != 2 { t.Errorf("Expected two issues, found %d: %v", len(ics), ics) } else if ics[0].Number != 1 || ics[1].Number != 2 { t.Errorf("Wrong issue IDs: %v", ics) } }
explode_data.jsonl/6259
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 518 }
[ 2830, 3393, 852, 85828, 1155, 353, 8840, 836, 8, 341, 57441, 1669, 54320, 70334, 7121, 13470, 1220, 2836, 19886, 89164, 18552, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 341, 197, 743, 435, 20798, 961, 1758, 20798, 1949, 341, 298, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestValidateMultipleMethodsAuthInfo(t *testing.T) { config := clientcmdapi.NewConfig() config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ Token: "token", Username: "username", } test := configValidationTest{ config: config, expectedErrorSubstring: []string{"more than one authentication method", "token", "basicAuth"}, } test.testAuthInfo("error", t) test.testConfig(t) }
explode_data.jsonl/13499
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 151 }
[ 2830, 3393, 17926, 32089, 17856, 5087, 1731, 1155, 353, 8840, 836, 8, 341, 25873, 1669, 2943, 8710, 2068, 7121, 2648, 741, 25873, 25233, 38059, 1183, 841, 1341, 284, 609, 2972, 8710, 2068, 25233, 1731, 515, 197, 33299, 25, 262, 330, 583...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestExtends4(t *testing.T) { assert.NoError(t, PrepareEngine()) err := testEngine.DropTables(&Message{}, &MessageUser{}, &MessageType{}) assert.NoError(t, err) err = testEngine.CreateTables(&Message{}, &MessageUser{}, &MessageType{}) assert.NoError(t, err) var sender = MessageUser{Name: "sender"} var msgtype = MessageType{Name: "type"} _, err = testEngine.Insert(&sender, &msgtype) assert.NoError(t, err) msg := Message{ MessageBase: MessageBase{ Id: msgtype.Id, }, Title: "test", Content: "test", Uid: sender.Id, } session := testEngine.NewSession() defer session.Close() // MSSQL deny insert identity column excep declare as below if testEngine.Dialect().URI().DBType == schemas.MSSQL { err = session.Begin() assert.NoError(t, err) _, err = session.Exec("SET IDENTITY_INSERT message ON") assert.NoError(t, err) } _, err = session.Insert(&msg) assert.NoError(t, err) if testEngine.Dialect().URI().DBType == schemas.MSSQL { err = session.Commit() assert.NoError(t, err) } var mapper = testEngine.GetTableMapper().Obj2Table var quote = testEngine.Quote userTableName := quote(testEngine.TableName(mapper("MessageUser"), true)) typeTableName := quote(testEngine.TableName(mapper("MessageType"), true)) msgTableName := quote(testEngine.TableName(mapper("Message"), true)) list := make([]MessageExtend4, 0) err = session.Table(msgTableName).Join("LEFT", userTableName, userTableName+".`"+mapper("Id")+"`="+msgTableName+".`"+mapper("Uid")+"`"). Join("LEFT", typeTableName, typeTableName+".`"+mapper("Id")+"`="+msgTableName+".`"+mapper("Id")+"`"). Find(&list) assert.NoError(t, err) assert.EqualValues(t, len(list), 1) assert.EqualValues(t, list[0].Message.Id, msg.Id) assert.EqualValues(t, list[0].MessageUser.Id, sender.Id) assert.EqualValues(t, list[0].MessageUser.Name, sender.Name) assert.EqualValues(t, list[0].MessageType.Id, msgtype.Id) assert.EqualValues(t, list[0].MessageType.Name, msgtype.Name) }
explode_data.jsonl/19197
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 758 }
[ 2830, 3393, 6756, 1412, 19, 1155, 353, 8840, 836, 8, 341, 6948, 35699, 1155, 11, 31166, 4571, 12367, 9859, 1669, 1273, 4571, 21688, 21670, 2099, 2052, 22655, 609, 2052, 1474, 22655, 609, 82107, 37790, 6948, 35699, 1155, 11, 1848, 692, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestLoadPackage(t *testing.T) { si, err := simple_icons_go.Load() if err != nil { t.Error("unable to load package") } if reflect.TypeOf(si) != reflect.TypeOf(simple_icons_go.SimpleIcon{}) { t.Errorf("unable to load package") } }
explode_data.jsonl/37384
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 95 }
[ 2830, 3393, 5879, 13100, 1155, 353, 8840, 836, 8, 341, 1903, 72, 11, 1848, 1669, 4285, 59819, 25515, 13969, 741, 743, 1848, 961, 2092, 341, 197, 3244, 6141, 445, 45928, 311, 2795, 6328, 1138, 197, 532, 743, 8708, 73921, 76352, 8, 961,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCalculateChangesManagedAnnotations(t *testing.T) { tests := map[string]struct { platformFixture string templateFixture string expectedAction string expectedDiffGoldenFile string }{ "Without annotations": { platformFixture: "is-platform", templateFixture: "is-template", expectedAction: "Noop", }, "Present in template, not in platform": { platformFixture: "is-platform", templateFixture: "is-template-annotation", expectedAction: "Update", expectedDiffGoldenFile: "present-in-template-not-in-platform", }, "Present in platform, not in template": { platformFixture: "is-platform-annotation", templateFixture: "is-template", expectedAction: "Update", expectedDiffGoldenFile: "present-in-platform-not-in-template", }, "Present in both": { platformFixture: "is-platform-annotation", templateFixture: "is-template-annotation", expectedAction: "Noop", }, "Present in platform, changed in template": { platformFixture: "is-platform-annotation", templateFixture: "is-template-annotation-changed", expectedAction: "Update", expectedDiffGoldenFile: "present-in-platform-changed-in-template", }, "Present in platform, different key in template": { platformFixture: "is-platform-annotation", templateFixture: "is-template-different-annotation", expectedAction: "Update", expectedDiffGoldenFile: "present-in-platform-different-key-in-template", }, "Unmanaged in platform added to template": { platformFixture: "is-platform-unmanaged", templateFixture: "is-template-annotation", expectedAction: "Noop", }, "Unmanaged in platform, none in template": { platformFixture: "is-platform-unmanaged", templateFixture: "is-template", expectedAction: "Noop", }, "Unmanaged in platform, none in template, and other change in template": { platformFixture: "is-platform-unmanaged", templateFixture: "is-template-other-change", expectedAction: "Update", expectedDiffGoldenFile: "unmanaged-in-platform-none-in-template-other-change-in-template", }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { 
platformItem := getPlatformItem(t, "item-managed-annotations/"+tc.platformFixture+".yml") templateItem := getTemplateItem(t, "item-managed-annotations/"+tc.templateFixture+".yml") changes, err := calculateChanges(templateItem, platformItem, []string{}, true) if err != nil { t.Fatal(err) } if len(changes) != 1 { t.Fatalf("Expected 1 change, got: %d", len(changes)) } actualChange := changes[0] if actualChange.Action != tc.expectedAction { t.Fatalf("Expected change action to be: %s, got: %s", tc.expectedAction, actualChange.Action) } if len(tc.expectedDiffGoldenFile) > 0 { want := strings.TrimSpace(getGoldenDiff(t, "item-managed-annotations", tc.expectedDiffGoldenFile+".txt")) got := strings.TrimSpace(actualChange.Diff(true)) if diff := cmp.Diff(want, got); diff != "" { t.Errorf("Change diff mismatch (-want +got):\n%s", diff) } } }) } }
explode_data.jsonl/33770
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1233 }
[ 2830, 3393, 47866, 11317, 27192, 21418, 1155, 353, 8840, 836, 8, 1476, 78216, 1669, 2415, 14032, 60, 1235, 341, 197, 197, 15734, 18930, 286, 914, 198, 197, 22832, 18930, 286, 914, 198, 197, 42400, 2512, 260, 914, 198, 197, 42400, 21751,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestEquals(t *testing.T) { t.Parallel() s1 := []float64{1, 2, 3, 4} s2 := []float64{1, 2, 3, 4} if !Equal(s1, s2) { t.Errorf("Equal slices returned as unequal") } s2 = []float64{1, 2, 3, 4 + 1e-14} if Equal(s1, s2) { t.Errorf("Unequal slices returned as equal") } if Equal(s1, []float64{}) { t.Errorf("Unequal slice lengths returned as equal") } }
explode_data.jsonl/1219
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 174 }
[ 2830, 3393, 4315, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 1903, 16, 1669, 3056, 3649, 21, 19, 90, 16, 11, 220, 17, 11, 220, 18, 11, 220, 19, 532, 1903, 17, 1669, 3056, 3649, 21, 19, 90, 16, 11, 220, 17, 11, 220, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestSelectLockInShare(t *testing.T) { store, clean := realtikvtest.CreateMockStoreAndSetup(t) defer clean() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("DROP TABLE IF EXISTS t_sel_in_share") tk.MustExec("CREATE TABLE t_sel_in_share (id int DEFAULT NULL)") tk.MustExec("insert into t_sel_in_share values (11)") require.Error(t, tk.ExecToErr("select * from t_sel_in_share lock in share mode")) tk.MustExec("set @@tidb_enable_noop_functions = 1") tk.MustQuery("select * from t_sel_in_share lock in share mode").Check(testkit.Rows("11")) tk.MustExec("DROP TABLE t_sel_in_share") }
explode_data.jsonl/5753
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 240 }
[ 2830, 3393, 3379, 11989, 641, 12115, 1155, 353, 8840, 836, 8, 341, 57279, 11, 4240, 1669, 1931, 83, 1579, 85, 1944, 7251, 11571, 6093, 3036, 21821, 1155, 340, 16867, 4240, 2822, 3244, 74, 1669, 1273, 8226, 7121, 2271, 7695, 1155, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCloseLogFile(t *testing.T) { Logger = &Logging{ Enabled: trueptr, Level: "DEBUG", ColourOutput: false, File: "", Rotate: false, } SetupLogger() err := CloseLogFile() if err != nil { t.Fatalf("CloseLogFile failed with %v", err) } os.Remove(path.Join(LogPath, Logger.File)) }
explode_data.jsonl/63109
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 153 }
[ 2830, 3393, 7925, 98857, 1155, 353, 8840, 836, 8, 341, 55861, 284, 609, 34575, 515, 197, 197, 5462, 25, 414, 830, 3505, 345, 197, 197, 4449, 25, 286, 330, 5150, 756, 197, 197, 33281, 5097, 25, 895, 345, 197, 24848, 25, 260, 8324, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSpaceService_Create(t *testing.T) { testCases := []struct { name string payload *model.CreateSpaceScheme private bool mockFile string wantHTTPMethod string endpoint string context context.Context wantHTTPCodeReturn int wantErr bool }{ { name: "CreateSpaceWhenTheParametersAreCorrect", payload: &model.CreateSpaceScheme{ Key: "DUM", Name: "Dum Confluence Space", Description: &model.CreateSpaceDescriptionScheme{ Plain: &model.CreateSpaceDescriptionPlainScheme{ Value: "Confluence Space Description Sample", Representation: "plain", }, }, AnonymousAccess: true, UnlicensedAccess: false, }, private: false, mockFile: "./mocks/get-space.json", wantHTTPMethod: http.MethodPost, endpoint: "/wiki/rest/api/space", context: context.Background(), wantHTTPCodeReturn: http.StatusCreated, wantErr: false, }, { name: "CreateSpaceWhenTheSpaceKeyIsNotProvided", payload: &model.CreateSpaceScheme{ Key: "", Name: "Dum Confluence Space", Description: &model.CreateSpaceDescriptionScheme{ Plain: &model.CreateSpaceDescriptionPlainScheme{ Value: "Confluence Space Description Sample", Representation: "plain", }, }, AnonymousAccess: true, UnlicensedAccess: false, }, private: false, mockFile: "./mocks/get-space.json", wantHTTPMethod: http.MethodPost, endpoint: "/wiki/rest/api/space", context: context.Background(), wantHTTPCodeReturn: http.StatusCreated, wantErr: true, }, { name: "CreateSpaceWhenTheSpaceNameIsNotProvided", payload: &model.CreateSpaceScheme{ Key: "DUM", Name: "", Description: &model.CreateSpaceDescriptionScheme{ Plain: &model.CreateSpaceDescriptionPlainScheme{ Value: "Confluence Space Description Sample", Representation: "plain", }, }, AnonymousAccess: true, UnlicensedAccess: false, }, private: false, mockFile: "./mocks/get-space.json", wantHTTPMethod: http.MethodPost, endpoint: "/wiki/rest/api/space", context: context.Background(), wantHTTPCodeReturn: http.StatusCreated, wantErr: true, }, { name: "CreateSpaceWhenThePayloadIsNotProvided", payload: nil, private: false, 
mockFile: "./mocks/get-space.json", wantHTTPMethod: http.MethodPost, endpoint: "/wiki/rest/api/space", context: context.Background(), wantHTTPCodeReturn: http.StatusCreated, wantErr: true, }, { name: "CreateSpaceWhenTheContextIsNotProvided", payload: &model.CreateSpaceScheme{ Key: "DUM", Name: "Dum Confluence Space", Description: &model.CreateSpaceDescriptionScheme{ Plain: &model.CreateSpaceDescriptionPlainScheme{ Value: "Confluence Space Description Sample", Representation: "plain", }, }, AnonymousAccess: true, UnlicensedAccess: false, }, private: false, mockFile: "./mocks/get-space.json", wantHTTPMethod: http.MethodPost, endpoint: "/wiki/rest/api/space", context: nil, wantHTTPCodeReturn: http.StatusCreated, wantErr: true, }, { name: "CreateSpaceWhenTheRequestMethodIsIncorrect", payload: &model.CreateSpaceScheme{ Key: "DUM", Name: "Dum Confluence Space", Description: &model.CreateSpaceDescriptionScheme{ Plain: &model.CreateSpaceDescriptionPlainScheme{ Value: "Confluence Space Description Sample", Representation: "plain", }, }, AnonymousAccess: true, UnlicensedAccess: false, }, private: false, mockFile: "./mocks/get-space.json", wantHTTPMethod: http.MethodDelete, endpoint: "/wiki/rest/api/space", context: context.Background(), wantHTTPCodeReturn: http.StatusCreated, wantErr: true, }, { name: "CreateSpaceWhenTheStatusCodeIsIncorrect", payload: &model.CreateSpaceScheme{ Key: "DUM", Name: "Dum Confluence Space", Description: &model.CreateSpaceDescriptionScheme{ Plain: &model.CreateSpaceDescriptionPlainScheme{ Value: "Confluence Space Description Sample", Representation: "plain", }, }, AnonymousAccess: true, UnlicensedAccess: false, }, private: false, mockFile: "./mocks/get-space.json", wantHTTPMethod: http.MethodPost, endpoint: "/wiki/rest/api/space", context: context.Background(), wantHTTPCodeReturn: http.StatusBadRequest, wantErr: true, }, { name: "CreateSpaceWhenTheSpaceRequestedIsPrivate", payload: &model.CreateSpaceScheme{ Key: "DUM", Name: "Dum Confluence 
Space", Description: &model.CreateSpaceDescriptionScheme{ Plain: &model.CreateSpaceDescriptionPlainScheme{ Value: "Confluence Space Description Sample", Representation: "plain", }, }, AnonymousAccess: true, UnlicensedAccess: false, }, private: true, mockFile: "./mocks/get-space.json", wantHTTPMethod: http.MethodPost, endpoint: "/wiki/rest/api/space/_private", context: context.Background(), wantHTTPCodeReturn: http.StatusCreated, wantErr: false, }, } for _, testCase := range testCases { testCase := testCase t.Run(testCase.name, func(t *testing.T) { t.Parallel() //Init a new HTTP mock server mockOptions := mockServerOptions{ Endpoint: testCase.endpoint, MockFilePath: testCase.mockFile, MethodAccepted: testCase.wantHTTPMethod, ResponseCodeWanted: testCase.wantHTTPCodeReturn, } mockServer, err := startMockServer(&mockOptions) if err != nil { t.Fatal(err) } defer mockServer.Close() //Init the library instance mockClient, err := startMockClient(mockServer.URL) if err != nil { t.Fatal(err) } service := &SpaceService{client: mockClient} gotResult, gotResponse, err := service.Create(testCase.context, testCase.payload, testCase.private) if testCase.wantErr { if err != nil { t.Logf("error returned: %v", err.Error()) } assert.Error(t, err) } else { assert.NoError(t, err) assert.NotEqual(t, gotResponse, nil) assert.NotEqual(t, gotResult, nil) apiEndpoint, err := url.Parse(gotResponse.Endpoint) if err != nil { t.Fatal(err) } var endpointToAssert string if apiEndpoint.Query().Encode() != "" { endpointToAssert = fmt.Sprintf("%v?%v", apiEndpoint.Path, apiEndpoint.Query().Encode()) } else { endpointToAssert = apiEndpoint.Path } t.Logf("HTTP Endpoint Wanted: %v, HTTP Endpoint Returned: %v", testCase.endpoint, endpointToAssert) assert.Equal(t, testCase.endpoint, endpointToAssert) } }) } }
explode_data.jsonl/39244
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 3382 }
[ 2830, 3393, 9914, 1860, 34325, 1155, 353, 8840, 836, 8, 1476, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 2290, 914, 198, 197, 76272, 310, 353, 2528, 7251, 9914, 28906, 198, 197, 2455, 310, 1807, 198, 197, 77333, 1703, 1843, 914, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestNetPrioSetIfPrio(t *testing.T) { helper := NewCgroupTestUtil("net_prio", t) helper.CgroupData.config.Resources.NetPrioIfpriomap = prioMap netPrio := &NetPrioGroup{} if err := netPrio.Set(helper.CgroupPath, helper.CgroupData.config.Resources); err != nil { t.Fatal(err) } value, err := fscommon.GetCgroupParamString(helper.CgroupPath, "net_prio.ifpriomap") if err != nil { t.Fatal(err) } if !strings.Contains(value, "test 5") { t.Fatal("Got the wrong value, set net_prio.ifpriomap failed.") } }
explode_data.jsonl/54998
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 217 }
[ 2830, 3393, 6954, 47, 10383, 1649, 2679, 47, 10383, 1155, 353, 8840, 836, 8, 341, 9598, 2947, 1669, 1532, 34, 4074, 2271, 2742, 445, 4711, 620, 10383, 497, 259, 692, 9598, 2947, 727, 4074, 1043, 5423, 21703, 16993, 47, 10383, 2679, 68...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestParse(t *testing.T) { table := map[string]Version{ "0.0.0": Version{0, 0, 0, 0}, "0.01.0": Version{0, 1, 0, 0}, "2.0.0.1": Version{2, 0, 0, 1}, "3.3.3": Version{3, 3, 3, 0}, "0.0.1": Version{0, 0, 1, 0}, "0.0.0.1": Version{0, 0, 0, 1}, "1.3.3.7": Version{1, 3, 3, 7}, "1.203": Version{1, 203, 0, 0}, } for input, expected := range table { got, err := Parse(input) if err != nil { t.Errorf("failed to parse %q: %v", input, err) } if expected.Compare(got) != 0 { t.Errorf("failed to parse %q: expected %v, got %v", input, expected, got) } } }
explode_data.jsonl/74324
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 305 }
[ 2830, 3393, 14463, 1155, 353, 8840, 836, 8, 341, 26481, 1669, 2415, 14032, 60, 5637, 515, 197, 197, 1, 15, 13, 15, 13, 15, 788, 256, 6079, 90, 15, 11, 220, 15, 11, 220, 15, 11, 220, 15, 1583, 197, 197, 1, 15, 13, 15, 16, 13,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestResolveMapParam(t *testing.T) { m := &Manifest{ Parameters: []ParameterDefinition{ ParameterDefinition{ Name: "person", }, }, } os.Setenv("PERSON", "Ralpha") s := &Step{ Description: "a test step", Data: map[string]interface{}{ "Parameters": map[string]interface{}{ "Thing": map[string]interface{}{ "source": "bundle.parameters.person", }, }, }, } err := m.ResolveStep(s) require.NoError(t, err) pms, ok := s.Data["Parameters"].(map[string]interface{}) assert.True(t, ok) val, ok := pms["Thing"].(string) assert.True(t, ok) assert.Equal(t, "Ralpha", val) }
explode_data.jsonl/10949
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 267 }
[ 2830, 3393, 56808, 2227, 2001, 1155, 353, 8840, 836, 8, 341, 2109, 1669, 609, 38495, 515, 197, 197, 9706, 25, 3056, 4971, 10398, 515, 298, 197, 4971, 10398, 515, 571, 21297, 25, 330, 8987, 756, 298, 197, 1583, 197, 197, 1583, 197, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMCP23017DriverStart(t *testing.T) { mcp, adaptor := initTestMCP23017DriverWithStubbedAdaptor(0) gobottest.Assert(t, mcp.Start(), nil) adaptor.i2cWriteImpl = func([]byte) (int, error) { return 0, errors.New("write error") } err := mcp.Start() gobottest.Assert(t, err, errors.New("write error")) }
explode_data.jsonl/42311
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 125 }
[ 2830, 3393, 44, 7123, 17, 18, 15, 16, 22, 11349, 3479, 1155, 353, 8840, 836, 8, 341, 2109, 4672, 11, 91941, 1669, 2930, 2271, 44, 7123, 17, 18, 15, 16, 22, 11349, 2354, 33838, 2721, 2589, 32657, 7, 15, 340, 3174, 674, 1716, 477, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_EPD_002(t *testing.T) { tool.Test(t, nil, new(App), func(app *App) { if err := app.EPD.Clear(context.Background()); err != nil { t.Error(err) } }) }
explode_data.jsonl/65640
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 76 }
[ 2830, 3393, 2089, 23025, 62, 15, 15, 17, 1155, 353, 8840, 836, 8, 341, 197, 14172, 8787, 1155, 11, 2092, 11, 501, 23231, 701, 2915, 11462, 353, 2164, 8, 341, 197, 743, 1848, 1669, 906, 5142, 23025, 13524, 5378, 19047, 13426, 1848, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestGovGenesis(t *testing.T) { genesis := genutil.AppMap{ "gov": basic034Gov, } require.NotPanics(t, func() { Migrate(genesis) }) }
explode_data.jsonl/70010
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 59 }
[ 2830, 3393, 66901, 84652, 1155, 353, 8840, 836, 8, 341, 82281, 13774, 1669, 4081, 1314, 5105, 2227, 515, 197, 197, 1, 53203, 788, 6770, 15, 18, 19, 66901, 345, 197, 630, 17957, 15000, 35693, 1211, 1155, 11, 2915, 368, 314, 386, 34479,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestParseSeries(t *testing.T) { for _, test := range testSeries { metric, vals, err := ParseSeriesDesc(test.input) // Unexpected errors are always caused by a bug. require.NotEqual(t, err, errUnexpected, "unexpected error occurred") if !test.fail { require.NoError(t, err) require.Equal(t, test.expectedMetric, metric, "error on input '%s'", test.input) require.Equal(t, test.expectedValues, vals, "error in input '%s'", test.input) } else { require.Error(t, err) } } }
explode_data.jsonl/3394
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 194 }
[ 2830, 3393, 14463, 25544, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 1273, 1669, 2088, 1273, 25544, 341, 197, 2109, 16340, 11, 28356, 11, 1848, 1669, 14775, 25544, 11065, 8623, 10046, 692, 197, 197, 322, 70400, 5975, 525, 2677, 8881, 553...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestReconcileTaskResourceResolutionAndValidation(t *testing.T) { for _, tt := range []struct { desc string d test.Data wantFailedReason string wantEvents []string }{{ desc: "Fail ResolveTaskResources", d: test.Data{ Tasks: []*v1alpha1.Task{ tb.Task("test-task-missing-resource", tb.TaskSpec( tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit)), ), tb.TaskNamespace("foo")), }, TaskRuns: []*v1alpha1.TaskRun{ tb.TaskRun("test-taskrun-missing-resource", tb.TaskRunNamespace("foo"), tb.TaskRunSpec( tb.TaskRunTaskRef("test-task-missing-resource", tb.TaskRefAPIVersion("a1")), tb.TaskRunInputs( tb.TaskRunInputsResource("workspace", tb.TaskResourceBindingRef("git")), ), )), }, ClusterTasks: nil, PipelineResources: nil, }, wantFailedReason: podconvert.ReasonFailedResolution, wantEvents: []string{ "Normal Started ", "Warning Failed", }, }, { desc: "Fail ValidateResolvedTaskResources", d: test.Data{ Tasks: []*v1alpha1.Task{ tb.Task("test-task-missing-resource", tb.TaskSpec( tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit)), ), tb.TaskNamespace("foo")), }, TaskRuns: []*v1alpha1.TaskRun{ tb.TaskRun("test-taskrun-missing-resource", tb.TaskRunNamespace("foo"), tb.TaskRunSpec( tb.TaskRunTaskRef("test-task-missing-resource", tb.TaskRefAPIVersion("a1")), )), }, ClusterTasks: nil, PipelineResources: nil, }, wantFailedReason: podconvert.ReasonFailedValidation, wantEvents: []string{ "Normal Started ", "Warning Failed", }, }} { t.Run(tt.desc, func(t *testing.T) { names.TestingSeed() testAssets, cancel := getTaskRunController(t, tt.d) defer cancel() clients := testAssets.Clients reconciler := testAssets.Controller.Reconciler.(*Reconciler) fr := reconciler.Recorder.(*record.FakeRecorder) if err := reconciler.Reconcile(context.Background(), getRunName(tt.d.TaskRuns[0])); err != nil { t.Errorf("expected no error reconciling valid TaskRun but got %v", err) } tr, err := 
clients.Pipeline.TektonV1alpha1().TaskRuns(tt.d.TaskRuns[0].Namespace).Get(tt.d.TaskRuns[0].Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", tt.d.TaskRuns[0].Name, err) } for _, c := range tr.Status.Conditions { if c.Type != apis.ConditionSucceeded || c.Status != corev1.ConditionFalse || c.Reason != tt.wantFailedReason { t.Errorf("Expected TaskRun to \"%s\" but it did not. Final conditions were:\n%#v", tt.wantFailedReason, tr.Status.Conditions) } } err = checkEvents(fr, tt.desc, tt.wantEvents) if !(err == nil) { t.Errorf(err.Error()) } }) } }
explode_data.jsonl/896
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1255 }
[ 2830, 3393, 693, 40446, 457, 6262, 4783, 38106, 3036, 13799, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17853, 1669, 2088, 3056, 1235, 341, 197, 41653, 1797, 914, 198, 197, 2698, 394, 1273, 3336, 198, 197, 50780, 9408, 25139, 914, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestAutoscalingGroupTerraformRender(t *testing.T) { cases := []*renderTest{ { Resource: &AutoscalingGroup{ Name: fi.String("test"), Granularity: fi.String("5min"), LaunchConfiguration: &LaunchConfiguration{Name: fi.String("test_lc")}, MaxSize: fi.Int64(10), Metrics: []string{"test"}, MinSize: fi.Int64(1), Subnets: []*Subnet{ { Name: fi.String("test-sg"), ID: fi.String("sg-1111"), }, }, Tags: map[string]string{ "test": "tag", "cluster": "test", }, }, Expected: `provider "aws" { region = "eu-west-2" } resource "aws_autoscaling_group" "test" { enabled_metrics = ["test"] launch_configuration = aws_launch_configuration.test_lc.id max_size = 10 metrics_granularity = "5min" min_size = 1 name = "test" tag { key = "cluster" propagate_at_launch = true value = "test" } tag { key = "test" propagate_at_launch = true value = "tag" } vpc_zone_identifier = [aws_subnet.test-sg.id] } terraform { required_version = ">= 0.12.26" required_providers { aws = { "source" = "hashicorp/aws" "version" = ">= 2.46.0" } } } `, }, { Resource: &AutoscalingGroup{ Name: fi.String("test1"), LaunchTemplate: &LaunchTemplate{Name: fi.String("test_lt")}, MaxSize: fi.Int64(10), Metrics: []string{"test"}, MinSize: fi.Int64(5), MixedInstanceOverrides: []string{"t2.medium", "t2.large"}, MixedOnDemandBase: fi.Int64(4), MixedOnDemandAboveBase: fi.Int64(30), MixedSpotAllocationStrategy: fi.String("capacity-optimized"), Subnets: []*Subnet{ { Name: fi.String("test-sg"), ID: fi.String("sg-1111"), }, }, Tags: map[string]string{ "test": "tag", "cluster": "test", }, }, Expected: `provider "aws" { region = "eu-west-2" } resource "aws_autoscaling_group" "test1" { enabled_metrics = ["test"] max_size = 10 min_size = 5 mixed_instances_policy { instances_distribution { on_demand_base_capacity = 4 on_demand_percentage_above_base_capacity = 30 spot_allocation_strategy = "capacity-optimized" } launch_template { launch_template_specification { launch_template_id = aws_launch_template.test_lt.id version = 
aws_launch_template.test_lt.latest_version } override { instance_type = "t2.medium" } override { instance_type = "t2.large" } } } name = "test1" tag { key = "cluster" propagate_at_launch = true value = "test" } tag { key = "test" propagate_at_launch = true value = "tag" } vpc_zone_identifier = [aws_subnet.test-sg.id] } terraform { required_version = ">= 0.12.26" required_providers { aws = { "source" = "hashicorp/aws" "version" = ">= 2.46.0" } } } `, }, } doRenderTests(t, "RenderTerraform", cases) }
explode_data.jsonl/29820
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1765 }
[ 2830, 3393, 19602, 436, 81552, 2808, 51, 13886, 627, 6750, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 29838, 7322, 2271, 515, 197, 197, 515, 298, 79487, 25, 609, 19602, 436, 81552, 2808, 515, 571, 21297, 25, 394, 9136, 6431, 445,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestImmudbCommandFlagParserPriority(t *testing.T) { defer tearDown() o := DefaultTestOptions() var options server.Options var err error cmd := &cobra.Command{ Use: "immudb", RunE: func(cmd *cobra.Command, args []string) (err error) { options, err = parseOptions(cmd) if err != nil { return err } return nil }, } setupFlags(cmd, server.DefaultOptions(), server.DefaultMTLsOptions()) bindFlags(cmd) setupDefaults(server.DefaultOptions(), server.DefaultMTLsOptions()) // 4. config file _, err = executeCommand(cmd) assert.NoError(t, err) assert.Equal(t, "", options.Logfile) // 4-b. config file specified in command line _, err = executeCommand(cmd, "--config=../../../test/immudb.toml") assert.NoError(t, err) assert.Equal(t, "ConfigFileThatsNameIsDeclaredOnTheCommandLine", options.Logfile) // 3. env. variables os.Setenv("IMMUDB_LOGFILE", "EnvironmentVars") _, err = executeCommand(cmd) assert.NoError(t, err) assert.Equal(t, "EnvironmentVars", options.Logfile) // 2. flags _, err = executeCommand(cmd, "--logfile="+o.Logfile) assert.NoError(t, err) assert.Equal(t, o.Logfile, options.Logfile) // 1. overrides viper.Set("logfile", "override") _, err = executeCommand(cmd, "--logfile="+o.Logfile) assert.NoError(t, err) assert.Equal(t, "override", options.Logfile) }
explode_data.jsonl/67845
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 509 }
[ 2830, 3393, 50739, 661, 65, 4062, 12135, 6570, 20555, 1155, 353, 8840, 836, 8, 341, 16867, 32825, 741, 22229, 1669, 7899, 2271, 3798, 741, 2405, 2606, 3538, 22179, 198, 2405, 1848, 1465, 198, 25920, 1669, 609, 59410, 12714, 515, 197, 95...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestExtractFromHttpBadness(t *testing.T) { ch := make(chan interface{}, 1) c := sourceURL{"http://localhost:49575/_not_found_", http.Header{}, "other", ch, nil, 0, http.DefaultClient} if err := c.extractFromURL(); err == nil { t.Errorf("Expected error") } expectEmptyChannel(t, ch) }
explode_data.jsonl/48248
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 110 }
[ 2830, 3393, 28959, 3830, 2905, 17082, 2090, 1155, 353, 8840, 836, 8, 341, 23049, 1669, 1281, 35190, 3749, 22655, 220, 16, 340, 1444, 1669, 2530, 3144, 4913, 1254, 1110, 8301, 25, 19, 24, 20, 22, 20, 19632, 1921, 21480, 60102, 1758, 15...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUserStoreGetByUsername(t *testing.T) { Setup() teamId := model.NewId() u1 := &model.User{} u1.Email = model.NewId() u1.Username = model.NewId() Must(store.User().Save(u1)) Must(store.Team().SaveMember(&model.TeamMember{TeamId: teamId, UserId: u1.Id})) if err := (<-store.User().GetByUsername(u1.Username)).Err; err != nil { t.Fatal(err) } if err := (<-store.User().GetByUsername("")).Err; err == nil { t.Fatal("Should have failed because of missing username") } }
explode_data.jsonl/5100
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 198 }
[ 2830, 3393, 1474, 6093, 1949, 91519, 1155, 353, 8840, 836, 8, 341, 197, 21821, 2822, 197, 9196, 764, 1669, 1614, 7121, 764, 2822, 10676, 16, 1669, 609, 2528, 7344, 16094, 10676, 16, 24066, 284, 1614, 7121, 764, 741, 10676, 16, 42777, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestReadExperiment(t *testing.T) { b, err := ioutil.ReadFile(CompletePath("../testdata", "experiment.yaml")) assert.NoError(t, err) es := &ExperimentSpec{} err = yaml.Unmarshal(b, es) assert.NoError(t, err) assert.Equal(t, 4, len(*es)) b, err = ioutil.ReadFile(CompletePath("../testdata", "experiment_grpc.yaml")) assert.NoError(t, err) es = &ExperimentSpec{} err = yaml.Unmarshal(b, es) assert.NoError(t, err) assert.Equal(t, 3, len(*es)) }
explode_data.jsonl/6854
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 198 }
[ 2830, 3393, 4418, 77780, 1155, 353, 8840, 836, 8, 341, 2233, 11, 1848, 1669, 43144, 78976, 7, 12548, 1820, 17409, 92425, 497, 330, 59429, 33406, 5455, 6948, 35699, 1155, 11, 1848, 340, 78966, 1669, 609, 77780, 8327, 16094, 9859, 284, 32...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFormatEndpointWithoutIngress(t *testing.T) { j := &v1alpha1.Jira{ ObjectMeta: metav1.ObjectMeta{ Name: "test-jira", Namespace: "test-jira-namespace", }, } e := formatEndpoint(j) assert.NotNil(t, e) assert.Equal(t, "http://test-jira:8080/", e) }
explode_data.jsonl/29197
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 129 }
[ 2830, 3393, 4061, 27380, 26040, 641, 2483, 1155, 353, 8840, 836, 8, 341, 12428, 1669, 609, 85, 16, 7141, 16, 3503, 8832, 515, 197, 23816, 12175, 25, 77520, 16, 80222, 515, 298, 21297, 25, 414, 330, 1944, 13333, 8832, 756, 298, 90823, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestArtistService_Query(t *testing.T) { s := NewArtistService(newMockArtistDAO()) result, err := s.Query(nil, 1, 2) if assert.Nil(t, err) { assert.Equal(t, 2, len(result)) } }
explode_data.jsonl/70213
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 79 }
[ 2830, 3393, 40309, 1860, 48042, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 1532, 40309, 1860, 1755, 11571, 40309, 19532, 2398, 9559, 11, 1848, 1669, 274, 15685, 27907, 11, 220, 16, 11, 220, 17, 340, 743, 2060, 59678, 1155, 11, 1848, 8,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestRequest_HasScreen(t *testing.T) { tests := []struct { name string request *alice.Request want bool }{ { name: "", request: getReq(0), want: true, }, { name: "", request: getReq(1), want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req := tt.request if got := req.HasScreen(); got != tt.want { t.Errorf("Request.HasScreen() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/18221
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 237 }
[ 2830, 3393, 1900, 2039, 300, 7971, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 262, 914, 198, 197, 23555, 353, 63195, 9659, 198, 197, 50780, 262, 1807, 198, 197, 59403, 197, 197, 515, 298, 11609, 25, 262, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTableType(t *testing.T) { expected := "gpt" table := GetValidTable() tableType := table.Type() if tableType != expected { t.Errorf("Type() returned unexpected table type, actual %s expected %s", tableType, expected) } }
explode_data.jsonl/61025
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 77 }
[ 2830, 3393, 2556, 929, 1155, 353, 8840, 836, 8, 341, 42400, 1669, 330, 70, 417, 698, 26481, 1669, 2126, 4088, 2556, 741, 26481, 929, 1669, 1965, 10184, 741, 743, 1965, 929, 961, 3601, 341, 197, 3244, 13080, 445, 929, 368, 5927, 16500,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestValueLogTrigger(t *testing.T) { t.Skip("Difficult to trigger compaction, so skipping. Re-enable after fixing #226") dir, err := ioutil.TempDir("", "badger-test") require.NoError(t, err) defer removeDir(dir) opt := getTestOptions(dir) opt.ValueLogFileSize = 1 << 20 kv, err := Open(opt) require.NoError(t, err) // Write a lot of data, so it creates some work for valug log GC. sz := 32 << 10 txn := kv.NewTransaction(true) for i := 0; i < 100; i++ { v := make([]byte, sz) rand.Read(v[:rand.Intn(sz)]) require.NoError(t, txn.SetEntry(NewEntry([]byte(fmt.Sprintf("key%d", i)), v))) if i%20 == 0 { require.NoError(t, txn.Commit()) txn = kv.NewTransaction(true) } } require.NoError(t, txn.Commit()) for i := 0; i < 45; i++ { txnDelete(t, kv, []byte(fmt.Sprintf("key%d", i))) } require.NoError(t, kv.RunValueLogGC(0.5)) require.NoError(t, kv.Close()) err = kv.RunValueLogGC(0.5) require.Equal(t, ErrRejected, err, "Error should be returned after closing DB.") }
explode_data.jsonl/39098
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 429 }
[ 2830, 3393, 1130, 2201, 17939, 1155, 353, 8840, 836, 8, 341, 3244, 57776, 445, 21751, 3866, 311, 8183, 1367, 1311, 11, 773, 42659, 13, 1032, 64211, 1283, 35251, 671, 17, 17, 21, 1138, 48532, 11, 1848, 1669, 43144, 65009, 6184, 19814, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestIsValidDataType(t *testing.T) { // nolint:gocritic is := is.New(t) is.True(!isValidDataType(nil)) // nil is not a valid dataType is.True(isValidDataType(xsdString)) }
explode_data.jsonl/48553
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 73 }
[ 2830, 3393, 55470, 22653, 1155, 353, 8840, 836, 8, 341, 197, 322, 308, 337, 396, 70418, 509, 49208, 198, 19907, 1669, 374, 7121, 1155, 692, 19907, 32443, 3471, 29192, 22653, 27907, 593, 442, 2092, 374, 537, 264, 2697, 22285, 271, 19907,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1