File size: 71,096 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
{
    "paper_id": "P01-1044",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:29:45.420099Z"
    },
    "title": "Parsing with Treebank Grammars: Empirical Bounds, Theoretical Models, and the Structure of the Penn Treebank",
    "authors": [
        {
            "first": "Dan",
            "middle": [],
            "last": "Klein",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Stanford University Stanford",
                "location": {
                    "postCode": "94305-9040",
                    "region": "CA"
                }
            },
            "email": "klein@cs.stanford.edu"
        },
        {
            "first": "Christopher",
            "middle": [
                "D"
            ],
            "last": "Manning",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Stanford University Stanford",
                "location": {
                    "postCode": "94305-9040",
                    "region": "CA"
                }
            },
            "email": "manning\u00a1@cs.stanford.edu"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper presents empirical studies and closely corresponding theoretical models of the performance of a chart parser exhaustively parsing the Penn Treebank with the Treebank's own CFG grammar. We show how performance is dramatically affected by rule representation and tree transformations, but little by top-down vs. bottom-up strategies. We discuss grammatical saturation, including analysis of the strongly connected components of the phrasal nonterminals in the Treebank, and model how, as sentence length increases, the effective grammar rule size increases as regions of the grammar are unlocked, yielding super-cubic observed time behavior in some configurations. \u00a2 \u00a4 \u00a3 \u00a6 \u00a5 \u00a7 \u00a9 worst-case time bound for exhaustively parsing arbitrary context-free grammars. In what follows, we do not make use of the probabilistic aspects of the grammar or parser.",
    "pdf_parse": {
        "paper_id": "P01-1044",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper presents empirical studies and closely corresponding theoretical models of the performance of a chart parser exhaustively parsing the Penn Treebank with the Treebank's own CFG grammar. We show how performance is dramatically affected by rule representation and tree transformations, but little by top-down vs. bottom-up strategies. We discuss grammatical saturation, including analysis of the strongly connected components of the phrasal nonterminals in the Treebank, and model how, as sentence length increases, the effective grammar rule size increases as regions of the grammar are unlocked, yielding super-cubic observed time behavior in some configurations. \u00a2 \u00a4 \u00a3 \u00a6 \u00a5 \u00a7 \u00a9 worst-case time bound for exhaustively parsing arbitrary context-free grammars. In what follows, we do not make use of the probabilistic aspects of the grammar or parser.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "This paper originated from examining the empirical performance of an exhaustive active chart parser using an untransformed treebank grammar over the Penn Treebank. Our initial experiments yielded the surprising result that for many configurations empirical parsing speed was super-cubic in the sentence length. This led us to look more closely at the structure of the treebank grammar. The resulting analysis builds on the presentation of Charniak (1996) , but extends it by elucidating the structure of non-terminal interrelationships in the Penn Treebank grammar. On the basis of these studies, we build simple theoretical models which closely predict observed parser performance, and, in particular, explain the originally observed super-cubic behavior.",
                "cite_spans": [
                    {
                        "start": 439,
                        "end": 454,
                        "text": "Charniak (1996)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We used treebank grammars induced directly from the local trees of the entire WSJ section of the Penn Treebank (Marcus et al., 1993 ) (release 3). For each length and parameter setting, 25 sentences evenly distributed through the treebank were parsed. Since we were parsing sentences from among those from which our grammar was derived, coverage was never an is-sue. Every sentence parsed had at least one parse -the parse with which it was originally observed. 1 The sentences were parsed using an implementation of the probabilistic chart-parsing algorithm presented in (Klein and Manning, 2001) . In that paper, we present a theoretical analysis showing an The default settings are shown above in bold face. We do not discuss all possible combinations of these settings. Rather, we take the bottom-up parser using an untransformed grammar with trie rule encodings to be the basic form of the parser. Except where noted, we will discuss how each factor affects this baseline, as most of the effects are orthogonal. When we name a setting, any omitted parameters are assumed to be the defaults.",
                "cite_spans": [
                    {
                        "start": 111,
                        "end": 131,
                        "text": "(Marcus et al., 1993",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 462,
                        "end": 463,
                        "text": "1",
                        "ref_id": null
                    },
                    {
                        "start": 572,
                        "end": 597,
                        "text": "(Klein and Manning, 2001)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In all cases, the grammar was directly induced from (transformed) Penn treebank trees. The transforms used are shown in figure 1. For all settings, functional tags and crossreferencing annotations were stripped. For NOTRANSFORM, no other modification was made. In particular, empty nodes (represented as -NONE-in the treebank) were turned into rules that generated the empty string ( ), and there was no collapsing of categories (such as PRT and ADVP) as is often done in parsing work (Collins, 1997, etc.) . For NOEMPTIES, empties were removed by pruning nonterminals which covered no overt words. For NOUNA-RIESHIGH, and NOUNARIESLOW, unary nodes were removed as well, by keeping only the tops and the bottoms of unary chains, respectively. 2",
                "cite_spans": [
                    {
                        "start": 485,
                        "end": 506,
                        "text": "(Collins, 1997, etc.)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tree Transforms",
                "sec_num": "2.1"
            },
            {
                "text": "The parser operates on Finite State Automata (FSA) grammar representations. We compiled grammar rules into FSAs in three ways: LISTs, TRIEs, and MINimized FSAs. An example of each representation is given in figure 2. For LIST encodings, each local tree type was encoded in its own, linearly structured FSA, corresponding to Earley (1970) -style dotted rules. For TRIE, there was one FSA per category, encoding together all rule types producing that category. For MIN, state-minimized FSAs were constructed from the trie FSAs. Note that while the rule encoding may dramatically affect the efficiency of a parser, it does not change the actual set of parses for a given sentence in any way. 3",
                "cite_spans": [
                    {
                        "start": 324,
                        "end": 337,
                        "text": "Earley (1970)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Rule Encodings",
                "sec_num": "2.2"
            },
            {
                "text": "2 In no case were the nonterminal-to-word or TOP-tononterminal unaries altered.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Rule Encodings",
                "sec_num": "2.2"
            },
            {
                "text": "3 FSAs are not the only method of representing and compacting grammars. For example, the prefix compacted tries we use are the same as the common practice of ignoring items before the dot in a dotted rule (Moore, 2000) . Another ",
                "cite_spans": [
                    {
                        "start": 205,
                        "end": 218,
                        "text": "(Moore, 2000)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Rule Encodings",
                "sec_num": "2.2"
            },
            {
                "text": "In this section, we outline the observed performance of the parser for various settings. We frequently speak in terms of the following: span: a range of words in the chart, e.g., [1,3] 4 edge: a category over a span, e.g., NP:[1, 3] traversal: a way of making an edge from an active and a passive edge, e.g., NP:[1, 3] (NP DT.NN:",
                "cite_spans": [
                    {
                        "start": 179,
                        "end": 186,
                        "text": "[1,3] 4",
                        "ref_id": null
                    },
                    {
                        "start": 223,
                        "end": 229,
                        "text": "NP:[1,",
                        "ref_id": null
                    },
                    {
                        "start": 230,
                        "end": 232,
                        "text": "3]",
                        "ref_id": null
                    },
                    {
                        "start": 309,
                        "end": 315,
                        "text": "NP:[1,",
                        "ref_id": null
                    },
                    {
                        "start": 316,
                        "end": 318,
                        "text": "3]",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Observed Performance",
                "sec_num": "3"
            },
            {
                "text": "[1,2] + NN:[2,3])",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Observed Performance",
                "sec_num": "3"
            },
            {
                "text": "The parser has an \u00a2 \u00a4 \u00a3 \u00a5 \u00a7\u00a9 theoretical time bound, where \u00a5 is the number of words in the sentence to be parsed, is the number of nonterminal categories in the grammar and is the number of (active) states in the FSA encoding of the grammar. The time bound is derived from counting the number of traversals processed by the parser, each taking",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Time",
                "sec_num": "3.1"
            },
            {
                "text": "\u00a2 \u00a4 \u00a3 \" ! # \u00a9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Time",
                "sec_num": "3.1"
            },
            {
                "text": "time. In figure 3, we see the average time 5 taken per sentence length for several settings, with the empirical exponent (and correlation $ -value) from the best-fit simple power law model to the right. Notice that most settings show time growth greater than",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Time",
                "sec_num": "3.1"
            },
            {
                "text": "\u00a2 \u00a4 \u00a3 % \u00a5 \u00a7\u00a9 . Although, \u00a2 \u00a4 \u00a3 % \u00a5 \u00a7\u00a9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Time",
                "sec_num": "3.1"
            },
            {
                "text": "is simply an asymptotic bound, there are good explanations for the observed behavior. There are two primary causes for the super-cubic time values. The first is theoretically uninteresting. The parser is implemented in Java, which uses garbage collection for memory management. Even when there is plenty of memory for a parse's primary data structures, \"garbage collection thrashing\" can occur when logical possibility would be trie encodings which compact the grammar states by common suffix rather than common prefix, as in (Leermakers, 1992) . The savings are less than for prefix compaction. The ratio of the number of edges and traversals produced with a top-down strategy over the number produced with a bottom-up strategy (shown for TRIE-NOTRANSFORM, others are similar).",
                "cite_spans": [
                    {
                        "start": 526,
                        "end": 544,
                        "text": "(Leermakers, 1992)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Time",
                "sec_num": "3.1"
            },
            {
                "text": "parsing longer sentences as temporary objects cause increasingly frequent reclamation. To see past this effect, which inflates the empirical exponents, we turn to the actual traversal counts, which better illuminate the issues at hand. Figures 4 (a) and (b) show the traversal curves corresponding to the times in figure 3. The interesting cause of the varying exponents comes from the \"constant\" terms in the theoretical bound. The second half of this paper shows how modeling growth in these terms can accurately predict parsing performance (see figures 9 to 13).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 236,
                        "end": 249,
                        "text": "Figures 4 (a)",
                        "ref_id": "FIGREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Time",
                "sec_num": "3.1"
            },
            {
                "text": "The memory bound for the parser is",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory",
                "sec_num": "3.2"
            },
            {
                "text": "\u00a2 \u00a4 \u00a3 & \u00a5 ' \u00a9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory",
                "sec_num": "3.2"
            },
            {
                "text": ". Since the parser is running in a garbage-collected environment, it is hard to distinguish required memory from utilized memory. However, unlike time and traversals which in practice can diverge, memory requirements match the number of edges in the chart almost exactly, since the large data structures are all proportional in size to the number of edges",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory",
                "sec_num": "3.2"
            },
            {
                "text": "( 0 ) 1 \u00a2 \u00a4 \u00a3 & \u00a5 ' \u00a9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory",
                "sec_num": "3.2"
            },
            {
                "text": ". 6 Almost all edges stored are active edges (2 4 3 6 5 8 7 for sentences longer than 30 words), of which there can be",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory",
                "sec_num": "3.2"
            },
            {
                "text": "\u00a2 \u00a4 \u00a3 9 & \u00a5 ' \u00a9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory",
                "sec_num": "3.2"
            },
            {
                "text": ": one for every grammar state and span. Passive edges, of which there can be \u00a2 \u00a4 \u00a3 9 \u00a5 ' @ \u00a9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory",
                "sec_num": "3.2"
            },
            {
                "text": ", one for every category and span, are a shrinking minority. This is because, while is bounded above by 27 in the treebank 7 (for spans 2 2), numbers in the thousands (see figure 12 ). Thus, required memory will be implicitly modeled when we model active edges in section 4.3.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 172,
                        "end": 181,
                        "text": "figure 12",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Memory",
                "sec_num": "3.2"
            },
            {
                "text": "Figure 4 (a) shows the effect of the tree transforms on traversal counts. The NOUNARIES settings are much more efficient than the others, however this efficiency comes at a price in terms of the utility of the final parse. For example, regardless of which NOUNARIES 6 A standard chart parser might conceivably require storing more than",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tree Transforms",
                "sec_num": "3.3"
            },
            {
                "text": "A C B \u00a6 D F E",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tree Transforms",
                "sec_num": "3.3"
            },
            {
                "text": "traversals on its agenda, but ours provably never does.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tree Transforms",
                "sec_num": "3.3"
            },
            {
                "text": "7 This count is the number of phrasal categories with the introduction of a TOP label for the unlabeled top treebank nodes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tree Transforms",
                "sec_num": "3.3"
            },
            {
                "text": "transform is chosen, there will be NP nodes missing from the parses, making the parses less useful for any task requiring NP identification. For the remainder of the paper, we will focus on the settings NOTRANS-FORM and NOEMPTIES.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tree Transforms",
                "sec_num": "3.3"
            },
            {
                "text": "Figure 4 (b) shows the effect of each tree transform on traversal counts. The more compacted the grammar representation, the more time-efficient the parser is.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Encodings",
                "sec_num": "3.4"
            },
            {
                "text": "Figure 4 (c) shows the effect on total edges and traversals of using top-down and bottom-up strategies. There are some extremely minimal savings in traversals due to top-down filtering effects, but there is a corresponding penalty in edges as rules whose left-corner cannot be built are introduced. Given the highly unrestrictive nature of the treebank grammar, it is not very surprising that top-down filtering provides such little benefit. However, this is a useful observation about real world parsing performance. The advantages of top-down chart parsing in providing grammar-driven prediction are often advanced (e.g., Allen 1995:66), but in practice we find almost no value in this for broad coverage CFGs. While some part of this is perhaps due to errors in the treebank, a large part just reflects the true nature of broad coverage grammars: e.g., once you allow adverbial phrases almost anywhere and allow PPs, (participial) VPs, and (temporal) NPs to be adverbial phrases, along with phrases headed by adverbs, then there is very little useful top-down control left. With such a permissive grammar, the only real constraints are in the POS tags which anchor the local trees (see section 4.3). Therefore, for the remainder of the paper, we consider only bottom-up settings.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Top-Down vs. Bottom-Up",
                "sec_num": "3.5"
            },
            {
                "text": "In the remainder of the paper we provide simple models that nevertheless accurately capture the varying magnitudes and exponents seen for different grammar encodings and tree transformations. Since the",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Models",
                "sec_num": "4"
            },
            {
                "text": "\u00a5 \u00a7 term of \u00a2 \u00a4 \u00a3 \u00a5 \u00a7\u00a9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Models",
                "sec_num": "4"
            },
            {
                "text": "comes directly from the number of start, split, and end points for traversals, it is certainly not responsible for the varying growth rates. An initially plausible possibility is that the quantity bounded by the term is non-constant in \u00a5 in practice, because longer spans are more ambiguous in terms of the number of categories they can form. This turns out to be generally false, as discussed in section 4.2. Alternately, the effective term could be growing with \u00a5 , which turns out to be true, as discussed in section 4.3.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Models",
                "sec_num": "4"
            },
            {
                "text": "The number of (possibly zero-size) spans for a sentence of length \u00a5 is fixed:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Models",
                "sec_num": "4"
            },
            {
                "text": "\u00a3 % \u00a5 H G I ! \u00a9 P \u00a3 \u00a6 \u00a5 Q G S R T \u00a9 V U T R",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Models",
                "sec_num": "4"
            },
            {
                "text": ". Thus, to be able to evaluate and model the total edge counts, we look to the number of edges over a given span.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Models",
                "sec_num": "4"
            },
            {
                "text": "The passive (or active) saturation of a given span is the number of passive (or active) edges over that span.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Definition 1",
                "sec_num": null
            },
            {
                "text": "In the total time and traversal bound \u00a2 \u00a4 \u00a3 \u00a5 \u00a7\u00a9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Definition 1",
                "sec_num": null
            },
            {
                "text": ", the effective value of is determined by the active saturation, while the effective value of is determined by the passive saturation. An interesting fact is that the saturation of a span is, for the treebank grammar and sentences, essentially independent of what size sentence the span is from and where in the sentence the span begins. Thus, for a given span size, we report the average over all spans of that size occurring anywhere in any sentence parsed.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Definition 1",
                "sec_num": null
            },
            {
                "text": "The reason that effective growth is not found in the component is that passive saturation stays almost constant as span size increases. However, the more interesting result is not that saturation is relatively constant (for spans beyond a small, grammar-dependent size), but that the saturation values are extremely large compared to (see section 4.2). For the NOTRANS-FORM and NOEMPTIES grammars, most categories are reachable from most other categories using rules which can be applied over a single span. Once you get one of these categories over a span, you will get the rest as well. We now formalize this.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Treebank Grammar Structure",
                "sec_num": "4.1"
            },
            {
                "text": "Definition 2 A category W is empty-reachable in a grammar X if W",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Treebank Grammar Structure",
                "sec_num": "4.1"
            },
            {
                "text": "can be built using only empty terminals.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Treebank Grammar Structure",
                "sec_num": "4.1"
            },
            {
                "text": "The empty-reachable set for the NOTRANSFORM grammar is shown in figure 5. 8 These 23 categories plus the tag -NONE-create a passive saturation of 24 for zero-spans for NOTRANSFORM (see figure 9) . The same-span-reachability relation induces a graph over the 27 non-terminal categories. The stronglyconnected component (SCC) reduction of that graph is shown in figures 6 and 7. 9 Unsurprisingly, the largest SCC, which contains most \"common\" categories (S, NP, VP, PP, etc.) is slightly larger for the NOTRANS-FORM grammar, since the empty-reachable set is nonempty. However, note that even for NOTRANSFORM, the largest SCC is smaller than the empty-reachable set, since empties provide direct entry into some of the lower SCCs, in particular because of WH-gaps.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 185,
                        "end": 194,
                        "text": "figure 9)",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Treebank Grammar Structure",
                "sec_num": "4.1"
            },
            {
                "text": "Interestingly, this same high-reachability effect occurs even for the NOUNARIES grammars, as shown in the next section.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Treebank Grammar Structure",
                "sec_num": "4.1"
            },
            {
                "text": "The total growth and saturation of passive edges is relatively easy to describe. Figure 8 shows the total num- 9 Implied arcs have been removed for clarity. The relation is in fact the transitive closure of this graph. Figure 9 : The average passive saturation (number of passive edges) for a span of a given size as processed in practice (left), and as predicted by our models (right).",
                "cite_spans": [
                    {
                        "start": 111,
                        "end": 112,
                        "text": "9",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 81,
                        "end": 89,
                        "text": "Figure 8",
                        "ref_id": "FIGREF5"
                    },
                    {
                        "start": 219,
                        "end": 227,
                        "text": "Figure 9",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Passive Edges",
                "sec_num": "4.2"
            },
            {
                "text": "ber of passive edges by sentence length, and figure 9 shows the saturation as a function of span size. 10 The grammar representation does not affect which passive edges will occur for a given span.",
                "cite_spans": [
                    {
                        "start": 103,
                        "end": 105,
                        "text": "10",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Passive Edges",
                "sec_num": "4.2"
            },
            {
                "text": "The large SCCs cause the relative independence of passive saturation from span size for the NOTRANS-FORM and NOEMPTIES settings. Once any category in the SCC is found, all will be found, as well as all categories reachable from that SCC. For these settings, the passive saturation can be summarized by three saturation numbers: zero-spans (empties)ba",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Passive Edges",
                "sec_num": "4.2"
            },
            {
                "text": ", one-spans (words)ba",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d f e",
                "sec_num": null
            },
            {
                "text": ", and all larger spans (categories)ba",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d V g",
                "sec_num": null
            },
            {
                "text": ". Taking averages directly from the data, we have our first model, shown on the right in figure 9.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d '",
                "sec_num": null
            },
            {
                "text": "For the NOUNARIES settings, there will be no same-span reachability and hence no SCCs. To reach a new category always requires the use of at least one overt word. However, for spans of size 6 or so, enough words exist that the same high saturation effect will still be observed. This can be modeled quite simply by assuming each terminal unlocks a fixed fraction of the nonterminals, as seen in the right graph of figure 9, but we omit the details here.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d '",
                "sec_num": null
            },
            {
                "text": "Using these passive saturation models, we can directly estimate the total passive edge counts by summation:`h",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d '",
                "sec_num": null
            },
            {
                "text": "d p i d P \u00a3 % \u00a5 q \u00a9 ) S r 1 s t v u e \u00a3 \u00a6 \u00a5 w G x ! y p \u00a9 b a # c 6 d t 10",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d '",
                "sec_num": null
            },
            {
                "text": "The maximum possible passive saturation for any span greater than one is equal to the number of phrasal categories in the treebank grammar: 27. However, empty and size-one spans can additionally be covered by POS tag edges.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d '",
                "sec_num": null
            },
            {
                "text": "The predictions are shown in figure 8. For the NO-TRANSFORM or NOEMPTIES settings, this reduces to:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d '",
                "sec_num": null
            },
            {
                "text": "d p i d P \u00a3 \u00a6 \u00a5 q \u00a9 ) s g \" s ' b a c 8 d ' G x \u00a3 \u00a6 \u00a5 q \u00a9 \u00a6 a # c 8 d V g G 1 \u00a3 % \u00a5 G ! \u00a9 a # c 8 d f e",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d '",
                "sec_num": null
            },
            {
                "text": "We correctly predict that the passive edge total exponents will be slightly less than 2.0 when unaries are present, and greater than 2.0 when they are not. With unaries, the linear terms in the reduced equation are significant over these sentence lengths and drag down the exponent. The linear terms are larger for NO-TRANSFORM and therefore drag the exponent down more. 11 Without unaries, the more gradual saturation growth increases the total exponent, more so for NOUNARIESLOW than NOUNARIESHIGH. However, note that for spans around 8 and onward, the saturation curves are essentially constant for all settings.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "# c 6 d '",
                "sec_num": null
            },
            {
                "text": "Active edges are the vast majority of edges and essentially determine (non-transient) memory requirements. While passive counts depend only on the grammar transform, active counts depend primarily on the encoding for general magnitude but also on the transform for the details (and exponent effects). Figure 10 shows the total active edges by sentence size for three settings chosen to illustrate the main effects. Total active growth is sub-quadratic for LIST, but has an exponent of up to about 2.4 for the TRIE settings. h i jk l for the same reason. Moreover, note that m e f # g has a higher best-fit exponent, yet will never actually outgrow it. To model the active totals, we again begin by modeling the active saturation curves, shown in figure 11. The active saturation for any span is bounded above by , the number of active grammar states (states in the grammar FSAs which correspond to active edges). For list grammars, this number is the sum of the lengths of all rules in the grammar. For trie grammars, it is the number of unique rule prefixes (including the LHS) in the grammar. For minimized grammars, it is the number of states with outgoing transitions (non-black states in figure 2). The value of is shown for each setting in figure 12. Note that the maximum number of active states is dramatically larger for lists since common rule prefixes are duplicated many times. For minimized FSAs, the state reduction is even greater. Since states which are earlier in a rule are much more likely to match a span, the fact that tries (and min FSAs) compress early states is particularly advantageous.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 301,
                        "end": 310,
                        "text": "Figure 10",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "Unlike passive saturation, which was relatively close to its bound , active saturation is much farther below . Furthermore, while passive saturation was relatively constant in span size, at least after a point, active saturation quite clearly grows with span size, even for spans well beyond those shown in figure 11. We now model these active saturation curves.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "What does it take for a given active state to match a given span? For TRIE and LIST, an active state cor-responds to a prefix of a rule and is a mix of POS tags and phrasal categories, each of which must be matched, in order, over that span for that state to be reached. Given the large SCCs seen in section 4.1, phrasal categories, to a first approximation, might as well be wildcards, able to match any span, especially if empties are present. However, the tags are, in comparison, very restricted. Tags must actually match a word in the span.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "More precisely, consider an active state c in the grammar and a span a . In the TRIE and LIST encodings, there is some, possibly empty, list n of labels that must be matched over a before an active edge with this state can be constructed over that span. 12 Assume that the phrasal categories in n can match any span (or any non-zero span in NOEMPTIES). 13 Therefore, phrasal categories in n do not constrain whether c can match a . The real issue is whether the tags in n will match words in a . Assume that a random tag matches a random word with a fixed probability`, independently of where the tag is in the rule and where the word is in the sentence. 14 Assume further that, although tags occur more often than categories in rules (63.9% of rule items are tags in the NOTRANSFORM case 15 ), given a fixed number of tags and categories, all permutations are equally likely to appear as rules. 16 Under these assumptions, the probability that an active state c is in the treebank grammar will depend only on the number We then have an expression for the chance of matching a specific alignment of an active state to a specific span. Clearly, there can be many alignments which differ only in the spans of the categories, but line up the same tags with the same words. However, there will be a certain number of unique ways in which the words and tags can be lined up between c and a . If we know this number, we can calculate the total probability that there is some alignment which matches. For example, consider the state NP NP CC NP . PP (which has signature (1,2) -the PP has no effect) over a span of length \u00a5 , with empties available. The NPs can match any span, so there are \u00a5 alignments which are distinct from the standpoint of the CC tag -it can be in any position. The chance that some alignment will match is therefore",
                "cite_spans": [
                    {
                        "start": 896,
                        "end": 898,
                        "text": "16",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "$1 - (1 - p)^n$",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": ", which, for small`is roughly linear in \u00a5 . It should be clear that for an active state like this, the longer the span, the more likely it is that this state will be found over that span.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "It is unfortunately not the case that all states with the same signature will match a span length with the same probability. For example, the state NP NP NP CC . NP has the same signature, but must align the CC with the final element of the span. A state like this will not become more likely (in our model) as span size increases. However, with some straightforward but space-consuming recurrences, we can calculate the expected chance that a random rule of a given signature will match a given span length. Since we know how many states have a given signature, we can calculate the total active saturation c q a",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "c 8 d P \u00a3 % \u00a5 q \u00a9 as c q a # c 6 d P \u00a3 % \u00a5 q \u00a9 { ) x r x | } o P i u \u00a5 v d P \u00a3 9 p \u00a9 p ( F | \u00a3 % z c 8 d p o x \u00a3 % c t V \u00a5 q \u00a9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "more efficient than NOUNARIESHIGH despite having more active states, largely because using the bottoms of chains increases the frequency of tags relative to categories. 16 This is also false; tags occur slightly more often at the beginnings of rules and less often at the ends. This model has two parameters. First, there is`which we estimated directly by looking at the expected match between the distribution of tags in rules and the distribution of tags in the Treebank text (which is around 1/17.7). No factor for POS tag ambiguity was used, another simplification. 17 Second, there is the map",
                "cite_spans": [
                    {
                        "start": 169,
                        "end": 171,
                        "text": "16",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "o P i u \u00a5 v d",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "from signatures to a number of active states, which was read directly from the compiled grammars.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "This model predicts the active saturation curves shown to the right in figure 11 . Note that the model, though not perfect, exhibits the qualitative differences between the settings, both in magnitudes and exponents. 18 In particular:",
                "cite_spans": [
                    {
                        "start": 217,
                        "end": 219,
                        "text": "18",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 71,
                        "end": 80,
                        "text": "figure 11",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "The transform primarily changes the saturation over short spans, while the encoding determines the overall magnitudes. For example, in TRIE-NOEMPTIES the low-span saturation is lower than in TRIE-NOTRANSFORM since short spans in the former case can match only signatures which have both d and o small, while in the latter only d needs to be small. Therefore, the several hundred states which are reachable only via categories all match every span starting from size 0 for NOTRANSFORM, but are accessed only gradually for NOEMPTIES. However, for larger spans, the behavior converges to counts characteristic for TRIE encodings.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "For LIST encodings, the early saturations are huge, due to the fact that most of the states which are available early for trie grammars are precisely the ones duplicated up to thousands of times in the list grammars. However, the additive gain over the initial states is roughly the same for both, as after a few items are specified, the tries become sparse.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "The actual magnitudes and exponents 19 of the saturations are surprisingly well predicted, suggesting that this model captures the essential behavior.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "These active saturation curves produce the active total curves in figure 10, which are also qualitatively correct in both magnitudes and exponents.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Edges",
                "sec_num": "4.3"
            },
            {
                "text": "Now that we have models for active and passive edges, we can combine them to model traversal counts as well. We assume that the chance for a passive edge and an active edge to combine into a traversal is a single probability representing how likely an arbitrary active state is to have a continuation with a label matching an arbitrary passive state. List rule states have only one continuation, while trie rule states in the branch- Figure 13 : The average number of traversals for sentences of a given length as observed in practice (left), and as predicted by the models presented in the latter part of the paper (right).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 434,
                        "end": 443,
                        "text": "Figure 13",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Traversals",
                "sec_num": "4.4"
            },
            {
                "text": "ing portion of the trie average about 3.7 (min FSAs 4.2). 20 Making another uniformity assumption, we assume that this combination probability is the continuation degree divided by the total number of passive labels, categorical or tag (73).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Traversals",
                "sec_num": "4.4"
            },
            {
                "text": "In figure 13 , we give graphs and exponents of the traversal counts, both observed and predicted, for various settings. Our model correctly predicts the approximate values and qualitative facts, including:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 3,
                        "end": 12,
                        "text": "figure 13",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Traversals",
                "sec_num": "4.4"
            },
            {
                "text": "For LIST, the observed exponent is lower than for TRIEs, though the total number of traversals is dramatically higher. This is because the active saturation is growing much faster for TRIEs; note that in cases like this the lower-exponent curve will never actually outgrow the higher-exponent curve.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Traversals",
                "sec_num": "4.4"
            },
            {
                "text": "Of the settings shown, only TRIE-NOEMPTIES exhibits super-cubic traversal totals.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Traversals",
                "sec_num": "4.4"
            },
            {
                "text": "Despite their similar active and passive exponents, TRIE-NOEMPTIES and TRIE-NOTRANSFORM vary in traversal growth due to the \"early burst\" of active edges which gives TRIE-NOTRANSFORM significantly more edges over short spans than its power law would predict. This excess leads to a sizeable quadratic addend in the number of transitions, causing the average best-fit exponent to drop without greatly affecting the overall magnitudes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Traversals",
                "sec_num": "4.4"
            },
            {
                "text": "Overall, growth of saturation values in span size increases best-fit traversal exponents, while early spikes in saturation reduce them. The traversal exponents therefore range from LIST-NOTRANSFORM at 2.6 to TRIE-NOUNARIESLOW at over 3.8. However, the final performance is more dependent on the magnitudes, which range from LIST-NOTRANSFORM as the worst, despite its exponent, to MIN-NOUNARIESHIGH as the best. The single biggest factor in the time and traversal performance turned out to be the encoding, which is fortunate because the choice of grammar transform will depend greatly on the application. 20 This is a simplification as well, since the shorter prefixes that tend to have higher continuation degrees are on average also a larger fraction of the active edges.",
                "cite_spans": [
                    {
                        "start": 605,
                        "end": 607,
                        "text": "20",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Traversals",
                "sec_num": "4.4"
            },
            {
                "text": "We built simple but accurate models on the basis of two observations. First, passive saturation is relatively constant in span size, but large due to high reachability among phrasal categories in the grammar. Second, active saturation grows with span size because, as spans increase, the tags in a given active edge are more likely to find a matching arrangement over a span. Combining these models, we demonstrated that a wide range of empirical qualitative and quantitative behaviors of an exhaustive parser could be derived, including the potential super-cubic traversal growth over sentence lengths of interest.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "Effectively \"testing on the training set\" would be invalid if we wished to present performance results such as precision and recall, but it is not a problem for the present experiments, which focus solely on the parser load and grammar structure.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "Note that the number of words (or size) of a span is equal to the difference between the endpoints.5 The hardware was a 700 MHz Intel Pentium III, and we used up to 2GB of RAM for very long sentences or very poor parameters. With good parameter settings, the system can parse 100+ word treebank sentences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "Note that, over these values of , even a basic quadratic function like the simple sum r \u00a4 B d e E f g has a bestfit simple power curve exponent of only",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "The essence of the MIN model, which is omitted here, is that states are represented by the \"easiest\" label sequence which leads to that state.13 The model for the NOUNARIES cases is slightly more complex, but similar.14 This is of course false; in particular, tags at the end of rules disproportionately tend to be punctuation tags.15 Although the present model does not directly apply to the NOUNARIES cases, NOUNARIESLOW is significantly",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "In general, the we used was lower for not having modeled tagging ambiguity, but higher for not having modeled the fact that the SCCs are not of size 27. 18 And does so without any \"tweakable\" parameters. 19 Note that the list curves do not compellingly suggest a power law model.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Natural Language Understanding",
                "authors": [
                    {
                        "first": "James",
                        "middle": [],
                        "last": "Allen",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "James Allen. 1995. Natural Language Understand- ing. Benjamin Cummings, Redwood City, CA.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Tree-bank grammars",
                "authors": [
                    {
                        "first": "Eugene",
                        "middle": [],
                        "last": "Charniak",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proceedings of the Thirteenth National Conference on Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "1031--1036",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Eugene Charniak. 1996. Tree-bank grammars. In Proceedings of the Thirteenth National Conference on Artificial Intelligence, pages 1031-1036.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Three generative, lexicalised models for statistical parsing",
                "authors": [
                    {
                        "first": "Michael John",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    },
                    {
                        "first": "Collins",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "ACL 35/EACL 8",
                "volume": "",
                "issue": "",
                "pages": "16--23",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Michael John Collins. 1997. Three generative, lex- icalised models for statistical parsing. In ACL 35/EACL 8, pages 16-23.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "An efficient context-free parsing algorithm",
                "authors": [
                    {
                        "first": "Jay",
                        "middle": [],
                        "last": "Earley",
                        "suffix": ""
                    }
                ],
                "year": 1970,
                "venue": "Communications of the ACM",
                "volume": "6",
                "issue": "",
                "pages": "451--455",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jay Earley. 1970. An efficient context-free parsing al- gorithm. Communications of the ACM, 6:451-455.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "An \u00a2 \u00a4 \u00a3 \u00a6 \u00a5 \u00a7\u00a9 agenda-based chart parser for arbitrary probabilistic context-free grammars",
                "authors": [
                    {
                        "first": "Dan",
                        "middle": [],
                        "last": "Klein",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Christopher",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Manning",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dan Klein and Christopher D. Manning. 2001. An \u00a2 \u00a4 \u00a3 \u00a6 \u00a5 \u00a7\u00a9 agenda-based chart parser for arbitrary prob- abilistic context-free grammars. Technical Report dbpubs/2001-16, Stanford University.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "A recursive ascent Earley parser",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Leermakers",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "Information Processing Letters",
                "volume": "41",
                "issue": "",
                "pages": "87--91",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "R. Leermakers. 1992. A recursive ascent Earley parser. Information Processing Letters, 41:87-91.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Foundations of Statistical Natural Language Processing",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Christopher",
                        "suffix": ""
                    },
                    {
                        "first": "Hinrich",
                        "middle": [],
                        "last": "Manning",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Sch\u00fctze",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Christopher D. Manning and Hinrich Sch\u00fctze. 1999. Foundations of Statistical Natural Language Pro- cessing. MIT Press, Boston, MA.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Building a large annotated corpus of English: The Penn treebank",
                "authors": [
                    {
                        "first": "Mitchell",
                        "middle": [
                            "P"
                        ],
                        "last": "Marcus",
                        "suffix": ""
                    },
                    {
                        "first": "Beatrice",
                        "middle": [],
                        "last": "Santorini",
                        "suffix": ""
                    },
                    {
                        "first": "Mary",
                        "middle": [
                            "Ann"
                        ],
                        "last": "Marcinkiewicz",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Computational Linguistics",
                "volume": "19",
                "issue": "",
                "pages": "313--330",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mitchell P. Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a large annotated corpus of English: The Penn treebank. Computa- tional Linguistics, 19:313-330.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Improved left-corner chart parsing for large context-free grammars",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Robert",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Moore",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the Sixth International Workshop on Parsing Technologies",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Robert C. Moore. 2000. Improved left-corner chart parsing for large context-free grammars. In Pro- ceedings of the Sixth International Workshop on Parsing Technologies.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "text": "Grammar Encodings: FSAs for a subset of the rules for the category NP. Non-black states are active, non-white states are accepting, and bold transitions are phrasal.",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF2": {
                "uris": null,
                "text": "The average time to parse sentences using various parameters.",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF3": {
                "uris": null,
                "text": "(a) The number of traversals for different grammar transforms. (b) The number of traversals for different grammar encodings. (c)",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF4": {
                "uris": null,
                "text": "The empty-reachable set for the NOTRANSFORM grammar. The same-span-reachability graph for the NOEMPTIES grammar. One instance of W: every node not dominating that instance is an instance of an empty-reachable category.",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF5": {
                "uris": null,
                "text": "The average number of passive edges processed in practice (left), and predicted by our models (right).",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF6": {
                "uris": null,
                "text": "The average active saturation (number of active edges) for a span of a given size as processed in practice (left), and as predicted by our models (right). Grammar sizes: active state counts.",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF8": {
                "uris": null,
                "text": "align with a non-empty span (for NOEMPTIES) or any span at all (for NOTRANSFORM), then the question of whether this alignment of c with a matches is determined entirely by the d tags. However, with our assumptions, the probability that a randomly chosen set of d tags matches a randomly chosen set of d words is simply hy.",
                "num": null,
                "type_str": "figure"
            },
            "TABREF3": {
                "content": "<table><tr><td/><td>2.0M</td><td/><td/><td/><td/><td/><td/><td>2.0M</td><td/><td/><td/></tr><tr><td>Avg. Active Totals</td><td>0.5M 1.0M 1.5M</td><td/><td/><td/><td/><td>List-NoTransform exp 1.88 r 0.999 Trie-NoTransform exp 2.18 r 0.999 Trie-NoEmpties exp 2.43 r 0.999</td><td>Avg. Active Totals</td><td>0.5M 1.0M 1.5M</td><td/><td/><td/><td>List-NoTransform exp 1.81 r 0.999 Trie-NoTransform exp 2.10 r 1.000 Trie-NoEmpties exp 2.36 r 1.000</td></tr><tr><td/><td>0.0M</td><td/><td/><td/><td/><td/><td/><td>0.0M</td><td/><td/><td/></tr><tr><td/><td>0</td><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td/><td>0</td><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td></tr><tr><td/><td/><td/><td colspan=\"2\">Sentence Length</td><td/><td/><td/><td/><td/><td colspan=\"2\">Sentence Length</td></tr><tr><td colspan=\"2\">2.0K 12.0K 14.0K Figure 10: 0.0K 4.0K 6.0K 8.0K 10.0K Avg. Active Saturation</td><td/><td/><td/><td/><td>List-NoTransform exp 0.092 r 0.957 Trie-NoTransform exp 0.323 r 0.999 Trie-NoEmpties exp 0.389 r 0.997</td><td>Avg. Active Saturation</td><td>0.0K 2.0K 12.0K 14.0K 4.0K 6.0K 8.0K 10.0K</td><td/><td/><td/><td>List-NoTransform exp 0.111 r 0.999 Trie-NoTransform exp 0.297 r 0.998 Trie-NoEmpties exp 0.298 r 0.991</td></tr><tr><td/><td>0</td><td>5</td><td>10</td><td/><td>15</td><td>20</td><td/><td>0</td><td>5</td><td>10</td><td/><td>15</td><td>20</td></tr><tr><td/><td/><td/><td colspan=\"2\">Span Length</td><td/><td/><td/><td/><td/><td colspan=\"2\">Span Length</td></tr></table>",
                "type_str": "table",
                "text": "The average number of active edges for sentences of a given length as observed in practice (left), and as predicted by our models (right).",
                "num": null,
                "html": null
            }
        }
    }
}