File size: 68,949 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
{
    "paper_id": "P01-1017",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:30:02.543047Z"
    },
    "title": "Immediate-Head Parsing for Language Models",
    "authors": [
        {
            "first": "Eugene",
            "middle": [],
            "last": "Charniak",
            "suffix": "",
            "affiliation": {
                "laboratory": "Brown Laboratory for Linguistic Information Processing",
                "institution": "Brown University",
                "location": {
                    "postBox": "Box 1910",
                    "country": "Providence RI"
                }
            },
            "email": "ec@cs.brown.edu"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We present two language models based upon an \"immediate-head\" parser \u2014 our name for a parser that conditions all events below a constituent c upon the head of c. While all of the most accurate statistical parsers are of the immediate-head variety, no previous grammatical language model uses this technology. The perplexity for both of these models significantly improves upon the trigram model baseline as well as the best previous grammar-based language model. For the better of our two models these improvements are 24% and 14% respectively. We also suggest that improvement of the underlying parser should significantly improve the model's perplexity and that even in the near term there is a lot of potential for improvement in immediate-head language models.",
    "pdf_parse": {
        "paper_id": "P01-1017",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We present two language models based upon an \"immediate-head\" parser \u2014 our name for a parser that conditions all events below a constituent c upon the head of c. While all of the most accurate statistical parsers are of the immediate-head variety, no previous grammatical language model uses this technology. The perplexity for both of these models significantly improves upon the trigram model baseline as well as the best previous grammar-based language model. For the better of our two models these improvements are 24% and 14% respectively. We also suggest that improvement of the underlying parser should significantly improve the model's perplexity and that even in the near term there is a lot of potential for improvement in immediate-head language models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "All of the most accurate statistical parsers [1, 3, 6, 7, 12, 14] are lexicalized in that they condition probabilities on the lexical content of the sentences being parsed. Furthermore, all of these parsers are what we will call immediate-head parsers in that all of the properties of the immediate descendants of a constituent c are assigned probabilities that are conditioned on the lexical head of c. For example, in Figure 1 the probability that the vp expands into v np pp is conditioned on the head of the vp, \"put\", as are the choices of the sub-heads under the vp, i.e., \"ball\" (the head of the np) and \"in\" (the head of the pp). It is the experience of the statistical parsing community that immediate-head parsers are the most accurate we can design.",
                "cite_spans": [
                    {
                        "start": 45,
                        "end": 48,
                        "text": "[1,",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 49,
                        "end": 51,
                        "text": "3,",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 52,
                        "end": 54,
                        "text": "6,",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 55,
                        "end": 57,
                        "text": "7,",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 58,
                        "end": 61,
                        "text": "12,",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 62,
                        "end": 65,
                        "text": "14]",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 420,
                        "end": 428,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "It is also worthy of note that many of these parsers [1, 3, 6, 7] are generative -that is, for a sentence s they try to find the parse defined by Equation 1: arg max p( s) = arg max p( , s) (1) This is interesting because insofar as they compute p( , s) these parsers define a language-model in that they can (in principle) assign a probability to all possible sentences in the language by computing the sum in Equation 2:",
                "cite_spans": [
                    {
                        "start": 53,
                        "end": 56,
                        "text": "[1,",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 57,
                        "end": 59,
                        "text": "3,",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 60,
                        "end": 62,
                        "text": "6,",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 63,
                        "end": 65,
                        "text": "7]",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 146,
                        "end": 157,
                        "text": "Equation 1:",
                        "ref_id": null
                    },
                    {
                        "start": 190,
                        "end": 193,
                        "text": "(1)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "EQUATION",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "EQUATION",
                        "ref_id": "EQREF",
                        "raw_str": "p(s) = \\sum_{\\pi} p(\\pi, s)",
                        "eq_num": "( 2 )"
                    }
                ],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "where p( , s) is zero if the yield of = s. Language models, of course, are of interest because speech-recognition systems require them. These systems determine the words that were spoken by solving Equation 3:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "arg max s p(s A) = arg max s p(s)p(A s)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "where A denotes the acoustic signal. Virtually all current speech recognition systems use the so-called trigram language model in which the probability of a string is broken down into conditional probabilities on each word given the two previous words. E.g.,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "EQUATION",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "EQUATION",
                        "ref_id": "EQREF",
                        "raw_str": "p(w_{0,n}) = \\prod_{i=0}^{n-1} p(w_i \\mid w_{i-1}, w_{i-2})",
                        "eq_num": "(4)"
                    }
                ],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "On the other hand, in the last few years there has been interest in designing language models based upon parsing and Equation 2. We now turn to this previous research.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "There is, of course, a very large body of literature on language modeling (for an overview, see [10] ) and even the literature on grammatical language models is becoming moderately large [4, 9, 15, 16, 17] . The research presented in this paper is most closely related to two previous efforts, that by Chelba and Jelinek [4] (C&J) and that by Roark [15] , and this review concentrates on these two papers. While these two works differ in many particulars, we stress here the ways in which they are similar, and similar in ways that differ from the approach taken in this paper.",
                "cite_spans": [
                    {
                        "start": 96,
                        "end": 100,
                        "text": "[10]",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 187,
                        "end": 190,
                        "text": "[4,",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 191,
                        "end": 193,
                        "text": "9,",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 194,
                        "end": 197,
                        "text": "15,",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 198,
                        "end": 201,
                        "text": "16,",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 202,
                        "end": 205,
                        "text": "17]",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 321,
                        "end": 324,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 349,
                        "end": 353,
                        "text": "[15]",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "In both cases the grammar-based language model computes the probability of the next word based upon the previous words of the sentence. More specifically, these grammar-based models compute a subset of all possible grammatical relations for the prior words, and then compute the probability of the next grammatical situation, and the probability of seeing the next word given each of these grammatical situations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "Also, when computing the probability of the next word, both models condition on the two prior heads of constituents. Thus, like a trigram model, they use information about triples of words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "Neither of these models uses an immediate-head parser. Rather they are both what we will call strict left-to-right parsers. At each sentence position in strict left-to-right parsing one computes the probability of the next word given the previous words (and does not go back to modify such probabilities). This is not possible in immediate-head parsing. Sometimes the immediate head of a constituent occurs after it (e.g., in noun-phrases, where the head is typically the rightmost noun) and thus is not available for conditioning by a strict left-to-right parser.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "There are two reasons why one might prefer strict left-to-right parsing for a language model (Roark [15] and Chelba, personal communication). First, the search procedures for guessing the words that correspond to the acoustic signal works left to right in the string. If the language model is to offer guidance to the search procedure it must do so as well.",
                "cite_spans": [
                    {
                        "start": 100,
                        "end": 104,
                        "text": "[15]",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "The second benefit of strict left-to-right parsing is that it is easily combined with the standard trigram model. In both cases at every point in the sentence we compute the probability of the next word given the prior words. Thus one can interpolate the trigram and grammar probability estimates for each word to get a more robust estimate. It turns out that this is a good thing to do, as is clear from Table 1 , which gives perplexity results for a trigram model of the data in column one, results for the grammar-model in column two, and results for a model in which the two are interpo- While both of these reasons for strict-left-toright parsing (search and trigram interpolation) are valid, they are not necessarily compelling. The ability to combine easily with trigram models is important only as long as trigram models can improve grammar models. A sufficiently good grammar model would obviate the need for trigrams. As for the search problem, we briefly return to this point at the end of the paper. Here we simply note that while search requires that a language model provide probabilities in a left to right fashion, one can easily imagine procedures where these probabilities are revised after new information is found (i.e., the head of the constituent). Note that already our search procedure needs to revise previous most-likely-word hypotheses when the original guess makes the subsequent words very unlikely. Revising the associated language-model probabilities complicates the search procedure, but not unimaginably so. Thus it seems to us that it is worth finding out whether the superior parsing performance of immediate-head parsers translates into improved language models.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 405,
                        "end": 412,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "We have taken the immediate-head parser described in [3] as our starting point. This parsing model assigns a probability to a parse by a topdown process of considering each constituent c in and, for each c, first guessing the pre-terminal of c, t(c) (t for \"tag\"), then the lexical head of c, h(c), and then the expansion of c into further constituents e(c). Thus the probability of a parse is given by the equation",
                "cite_spans": [
                    {
                        "start": 53,
                        "end": 56,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "p( ) = c\u00be p(t(c) l(c), H(c)) \u00a1p(h(c) t(c), l(c), H(c)) \u00a1p(e(c) l(c), t(c), h(c), H(c))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "where l(c) is the label of c (e.g., whether it is a noun phrase (np), verb phrase, etc.) and H(c) is the relevant history of c -information outside c that our probability model deems important in determining the probability in question. In Whenever it is clear to which constituent we are referring we omit the (c) in, e.g., h(c). In this notation the above equation takes the following form:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "p( ) = c\u00be p(t l, m, u, i) \u00a1 p(h t, l, m, u, i) \u00a1p(e l, t, h, m, u). (5)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "Because this is a point of contrast with the parsers described in the previous section, note that all of the conditional distributions are conditioned on one lexical item (either i or h). Thus only p(h t, l, m, u, i), the distribution for the head of c, looks at two lexical items (i and h itself), and none of the distributions look at three lexical items as do the trigram distribution of Equation 4 and the previously discussed parsing language models [4, 15] .",
                "cite_spans": [
                    {
                        "start": 455,
                        "end": 458,
                        "text": "[4,",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 459,
                        "end": 462,
                        "text": "15]",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "Next we describe how we assign a probability to the expansion e of a constituent. We break up a traditional probabilistic context-free grammar (PCFG) rule into a left-hand side with a label l(c) drawn from the non-terminal symbols of our grammar, and a right-hand side that is a sequence of one or more such symbols. For each expansion we distinguish one of the right-hand side labels as the \"middle\" or \"head\" symbol M(c). M(c) is the constituent from which the head lexical item h is obtained according to deterministic rules that pick the head of a constituent from among the heads of its children. To the left of M is a sequence of one or more left labels L i (c) including the special termination symbol , which indicates that there are no more symbols to the left, and similarly for the labels to the right, R i (c). Thus an expansion e(c) looks like:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "EQUATION",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "EQUATION",
                        "ref_id": "EQREF",
                        "raw_str": "l \\rightarrow L_m \\ldots L_1 \\, M \\, R_1 \\ldots R_n",
                        "eq_num": "(6)"
                    }
                ],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "The expansion is generated by guessing first M, then in order L 1 through L m+1 (= ), and similarly for R 1 through R n+1 .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "In anticipation of our discussion in Section 4.2, note that when we are expanding an L i we do not know the lexical items to its left, but if we properly dovetail our \"guesses\" we can be sure of what word, if any, appears to its right and before M, and similarly for the word to the left of R j . This makes such words available to be conditioned upon.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "Finally, the parser of [3] deviates in two places from the strict dictates of a language model. First, as explicitly noted in [3] , the parser does not compute the partition function (normalization constant) for its distributions so the numbers it returns are not true probabilities. We noted there that if we replaced the \"max-ent inspired\" feature with standard deleted interpolation smoothing, we took a significant hit in performance. We have now found several ways to overcome this problem, including some very efficient ways to compute partition functions for this class of models. In the end, however, this was not necessary, as we found that we could obtain equally good performance by \"hand-crafting\" our interpolation smoothing rather than using the \"obvious\" method (which performs poorly).",
                "cite_spans": [
                    {
                        "start": 23,
                        "end": 26,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 126,
                        "end": 129,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "Secondly, as noted in [2] , the parser encourages right branching with a \"bonus\" multiplicative factor of 1.2 for constituents that end at the right boundary of the sentence, and a penalty of 0.8 for those that do not. This is replaced by explicitly conditioning the events in the expansion of Equation 6 on whether or not the constituent is at the right boundary (barring sentence-final punctu-ation). Again, with proper attention to details, this can be known at the time the expansion is taking place. This modification is much more complex than the multiplicative \"hack,\" and it is not quite as good (we lose about 0.1% in precision/recall figures), but it does allow us to compute true probabilities.",
                "cite_spans": [
                    {
                        "start": 22,
                        "end": 25,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "The resulting parser strictly speaking defines a PCFG in that all of the extra conditioning information could be included in the non-terminalnode labels (as we did with the head information in Figure 1) . When a PCFG probability distribution is estimated from training data (in our case the Penn tree-bank) PCFGs define a tight (summing to one) probability distribution over strings [5] , thus making them appropriate for language models. We also empirically checked that our in- As with [3] , a subset of parses is computed with a non-lexicalized PCFG, and the most probable edges (using an empirically established threshold) have their probabilities recomputed according to the complete probability model of Equation 5. Both searches are conducted using dynamic programming.",
                "cite_spans": [
                    {
                        "start": 383,
                        "end": 386,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 488,
                        "end": 491,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 193,
                        "end": 202,
                        "text": "Figure 1)",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "The Immediate-Head Parsing Model",
                "sec_num": "3"
            },
            {
                "text": "The parser as described in the previous section was trained and tested on the data used in the previously described grammar-based language modeling research [4, 15] . This data is from the Penn Wall Street Journal tree-bank [13] , but modified to make the text more \"speech-like\". In particular: As in previous work, files F0 to F20 are used for training, F21-F22 for development, and F23-F24 for testing.",
                "cite_spans": [
                    {
                        "start": 157,
                        "end": 160,
                        "text": "[4,",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 161,
                        "end": 164,
                        "text": "15]",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 224,
                        "end": 228,
                        "text": "[13]",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Bihead Language Model",
                "sec_num": "4.1"
            },
            {
                "text": "The results are given in Table 2 . We refer to the current model as the bihead model. \"Bihead\" here emphasizes the already noted fact that in this model probabilities involve at most two lexical heads. As seen in Table 2 , the immediate-bihead model with a perplexity of 144.98 outperforms both previous models, even though they use trigrams of words in their probability estimates.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 25,
                        "end": 32,
                        "text": "Table 2",
                        "ref_id": null
                    },
                    {
                        "start": 213,
                        "end": 220,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The Immediate-Bihead Language Model",
                "sec_num": "4.1"
            },
            {
                "text": "We also interpolated our parsing model with the trigram model (interpolation constant .36, as with the other models) and this model outperforms the other interpolation models. Note, however, that because our parser does not define probabilities for each word based upon previous words (as with trigram) it is not possible to do the integration at the word level. Rather we interpolate the probabilities of the entire sentences. This is a much less powerful technique than the word-level interpolation used by both C&J and Roark, but we still observe a significant gain in performance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Bihead Language Model",
                "sec_num": "4.1"
            },
            {
                "text": "While the performance of the grammatical model is good, a look at sentences for which the trigram model outperforms it makes its limitations apparent. The sentences in question have noun phrases like \"monday night football\" that trigram models eats up but on which our bihead parsing model performs less well. For example, consider the sentence \"he watched monday night football\". The trigram model assigns this a probability of 1. 9 \u00a1 10 5 , while the grammar model gives it a probability of 2. 77 \u00a1 10 7 . To a first approximation, this is entirely due to the difference in prob-monday night football nbar np Figure 2 : A noun-phrase with sub-structure ability of the noun-phrase. For example, the trigram probability p(football monday, night) = 0. 366, and would have been 1.0 except that smoothing saved some of the probability for other things it might have seen but did not. Because the grammar model conditions in a different order, the closest equivalent probability would be that for \"monday\", but in our model this is only conditioned on \"football\" so the probability is much less biased, only 0. 0306. (Penn tree-bank base noun-phrases are flat, thus the head above \"monday\" is \"football\".) This immediately suggests creating a second model that captures some of the trigram-like probabilities that the immediate-bihead model misses. The most obvious extension would be to condition upon not just one's parent's head, but one's grandparent's as well. This does capture some of the information we would like, particularly the case heads of noun-phrases inside of prepositional phrases. For example, in \"united states of america\", the probability of \"america\" is now conditioned not just on \"of\" (the head of its parent) but also on \"states\".",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 611,
                        "end": 619,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The Immediate-Trihead Model",
                "sec_num": "4.2"
            },
            {
                "text": "Unfortunately, for most of the cases where trigram really cleans up this revision would do little. Thus, in \"he watched monday night football\" \"monday\" would now be conditioned upon \"football\" and \"watched.\" The addition of \"watched\" is unlikely to make much difference, certainly compared to the boost trigram models get by, in effect, recognizing the complete name.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Trihead Model",
                "sec_num": "4.2"
            },
            {
                "text": "It is interesting to note, however, that virtually all linguists believe that a noun-phrase like \"monday night football\" has significant substructure -e.g., it would look something like Figure  2 . If we assume this tree-structure the two heads above \"monday\" are \"night\" and \"football\" respectively, thus giving our trihead model the same power as the trigram for this case. Ignoring some of the conditioning events, we now get a probability p(h = monday i = night, j = football), which is much higher than the corresponding bihead version p(h = monday i = football). The reader may remember that h is the head of the current constituent, while i is the head of its parent. We now define j to be the grandparent head.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 186,
                        "end": 195,
                        "text": "Figure  2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The Immediate-Trihead Model",
                "sec_num": "4.2"
            },
            {
                "text": "We decided to adopt this structure, but to keep things simple we only changed the definition of \"head\" for the distribution p(h  t, l, m, u, i, j) . Thus we adopted the following revised definition of head for constituents of base noun-phrases:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 124,
                        "end": 146,
                        "text": "p(h  t, l, m, u, i, j)",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The Immediate-Trihead Model",
                "sec_num": "4.2"
            },
            {
                "text": "For a pre-terminal (e.g., noun) constituent c of a base noun-phrase in which it is not the standard head (h) and which has as its right-sister another preterminal constituent d which is not itself h, the head of c is the head of d. The sole exceptions to this rule are phraseinitial determiners and numbers which retain h as their heads.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Trihead Model",
                "sec_num": "4.2"
            },
            {
                "text": "In effect this definition assumes that the substructure of all base noun-phrases is left branching, as in Figure 2 . This is not true, but Lauer [11] shows that about two-thirds of all branching in base-noun-phrases is leftward. We believe we would get even better results if the parser could determine the true branching structure.",
                "cite_spans": [
                    {
                        "start": 145,
                        "end": 149,
                        "text": "[11]",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 106,
                        "end": 114,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The Immediate-Trihead Model",
                "sec_num": "4.2"
            },
            {
                "text": "We then adopt the following definition of a grandparent-head feature j.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Immediate-Trihead Model",
                "sec_num": "4.2"
            },
            {
                "text": "phrase, or is a pre-terminal which takes a revised head as defined above, then j is the grandparent head of c, else 2. if c is a pre-terminal and is not next (in the production generating c) to the head of its parent (i) then j(c) is the head of the constituent next to c in the production in the direction of the head of that production, else 3. j is a \"none-of-the-above\" symbol.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "if c is a noun phrase under a prepositional",
                "sec_num": "1."
            },
            {
                "text": "Case 1 now covers both \"united states of america\" and \"monday night football\" examples. Case 2 handles other flat constituents in Penn tree-bank style (e.g., quantifier-phrases) for which we do not have a good analysis. Case three says that this feature is a no-op in all other situations. The results for this model, again trained on F0-F20 and tested on F23-24, are given in Figure  3 under the heading \"Immediate-trihead model\". We see that the grammar perplexity is reduced to 130.20, a reduction of 10% over our first model, 14% over the previous best grammar model (152.26%), and 22% over the best of the above trigram models for the task (167.02). When we run the trigram and new grammar model in tandem we get a perplexity of 126.07, a reduction of 8% over the best previous tandem model and 24% over the best trigram model.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 377,
                        "end": 386,
                        "text": "Figure  3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "if c is a noun phrase under a prepositional",
                "sec_num": "1."
            },
            {
                "text": "One interesting fact about the immediate-trihead model is that of the 3761 sentences in the test corpus, on 2934, or about 75%, the grammar model assigns a higher probability to the sentence than does the trigram model. One might well ask what went \"wrong\" with the remaining 25%? Why should the grammar model ever get beaten? Three possible reasons come to mind:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "4.3"
            },
            {
                "text": "1. The grammar model is better but only by a small amount, and due to sparse data problems occasionally the worse model will luck out and beat the better one.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "4.3"
            },
            {
                "text": "2. The grammar model and the trigram model capture different facts about the distribution of words in the language, and for some set of sentences one distribution will perform better than the other.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "4.3"
            },
            {
                "text": "3. The grammar model is, in some sense, always better than the trigram model, but if the parser bungles the parse, then the grammar model is impacted very badly. Obviously the trigram model has no such Achilles' heel. Table 4 : Precision/recall for sentences in which trigram/grammar models performed best",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 218,
                        "end": 225,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "4.3"
            },
            {
                "text": "We ask this question because what we should do to improve performance of our grammar-based language models depends critically on which of these explanations is correct: if (1) we should collect more data, if (2) we should just live with the tandem grammar-trigram models, and if (3) we should create better parsers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "4.3"
            },
            {
                "text": "Based upon a few observations on sentences from the development corpus for which the trigram model gave higher probabilities we hypothesized that reason (3), bungled parses, is primary. To test this we performed the following experiment. We divide the sentences from the test corpus into two groups, ones for which the trigram model performs better, and the ones for which the grammar model does better. We then collect labeled precision and recall statistics (the standard parsing performance measures) separately for each group. If our hypothesis is correct we expect the \"grammar higher\" group to have more accurate parses than the trigram-higher group as the poor parse would cause poor grammar perplexity for the sentence, which would then be worse than the trigram perplexity. If either of the other two explanations were correct one would not expect much difference between the two groups. The results are shown in Table 4 . We see there that, for example, sentences for which the grammar model has the superior perplexity have average recall 5.9 (= 84. 9 79. 0) percentage points higher than the sentences for which the trigram model performed better. The gap for precision is 5.6. This seems to support our hypothesis.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 922,
                        "end": 929,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "4.3"
            },
            {
                "text": "We have presented two grammar-based language models, both of which significantly improve upon both the trigram model baseline for the task (by 24% for the better of the two) and the best previous grammar-based language model (by 14%).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "5"
            },
            {
                "text": "Furthermore we have suggested that improvement of the underlying parser should improve the model's perplexity still further.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "5"
            },
            {
                "text": "We should note, however, that if we were dealing with standard Penn Tree-bank Wall-Street-Journal text, asking for better parsers would be easier said than done. While there is still some progress, it is our opinion that substantial improvement in the state-of-the-art precision/recall figures (around 90%) is unlikely in the near future. 3 However, we are not dealing with standard tree-bank text. As pointed out above, the text in question has been \"speechified\" by removing punctuation and capitalization, and \"simplified\" by allowing only a fixed vocabulary of 10,000 words (replacing all the rest by the symbol \"UNK\"), and replacing all digits and symbols by the symbol \"N\".",
                "cite_spans": [
                    {
                        "start": 339,
                        "end": 340,
                        "text": "3",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "5"
            },
            {
                "text": "We believe that the resulting text grossly underrepresents the useful grammatical information available to speech-recognition systems. First, we believe that information about rare or even truly unknown words would be useful. For example, when run on standard text, the parser uses ending information to guess parts of speech [3] . Even if we had never encountered the word \"showboating\", the \"ing\" ending tells us that this is almost certainly a progressive verb. It is much harder to determine this about UNK. 4 Secondly, while punctuation is not to be found in speech, prosody should give us something like equivalent information, perhaps even better. Thus significantly better parser performance on speechderived data seems possible, suggesting that highperformance trigram-less language models may be within reach. We believe that the adaptation of prosodic information to parsing use is a worthy topic for future research.",
                "cite_spans": [
                    {
                        "start": 326,
                        "end": 329,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 512,
                        "end": 513,
                        "text": "4",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "5"
            },
            {
                "text": "Finally, we have noted two objections to immediate-head language models: first, they complicate left-to-right search (since heads are often to the right of their children) and second, they cannot be tightly integrated with trigram models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "5"
            },
            {
                "text": "The possibility of trigram-less language models makes the second of these objections without force. Nor do we believe the first to be a permanent disability. If one is willing to provide sub-optimal probability estimates as one proceeds left-to-right and then amend them upon seeing the true head, left-to-right processing and immediatehead parsing might be joined. Note that one of the cases where this might be worrisome, early words in a base noun-phrase could be conditioned upon a head which comes several words later, has been made significantly less problematic by our revised definition of heads inside noun-phrases. We believe that other such situations can be brought into line as well, thus again taming the search problem. However, this too is a topic for future research.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "5"
            },
            {
                "text": "We simplify slightly in this section. See[3] for all the details on the equations as well as the smoothing used.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "They should sum to one. We are just checking that there are no bugs in the code.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "Furthermore, some of the newest wrinkles[8] use discriminative methods and thus do not define language models at all, seemingly making them ineligible for the competition on a priori grounds.4 To give the reader some taste for the difficulties presented by UNKs, we encourage you to try parsing the following real example: \"its supposedly unk unk unk a unk that makes one unk the unk of unk unk the unk radical unk of unk and unk and what in unk even seems like unk in unk\".",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "What is the minimal set of fragments that achieves maximal parse accuracy",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Bod",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proceedings of Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "BOD, R. What is the minimal set of frag- ments that achieves maximal parse accuracy. In Proceedings of Association for Computa- tional Linguistics 2001 . 2001.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Tree-bank grammars",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Charniak",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proceedings of the Thirteenth National Conference on Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "1031--1036",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "CHARNIAK, E. Tree-bank grammars. In Pro- ceedings of the Thirteenth National Con- ference on Artificial Intelligence. AAAI Press/MIT Press, Menlo Park, 1996, 1031- 1036.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "A maximum-entropyinspired parser",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Charniak",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 2000 Conference of the North American Chapter of the Association for Computational Linguistics . ACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "CHARNIAK, E. A maximum-entropy- inspired parser. In Proceedings of the 2000 Conference of the North American Chapter of the Association for Computational Linguistics . ACL, New Brunswick NJ, 2000.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Exploiting syntactic structure for language modeling",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Chelba",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Jelinek",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings for COLING-ACL 98 . ACL",
                "volume": "",
                "issue": "",
                "pages": "225--231",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "CHELBA, C. AND JELINEK, F. Exploiting syntactic structure for language modeling. In Proceedings for COLING-ACL 98 . ACL, New Brunswick NJ, 1998, 225-231.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Estimation of probabilistic context-free grammars",
                "authors": [
                    {
                        "first": "Z",
                        "middle": [],
                        "last": "Chi",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Geman",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Computational Linguistics",
                "volume": "24",
                "issue": "2",
                "pages": "299--306",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "CHI, Z. AND GEMAN, S. Estimation of probabilistic context-free grammars. Computa- tional Linguistics 24 2 (1998), 299-306.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Three generative lexicalized models for statistical parsing",
                "authors": [
                    {
                        "first": "M",
                        "middle": [
                            "J"
                        ],
                        "last": "Collins",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proceedings of the 35th Annual Meeting of the ACL",
                "volume": "",
                "issue": "",
                "pages": "16--23",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "COLLINS, M. J. Three generative lexicalized models for statistical parsing. In Proceedings of the 35th Annual Meeting of the ACL . 1997, 16-23.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Head-Driven Statistical Models for Natural Language Parsing. University of Pennsylvania",
                "authors": [
                    {
                        "first": "M",
                        "middle": [
                            "J"
                        ],
                        "last": "Collins",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "COLLINS, M. J. Head-Driven Statistical Models for Natural Language Parsing. Univer- sity of Pennsylvania, Ph.D. Dissertation, 1999.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Discriminative reranking for natural language parsing",
                "authors": [
                    {
                        "first": "M",
                        "middle": [
                            "J"
                        ],
                        "last": "Collins",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the International Conference on Machine Learning",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "COLLINS, M. J. Discriminative reranking for natural language parsing. In Proceedings of the International Conference on Machine Learning (ICML 2000) . 2000.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Using probabilistic shiftreduce parsing in speech recognition systems",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Goddeau",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "Proceedings of the 2nd International Conference on Spoken Language Processing",
                "volume": "",
                "issue": "",
                "pages": "321--324",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "GODDEAU, D. Using probabilistic shift- reduce parsing in speech recognition systems. In Proceedings of the 2nd International Confer- ence on Spoken Language Processing. 1992, 321-324.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Putting it all together: language model combination",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Goodman",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "ICASSP",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "GOODMAN, J. Putting it all together: lan- guage model combination. In ICASSP-2000 . 2000.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Corpus statistics meet the noun compound: some empirical results",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Lauer",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proceedings of the 33rd Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "47--55",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "LAUER, M. Corpus statistics meet the noun compound: some empirical results. In Proceed- ings of the 33rd Annual Meeting of the Associ- ation for Computational Linguistics. 1995, 47- 55.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Statistical decision-tree models for parsing",
                "authors": [
                    {
                        "first": "D",
                        "middle": [
                            "M"
                        ],
                        "last": "Magerman",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proceedings of the 33rd Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "276--283",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "MAGERMAN, D. M. Statistical decision-tree models for parsing. In Proceedings of the 33rd Annual Meeting of the Association for Com- putational Linguistics. 1995, 276-283.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Building a large annotated corpus of English: the Penn treebank",
                "authors": [
                    {
                        "first": "M",
                        "middle": [
                            "P"
                        ],
                        "last": "Marcus",
                        "suffix": ""
                    },
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Santorini",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [
                            "A"
                        ],
                        "last": "Marcinkiewicz",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Computational Linguistics",
                "volume": "19",
                "issue": "",
                "pages": "313--330",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "MARCUS, M. P., SANTORINI, B. AND MARCINKIEWICZ, M. A. Building a large annotated corpus of English: the Penn tree- bank. Computational Linguistics 19 (1993), 313-330.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Learning to parse natural language with maximum entropy models. Machine Learning 34 1/2/3 (1999)",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Ratnaparkhi",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "151--176",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "RATNAPARKHI, A. Learning to parse natural language with maximum entropy models. Ma- chine Learning 34 1/2/3 (1999), 151-176.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Probabilistic top-down parsing and language modeling",
                "authors": [
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Roark",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "ROARK, B. Probabilistic top-down parsing and language modeling. Computational Lin- guistics (forthcoming).",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "An efficient probabilistic context-free parsing algorithm that computes prefix probabilities",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Stolcke",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Computational Linguistics",
                "volume": "21",
                "issue": "",
                "pages": "165--202",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "STOLCKE, A. An efficient probabilistic context-free parsing algorithm that computes prefix probabilities. Computational Linguistics 21 (1995), 165-202.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Precise ngram probabilities from stochastic context-free grammars",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Stolcke",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Segal",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proceedings of the 32nd Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "74--79",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "STOLCKE, A. AND SEGAL, J. Precise n- gram probabilities from stochastic context-free grammars. In Proceedings of the 32th Annual Meeting of the Association for Computational Linguistics. 1994, 74-79.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "num": null,
                "uris": null,
                "type_str": "figure",
                "text": "A tree showing head information"
            },
            "FIGREF1": {
                "num": null,
                "uris": null,
                "type_str": "figure",
                "text": "[3] H(c) approximately consists of the label, head, and head-part-of-speech for the parent of c: m(c), i(c), and u(c) respectively. One exception is the distribution p(e(c) l(c), t(c), h(c), H(c)), where H only includes m and u. 1"
            },
            "FIGREF2": {
                "num": null,
                "uris": null,
                "type_str": "figure",
                "text": "dividual distributions (p(t l, m, u, i), and p(h t, l, m, u, i) from Equation 5 and p(L l, t, h, m, u), p(M l, t, h, m, u), and p(R l, t, h, m, u) from Equation 5) sum to one for a large, random, selection of conditioning events 2"
            },
            "TABREF0": {
                "type_str": "table",
                "num": null,
                "html": null,
                "content": "<table><tr><td/><td>vp/put</td><td/><td/><td/><td/></tr><tr><td/><td colspan=\"2\">np/ball</td><td/><td>pp/in</td><td/></tr><tr><td/><td/><td/><td/><td colspan=\"2\">np/box</td></tr><tr><td colspan=\"2\">verb/put det/the verb/put</td><td>noun/ball</td><td>prep/in</td><td>det/the</td><td>noun/box</td></tr><tr><td>put</td><td>the</td><td>ball</td><td>in</td><td>the</td><td>box</td></tr></table>",
                "text": "The first term on the right, p(s), is the language model, and is what we compute via parsing in Equation 2."
            },
            "TABREF4": {
                "type_str": "table",
                "num": null,
                "html": null,
                "content": "<table/>",
                "text": "Perplexity results for the immediatetrihead model"
            }
        }
    }
}