{
    "paper_id": "P06-1006",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:23:57.201664Z"
    },
    "title": "Kernel-Based Pronoun Resolution with Structured Syntactic Knowledge",
    "authors": [
        {
            "first": "Xiaofeng",
            "middle": [],
            "last": "Yang",
            "suffix": "",
            "affiliation": {},
            "email": "xiaofengy@i2r.a-star.edu.sg"
        },
        {
            "first": "Jian",
            "middle": [],
            "last": "Su",
            "suffix": "",
            "affiliation": {},
            "email": "sujian@i2r.a-star.edu.sg"
        },
        {
            "first": "Chew",
            "middle": [],
            "last": "Lim",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "National University of Singapore",
                "location": {
                    "postCode": "117543",
                    "settlement": "Singapore"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Syntactic knowledge is important for pronoun resolution. Traditionally, the syntactic information for pronoun resolution is represented in terms of features that have to be selected and defined heuristically. In the paper, we propose a kernel-based method that can automatically mine the syntactic information from the parse trees for pronoun resolution. Specifically, we utilize the parse trees directly as a structured feature and apply kernel functions to this feature, as well as other normal features, to learn the resolution classifier. In this way, our approach avoids the efforts of decoding the parse trees into the set of flat syntactic features. The experimental results show that our approach can bring significant performance improvement and is reliably effective for the pronoun resolution task.",
    "pdf_parse": {
        "paper_id": "P06-1006",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Syntactic knowledge is important for pronoun resolution. Traditionally, the syntactic information for pronoun resolution is represented in terms of features that have to be selected and defined heuristically. In the paper, we propose a kernel-based method that can automatically mine the syntactic information from the parse trees for pronoun resolution. Specifically, we utilize the parse trees directly as a structured feature and apply kernel functions to this feature, as well as other normal features, to learn the resolution classifier. In this way, our approach avoids the efforts of decoding the parse trees into the set of flat syntactic features. The experimental results show that our approach can bring significant performance improvement and is reliably effective for the pronoun resolution task.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Pronoun resolution is the task of finding the correct antecedent for a given pronominal anaphor in a document. Prior studies have suggested that syntactic knowledge plays an important role in pronoun resolution. For a practical pronoun resolution system, the syntactic knowledge usually comes from the parse trees of the text. The issue that arises is how to effectively incorporate the syntactic information embedded in the parse trees to help resolution. One common solution seen in previous work is to define a set of features that represent particular syntactic knowledge, such as the grammatical role of the antecedent candidates, the governing relations between the candidate and the pronoun, and so on. These features are calculated by mining the parse trees, and then could be used for resolution by using manually designed rules (Lappin and Leass, 1994; Kennedy and Boguraev, 1996; Mitkov, 1998) , or using machine-learning methods (Aone and Bennett, 1995; Yang et al., 2004; Luo and Zitouni, 2005) .",
                "cite_spans": [
                    {
                        "start": 838,
                        "end": 862,
                        "text": "(Lappin and Leass, 1994;",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 863,
                        "end": 890,
                        "text": "Kennedy and Boguraev, 1996;",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 891,
                        "end": 904,
                        "text": "Mitkov, 1998)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 941,
                        "end": 965,
                        "text": "(Aone and Bennett, 1995;",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 966,
                        "end": 984,
                        "text": "Yang et al., 2004;",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 985,
                        "end": 1007,
                        "text": "Luo and Zitouni, 2005)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "However, such a solution has its limitation. The syntactic features have to be selected and defined manually, usually by linguistic intuition. Unfortunately, what kinds of syntactic information are effective for pronoun resolution still remains an open question in this research community. The heuristically selected feature set may be insufficient to represent all the information necessary for pronoun resolution contained in the parse trees.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this paper we will explore how to utilize the syntactic parse trees to help learning-based pronoun resolution. Specifically, we directly utilize the parse trees as a structured feature, and then use a kernel-based method to automatically mine the knowledge embedded in the parse trees. The structured syntactic feature, together with other normal features, is incorporated in a trainable model based on Support Vector Machine (SVM) (Vapnik, 1995) to learn the decision classifier for resolution. Indeed, using kernel methods to mine structural knowledge has shown success in some NLP applications like parsing (Collins and Duffy, 2002; Moschitti, 2004) and relation extraction (Zelenko et al., 2003; Zhao and Grishman, 2005) . However, to our knowledge, the application of such a technique to the pronoun resolution task still remains unexplored.",
                "cite_spans": [
                    {
                        "start": 435,
                        "end": 449,
                        "text": "(Vapnik, 1995)",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 613,
                        "end": 638,
                        "text": "(Collins and Duffy, 2002;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 639,
                        "end": 655,
                        "text": "Moschitti, 2004)",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 680,
                        "end": 702,
                        "text": "(Zelenko et al., 2003;",
                        "ref_id": "BIBREF17"
                    },
                    {
                        "start": 703,
                        "end": 727,
                        "text": "Zhao and Grishman, 2005)",
                        "ref_id": "BIBREF18"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Compared with previous work, our approach has several advantages: (1) The approach utilizes the parse trees as a structured feature, which avoids the efforts of decoding the parse trees into a set of syntactic features in a heuristic manner.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "(2) The approach is able to put together the structured feature and the normal flat features in a trainable model, which allows different types of information to be considered in combination for both learning and resolution. (3) The approach is applicable for practical pronoun resolution as the syntactic information can be automatically obtained from machine-generated parse trees. And our study shows that the approach works well under the commonly available parsers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We evaluate our approach on the ACE data set. The experimental results over the different domains indicate that the structured syntactic feature incorporated with kernels can significantly improve the resolution performance (by 5%\u223c8% in the success rates), and is reliably effective for the pronoun resolution task.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The remainder of the paper is organized as follows. Section 2 gives some related work that utilizes the structured syntactic knowledge to do pronoun resolution. Section 3 introduces the framework for the pronoun resolution, as well as the baseline feature space and the SVM classifier. Section 4 presents in detail the structured feature and the kernel functions to incorporate such a feature in the resolution. Section 5 shows the experimental results and has some discussion. Finally, Section 6 concludes the paper.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "One of the early work on pronoun resolution relying on parse trees was proposed by Hobbs (1978) . For a pronoun to be resolved, Hobbs' algorithm works by searching the parse trees of the current text. Specifically, the algorithm processes one sentence at a time, using a left-to-right breadth-first searching strategy. It first checks the current sentence where the pronoun occurs. The first NP that satisfies constraints, like number and gender agreements, would be selected as the antecedent. If the antecedent is not found in the current sentence, the algorithm would traverse the trees of previous sentences in the text. As the searching processing is completely done on the parse trees, the performance of the algorithm would rely heavily on the accuracy of the parsing results. Lappin and Leass (1994) reported a pronoun resolution algorithm which uses the syntactic representation output by McCord's Slot Grammar parser. A set of salience measures (e.g. Subject, Object or Accusative emphasis) is derived from the syntactic structure. The candidate with the highest salience score would be selected as the antecedent. In their algorithm, the weights of Category: whether the candidate is a definite noun phrase, indefinite noun phrase, pronoun, named-entity or others.",
                "cite_spans": [
                    {
                        "start": 83,
                        "end": 95,
                        "text": "Hobbs (1978)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 784,
                        "end": 807,
                        "text": "Lappin and Leass (1994)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Reflexiveness: whether the pronominal anaphor is a reflexive pronoun.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Type: whether the pronominal anaphor is a male-person pronoun (like he), female-person pronoun (like she), single gender-neuter pronoun (like it), or plural gender-neuter pronoun (like they) Subject: whether the candidate is a subject of a sentence, a subject of a clause, or not.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Object: whether the candidate is an object of a verb, an object of a preposition, or not.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Distance: the sentence distance between the candidate and the pronominal anaphor.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Closeness: whether the candidate is the candidate closest to the pronominal anaphor.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "FirstNP: whether the candidate is the first noun phrase in the current sentence.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Parallelism: whether the candidate has an identical collocation pattern with the pronominal anaphor. Luo and Zitouni (2005) proposed a coreference resolution approach which also explores the information from the syntactic parse trees. Different from Lappin and Leass (1994) 's algorithm, they employed a maximum entropy based model to automatically compute the importance (in terms of weights) of the features extracted from the trees. In their work, the selection of their features is mainly inspired by the government and binding theory, aiming to capture the c-command relationships between the pronoun and its antecedent candidate. By contrast, our approach simply utilizes the parse trees as a structured feature, and lets the learning algorithm discover all possible embedded information that is necessary for pronoun resolution.",
                "cite_spans": [
                    {
                        "start": 101,
                        "end": 123,
                        "text": "Luo and Zitouni (2005)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 250,
                        "end": 273,
                        "text": "Lappin and Leass (1994)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Our pronoun resolution system adopts the common learning-based framework similar to those by Soon et al. (2001) and Ng and Cardie (2002) .",
                "cite_spans": [
                    {
                        "start": 93,
                        "end": 111,
                        "text": "Soon et al. (2001)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 116,
                        "end": 136,
                        "text": "Ng and Cardie (2002)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Resolution Framework",
                "sec_num": "3"
            },
            {
                "text": "In the learning framework, a training or testing instance is formed by a pronoun and one of its antecedent candidate. During training, for each pronominal anaphor encountered, a positive instance is created by paring the anaphor and its closest antecedent. Also a set of negative instances is formed by paring the anaphor with each of the non-coreferential candidates. Based on the training instances, a binary classifier is generated using a particular learning algorithm. During resolution, a pronominal anaphor to be resolved is paired in turn with each preceding antecedent candidate to form a testing instance. This instance is presented to the classifier which then returns a class label with a confidence value indicating the likelihood that the candidate is the antecedent. The candidate with the highest confidence value will be selected as the antecedent of the pronominal anaphor.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Resolution Framework",
                "sec_num": "3"
            },
            {
                "text": "As with many other learning-based approaches, the knowledge for the reference determination is represented as a set of features associated with the training or test instances. In our baseline system, the features adopted include lexical property, morphologic type, distance, salience, parallelism, grammatical role and so on. Listed in Table 1 , all these features have been proved effective for pronoun resolution in previous work.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 336,
                        "end": 343,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Feature Space",
                "sec_num": "3.1"
            },
            {
                "text": "In theory, any discriminative learning algorithm is applicable to learn the classifier for pronoun resolution. In our study, we use Support Vector Machine (Vapnik, 1995) to allow the use of kernels to incorporate the structured feature.",
                "cite_spans": [
                    {
                        "start": 155,
                        "end": 169,
                        "text": "(Vapnik, 1995)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Support Vector Machine",
                "sec_num": "3.2"
            },
            {
                "text": "Suppose the training set S consists of labelled vectors {(x i , y i )}, where x i is the feature vector of a training instance and y i is its class label. The classifier learned by SVM is",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Support Vector Machine",
                "sec_num": "3.2"
            },
            {
                "text": "f (x) = sgn( i=1 y i a i x * x i + b) (1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Support Vector Machine",
                "sec_num": "3.2"
            },
            {
                "text": "where a i is the learned parameter for a support vector",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Support Vector Machine",
                "sec_num": "3.2"
            },
            {
                "text": "x i . An instance x is classified as positive (negative) if f (x) > 0 (f (x) < 0) 1 .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Support Vector Machine",
                "sec_num": "3.2"
            },
            {
                "text": "One advantage of SVM is that we can use kernel methods to map a feature space to a particular high-dimension space, in case that the current problem could not be separated in a linear way. Thus the dot-product x 1 * x 2 is replaced by a kernel function (or kernel) between two vectors, that is K(x 1 , x 2 ). For the learning with the normal features listed in Table 1 , we can just employ the well-known polynomial or radial basis kernels that can be computed efficiently. In the next section we will discuss how to use kernels to incorporate the more complex structured feature.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 361,
                        "end": 368,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Support Vector Machine",
                "sec_num": "3.2"
            },
            {
                "text": "A parse tree that covers a pronoun and its antecedent candidate could provide us much syntactic information related to the pair. The commonly used syntactic knowledge for pronoun resolution, such as grammatical roles or the governing relations, can be directly described by the tree structure. Other syntactic knowledge that may be helpful for resolution could also be implicitly represented in the tree. Therefore, by comparing the common substructures between two trees we can find out to what degree two trees contain similar syntactic information, which can be done using a convolution tree kernel. The value returned from the tree kernel reflects the similarity between two instances in syntax. Such syntactic similarity can be further combined with other knowledge to compute the overall similarity between two instances, through a composite kernel. And thus a SVM classifier can be learned and then used for resolution. This is just the main idea of our approach.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Main Idea",
                "sec_num": "4.1"
            },
            {
                "text": "Normally, parsing is done on the sentence level. However, in many cases a pronoun and an antecedent candidate do not occur in the same sentence. To present their syntactic properties and relations in a single tree structure, we construct a syntax tree for an entire text, by attaching the parse trees of all its sentences to an upper node.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Structured Syntactic Feature",
                "sec_num": "4.2"
            },
            {
                "text": "Having obtained the parse tree of a text, we shall consider how to select the appropriate portion of the tree as the structured feature for a given instance. As each instance is related to a pronoun and a candidate, the structured feature at least should be able to cover both of these two expressions. Generally, the more substructure of the tree is included, the more syntactic information would be provided, but at the same time the more noisy information that comes from parsing errors would likely be introduced. In our study, we examine three possible structured features that contain different substructures of the parse tree:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Structured Syntactic Feature",
                "sec_num": "4.2"
            },
            {
                "text": "Min-Expansion This feature records the minimal structure covering both the pronoun and",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Structured Syntactic Feature",
                "sec_num": "4.2"
            },
            {
                "text": "Simple-Expansion Full-Expansion Figure 1 : structured-features for the instance i{\"him\", \"the man\"} the candidate in the parse tree. It only includes the nodes occurring in the shortest path connecting the pronoun and the candidate, via the nearest commonly commanding node. For example, considering the sentence \"The man in the room saw him.\", the structured feature for the instance i{\"him\",\"the man\"} is circled with dash lines as shown in the leftmost picture of Figure 1 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 32,
                        "end": 40,
                        "text": "Figure 1",
                        "ref_id": null
                    },
                    {
                        "start": 467,
                        "end": 475,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Min-Expansion",
                "sec_num": null
            },
            {
                "text": "Min-Expansion could, to some degree, describe the syntactic relationships between the candidate and pronoun. However, it is incapable of capturing the syntactic properties of the candidate or the pronoun, because the tree structure surrounding the expression is not taken into consideration. To incorporate such information, feature Simple-Expansion not only contains all the nodes in Min-Expansion, but also includes the first-level children of these nodes 2 . The middle of Figure 1 shows such a feature for i{\"him\", \"the man\"}. We can see that the nodes \"PP\" (for \"in the room\") and \"VB\" (for \"saw\") are included in the feature, which provides clues that the candidate is modified by a prepositional phrase and the pronoun is the object of a verb.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 476,
                        "end": 484,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Simple-Expansion",
                "sec_num": null
            },
            {
                "text": "This feature focusses on the whole tree structure between the candidate and pronoun. It not only includes all the nodes in Simple-Expansion, but also the nodes (beneath the nearest commanding parent) that cover the words between the candidate and the pronoun 3 . Such a feature keeps the most information related to the pronoun and candidate pair. The rightmost picture of Figure 1 shows the structure for feature Full-Expansion of i{\"him\", \"the man\"}. As illustrated, different from in Simple-Expansion, the subtree of \"PP\" (for \"in the room\") is fully expanded and all its children nodes are included in Full-Expansion.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 373,
                        "end": 381,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Full-Expansion",
                "sec_num": null
            },
            {
                "text": "Note that to distinguish from other words, we explicitly mark up in the structured feature the pronoun and the antecedent candidate under consideration, by appending a string tag \"ANA\" and \"CANDI\" in their respective nodes (e.g.,\"NN-CANDI\" for \"man\" and \"PRP-ANA\" for \"him\" as shown in Figure 1 ).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 286,
                        "end": 294,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Full-Expansion",
                "sec_num": null
            },
            {
                "text": "To calculate the similarity between two structured features, we use the convolution tree kernel that is defined by Collins and Duffy (2002) and Moschitti (2004) . Given two trees, the kernel will enumerate all their subtrees and use the number of common subtrees as the measure of the similarity between the trees. As has been proved, the convolution kernel can be efficiently computed in polynomial time.",
                "cite_spans": [
                    {
                        "start": 115,
                        "end": 139,
                        "text": "Collins and Duffy (2002)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 144,
                        "end": 160,
                        "text": "Moschitti (2004)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Structural Kernel and Composite Kernel",
                "sec_num": "4.3"
            },
            {
                "text": "The above tree kernel only aims for the structured feature. We also need a composite kernel to combine together the structured feature and the normal features described in Section 3.1. In our study we define the composite kernel as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Structural Kernel and Composite Kernel",
                "sec_num": "4.3"
            },
            {
                "text": "EQUATION",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "EQUATION",
                        "ref_id": "EQREF",
                        "raw_str": "K c (x 1 , x 2 ) = K n (x 1 , x 2 ) / |K n (x 1 , x 2 )| * K t (x 1 , x 2 ) / |K t (x 1 , x 2 )|",
                        "eq_num": "(2)"
                    }
                ],
                "section": "Structural Kernel and Composite Kernel",
                "sec_num": "4.3"
            },
            {
                "text": "where K t is the convolution tree kernel defined for the structured feature, and K n is the kernel applied on the normal features. Both kernels are divided by their respective length 4 for normalization. The new composite kernel K c , defined as the multiplier of normalized K t and K n , will return a value close to 1 only if both the structured features and the normal features from the two vectors have high similarity under their respective kernels.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Structural Kernel and Composite Kernel",
                "sec_num": "4.3"
            },
            {
                "text": "In our study we focussed on the third-person pronominal anaphora resolution. All the experiments were done on the ACE-2 V1.0 corpus (NIST, 2003) , which contains two data sets, training and devtest, used for training and testing respectively. Each of these sets is further divided into three domains: newswire (NWire), newspaper (NPaper), and broadcast news (BNews).",
                "cite_spans": [
                    {
                        "start": 132,
                        "end": 144,
                        "text": "(NIST, 2003)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "5.1"
            },
            {
                "text": "An input raw text was preprocessed automatically by a pipeline of NLP components, including sentence boundary detection, POS-tagging, Text Chunking and Named-Entity Recognition. The texts were parsed using the maximum-entropybased Charniak parser (Charniak, 2000) , based on which the structured features were computed automatically. For learning, the SVM-Light software (Joachims, 1999) was employed with the convolution tree kernel implemented by Moschitti (2004) . All classifiers were trained with default learning parameters.",
                "cite_spans": [
                    {
                        "start": 247,
                        "end": 263,
                        "text": "(Charniak, 2000)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 371,
                        "end": 387,
                        "text": "(Joachims, 1999)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 449,
                        "end": 465,
                        "text": "Moschitti (2004)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "5.1"
            },
            {
                "text": "The performance was evaluated based on the metric success, the ratio of the number of correctly resolved 5 anaphor over the number of all anaphors. For each anaphor, the NPs occurring within the current and previous two sentences were taken as the initial antecedent candidates. Those with mismatched number and gender agreements were filtered from the candidate set. Also, pronouns or NEs that disagreed in person with the anaphor were removed in advance. For training, there were 1207, 1440, and 1260 pronouns with non-empty candidate set found pronouns in the three domains respectively, while for testing, the number was 313, 399 and 271. On average, a pronoun anaphor had 6\u223c9 antecedent candidates ahead. Totally, we got around 10k, 13k and 8k training instances for the three domains. Table 2 lists the performance of different systems. We first tested Hobbs' algorithm (Hobbs, 1978) . 5 An anaphor was deemed correctly resolved if the found antecedent is in the same coreference chain of the anaphor. Hobbs (1978) 66 Described in Section 2, the algorithm uses heuristic rules to search the parse tree for the antecedent, and will act as a good baseline to compare with the learned-based approach with the structured feature. As shown in the first line of Table 2 shows the baseline system (NORM) that uses only the normal features listed in Table 1 . Throughout our experiments, we applied the polynomial kernel on the normal features to learn the SVM classifiers. In the table we also compared the SVM-based results with those using other learning algorithms, i.e., Maximum Entropy (Maxent) and C5 decision tree, which are more commonly used in the anaphora resolution task.",
                "cite_spans": [
                    {
                        "start": 876,
                        "end": 889,
                        "text": "(Hobbs, 1978)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 892,
                        "end": 893,
                        "text": "5",
                        "ref_id": null
                    },
                    {
                        "start": 1008,
                        "end": 1020,
                        "text": "Hobbs (1978)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 791,
                        "end": 798,
                        "text": "Table 2",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 1262,
                        "end": 1269,
                        "text": "Table 2",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 1348,
                        "end": 1355,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "5.1"
            },
            {
                "text": "As shown in the table, the system with normal features (NORM) obtains 74%\u223c77% success rates for the three domains. The performance is similar to other published results like those by Keller and Lapata (2003) , who adopted a similar feature set and reported around 75% success rates on the ACE data set. The comparison between different learning algorithms indicates that SVM can work as well as or even better than Maxent (NORM MaxEnt) or C5 (NORM C5).",
                "cite_spans": [
                    {
                        "start": 183,
                        "end": 207,
                        "text": "Keller and Lapata (2003)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NWire NPaper BNews",
                "sec_num": null
            },
            {
                "text": "The last two blocks of Table 2 summarize the results using the three syntactic structured features, i.e, Min Expansion (S MIN), Simple Expansion (S SIMPLE) and Full Expansion (S FULL). Between them, the third block is for the systems using the individual structured feature alone. We can see that all the three structured features per- Table 4 : The resolution results for different types of pronouns form better than the normal features for NPaper (up to 5.3% success) and BNews (up to 8.1% success), or equally well (\u00b11 \u223c 2% in success) for NWire. When used together with the normal features, as shown in the last block, the three structured features all outperform the baselines. Especially, the combinations of NORM+S SIMPLE and NORM+S FULL can achieve significantly 6 better results than NORM, with the success rate increasing by (4.8%, 5.3% and 8.1%) and (7.1%, 5.8%, 7.2%) respectively. All these results prove that the structured syntactic feature is effective for pronoun resolution. We further compare the performance of the three different structured features. As shown in Table 2 , when used together with the normal features, Full Expansion gives the highest success rates in NWire and NPaper, but nevertheless the lowest in BNews. This should be because feature Full-Expansion captures a larger portion of the parse trees, and thus can provide more syntactic information than Min Expansion or Simple Expansion. However, if the texts are less-formally structured as those in BNews, Full-Expansion would inevitably involve more noises and thus adversely affect the resolution performance. By contrast, feature Simple Expansion would achieve balance between the information and the noises to be introduced: from Table 2 we can find that compared with the other two features, Simple Expansion is capable of producing average results for all the three domains. As described, to compute the structured feature, parse trees for different sentences are connected to form a large tree for the text. It would be interesting to find how the structured feature works for pronouns whose antecedents reside in different sentences. For this purpose we tested the success rates for the pronouns with the closest antecedent occurring in the same sentence, one-sentence apart, and two-sentence apart. Table 3 compares the learning systems with/without the structured feature present. From the table, for all the systems, the success rates drop with the increase of the distances between the pronoun and the antecedent. However, in most cases, adding the structured feature would bring consistent improvement against the baselines regardless of the number of sentence distance. This observation suggests that the structured syntactic information is helpful for both intra-sentential and intersentential pronoun resolution.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 23,
                        "end": 30,
                        "text": "Table 2",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 336,
                        "end": 343,
                        "text": "Table 4",
                        "ref_id": null
                    },
                    {
                        "start": 1084,
                        "end": 1091,
                        "text": "Table 2",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 1723,
                        "end": 1730,
                        "text": "Table 2",
                        "ref_id": "TABREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Systems with Structured Features",
                "sec_num": "5.3"
            },
            {
                "text": "We were also concerned about how the structured feature works for different types of pronouns. Table 4 lists the resolution results for two types of pronouns: person pronouns (i.e., \"he\", \"she\") and neuter-gender pronouns (i.e., \"it\" and \"they\"). As shown, with the structured feature incorporated, the system NORM+S Simple can significantly boost the performance of the baseline (NORM), for both personal pronoun and neutergender pronoun resolution. Figure 2 plots the learning curves for the systems with three feature sets, i.e., normal features (NORM), structured feature alone (S Simple), and combined features (NORM+S Simple). We trained each system with different number of instances from 1k, 2k, 3k, . . . , till the full size. Each point in the figures was the average over two trials with instances selected forwards and backwards respectively. From the figures we can find that (1) Used in combination (NORM+S Simple), the structured feature shows superiority over NORM, achieving results consistently better than the normal features (NORM) do in all the three domains.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 95,
                        "end": 102,
                        "text": "Table 4",
                        "ref_id": null
                    },
                    {
                        "start": 451,
                        "end": 459,
                        "text": "Figure 2",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Systems with Structured Features",
                "sec_num": "5.3"
            },
            {
                "text": "(2) With training instances above 3k, the structured feature, used either in isolation (S Simple) or in combination (NORM+S Simple), leads to steady increase in the success rates and exhibit smoother learning curves than the normal features (NORM). These observations further prove the reliability of the structured feature in pronoun resolution.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning Curves",
                "sec_num": "5.4"
            },
            {
                "text": "In our experiment we were also interested to compare the structured feature with the normal flat features extracted from the parse tree, like feature Subject and Object. For this purpose we took out these two grammatical features from the normal feature set, and then trained the systems again. As shown in Table 5 , the two grammaticalrole features are important for the pronoun resolution: removing these features results in up to 5.7% (NWire) decrease in success. However, when the structured feature is included, the loss in success reduces to 1.9% and 1.1% for NWire and BNews, and a slight improvement can even be achieved for NPaper. This indicates that the structured feature can effectively provide the syntactic information Table 6 : Results using different parsers important for pronoun resolution. We also tested the flat syntactic feature set proposed in Luo and Zitouni (2005) 's work. As described in Section 2, the feature set is inspired the binding theory, including those features like whether the candidate is c commanding the pronoun, and the counts of \"NP\", \"VP\", \"S\" nodes in the commanding path. The last line of Table 5 shows the results by adding these features into the normal feature set. In line with the reports in (Luo and Zitouni, 2005) we do observe the performance improvement against the baseline (NORM) for all the domains. However, the increase in the success rates (up to 1.3%) is not so large as by adding the structured feature (NORM+S Simple) instead.",
                "cite_spans": [
                    {
                        "start": 868,
                        "end": 890,
                        "text": "Luo and Zitouni (2005)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 1245,
                        "end": 1268,
                        "text": "(Luo and Zitouni, 2005)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 307,
                        "end": 314,
                        "text": "Table 5",
                        "ref_id": "TABREF6"
                    },
                    {
                        "start": 734,
                        "end": 741,
                        "text": "Table 6",
                        "ref_id": null
                    },
                    {
                        "start": 1137,
                        "end": 1144,
                        "text": "Table 5",
                        "ref_id": "TABREF6"
                    }
                ],
                "eq_spans": [],
                "section": "Feature Analysis",
                "sec_num": "5.5"
            },
            {
                "text": "As mentioned, the above reported results were based on Charniak (2000)'s parser. It would be interesting to examine the influence of different parsers on the resolution performance. For this purpose, we also tried the parser by (Mode II) 7 , and the results are shown in Table 6 . We can see that Charniak (2000) 's parser leads to higher success rates for NPaper and BNews, while 's achieves better results for NWire. However, the difference between the results of the two parsers is not significant (less than 2% success) for the three domains, no matter whether the structured feature is used alone or in combination.",
                "cite_spans": [
                    {
                        "start": 297,
                        "end": 312,
                        "text": "Charniak (2000)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 271,
                        "end": 278,
                        "text": "Table 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Comparison with Different Parsers",
                "sec_num": "5.6"
            },
            {
                "text": "The purpose of this paper is to explore how to make use of the structured syntactic knowledge to do pronoun resolution. Traditionally, syntactic information from parse trees is represented as a set of flat features. However, the features are usually selected and defined by heuristics and may not necessarily capture all the syntactic information provided by the parse trees. In the paper, we propose a kernel-based method to incorporate the information from parse trees. Specifically, we directly utilize the syntactic parse tree as a structured feature, and then apply kernels to such a feature, together with other normal features, to learn the decision classifier and do the resolution. Our experimental results on ACE data set show that the system with the structured feature included can achieve significant increase in the success rate by around 5%\u223c8%, for all the different domains. The deeper analysis on various factors like training size, feature set or parsers further proves that the structured feature incorporated with our kernelbased method is reliably effective for the pronoun resolution task.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6"
            },
            {
                "text": "For our task, the result of f (x) is used as the confidence value of the candidate to be the antecedent of the pronoun described by x.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "If the pronoun and the candidate are not in the same sentence, we will not include the nodes denoting the sentences before the candidate or after the pronoun.3 We will not expand the nodes denoting the sentences other than where the pronoun and the candidate occur.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "The length of a kernel K is defined as |K(x 1 , x 2 )| = sqrt(K(x 1 , x 1 ) * K(x 2 , x 2 ))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "p < 0.05 by a 2-tailed t test. For this reason, our subsequent reports will focus on Simple Expansion, unless otherwise specified.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Evaluating automated and manual acquisition of anaphora resolution strategies",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Aone",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [
                            "W"
                        ],
                        "last": "Bennett",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proceedings of the 33rd Annual Meeting of the Association for Compuational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "122--129",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "C. Aone and S. W. Bennett. 1995. Evaluating auto- mated and manual acquisition of anaphora resolu- tion strategies. In Proceedings of the 33rd Annual Meeting of the Association for Compuational Lin- guistics, pages 122-129.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "A maximum-entropy-inspired parser",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Charniak",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of North American chapter of the Association for Computational Linguistics annual meeting",
                "volume": "",
                "issue": "",
                "pages": "132--139",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "E. Charniak. 2000. A maximum-entropy-inspired parser. In Proceedings of North American chapter of the Association for Computational Linguistics an- nual meeting, pages 132-139.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "New ranking algorithms for parsing and tagging: kernels over discrete structures and the voted perceptron",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Collins",
                        "suffix": ""
                    },
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Duffy",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 40th Annual Meeting of the Association 7 As in their pulic reports on Section 23 of WSJ TreeBank",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Collins and N. Duffy. 2002. New ranking algo- rithms for parsing and tagging: kernels over discrete structures and the voted perceptron. In Proceed- ings of the 40th Annual Meeting of the Association 7 As in their pulic reports on Section 23 of WSJ TreeBank, Charniak (2000)'s parser achieves 89.6% recall and 89.5% precision with 0.88 crossing brackets (words \u2264 100), against",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "s 88.1% recall and 88.3% precision with 1.06 crossing brackets",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "Collins",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "for Computational Linguistics (ACL'02)",
                "volume": "",
                "issue": "",
                "pages": "263--270",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Collins (1999)'s 88.1% recall and 88.3% precision with 1.06 crossing brackets. for Computational Linguistics (ACL'02), pages 263- 270.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Head-Driven Statistical Models for Natural Language Parsing",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Collins",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Collins. 1999. Head-Driven Statistical Models for Natural Language Parsing. Ph.D. thesis, University of Pennsylvania.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Resolving pronoun references",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Hobbs",
                        "suffix": ""
                    }
                ],
                "year": 1978,
                "venue": "Lingua",
                "volume": "44",
                "issue": "",
                "pages": "339--352",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Hobbs. 1978. Resolving pronoun references. Lin- gua, 44:339-352.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Making large-scale svm learning practical",
                "authors": [
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Joachims",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Advances in Kernel Methods -Support Vector Learning",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "T. Joachims. 1999. Making large-scale svm learning practical. In Advances in Kernel Methods -Support Vector Learning. MIT Press.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Using the web to obtain freqencies for unseen bigrams",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Keller",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Lapata",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Computational Linguistics",
                "volume": "29",
                "issue": "3",
                "pages": "459--484",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Keller and M. Lapata. 2003. Using the web to ob- tain freqencies for unseen bigrams. Computational Linguistics, 29(3):459-484.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Anaphora for everyone: pronominal anaphra resolution without a parser",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Kennedy",
                        "suffix": ""
                    },
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Boguraev",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proceedings of the 16th International Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "113--118",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "C. Kennedy and B. Boguraev. 1996. Anaphora for everyone: pronominal anaphra resolution with- out a parser. In Proceedings of the 16th Inter- national Conference on Computational Linguistics, pages 113-118, Copenhagen, Denmark.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "An algorithm for pronominal anaphora resolution",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Lappin",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Leass",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Computational Linguistics",
                "volume": "20",
                "issue": "4",
                "pages": "525--561",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Lappin and H. Leass. 1994. An algorithm for pronominal anaphora resolution. Computational Linguistics, 20(4):525-561.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Milti-lingual coreference resolution with syntactic features",
                "authors": [
                    {
                        "first": "X",
                        "middle": [],
                        "last": "Luo",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Zitouni",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of Human Language Techonology conference and Conference on Empirical Methods in Natural Language Processing (HLT/EMNLP)",
                "volume": "",
                "issue": "",
                "pages": "660--667",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "X. Luo and I. Zitouni. 2005. Milti-lingual coreference resolution with syntactic features. In Proceedings of Human Language Techonology conference and Con- ference on Empirical Methods in Natural Language Processing (HLT/EMNLP), pages 660-667.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Robust pronoun resolution with limited knowledge",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Mitkov",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 17th Int. Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "869--875",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "R. Mitkov. 1998. Robust pronoun resolution with lim- ited knowledge. In Proceedings of the 17th Int. Con- ference on Computational Linguistics, pages 869- 875.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "A study on convolution kernels for shallow semantic parsing",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Moschitti",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL'04)",
                "volume": "",
                "issue": "",
                "pages": "335--342",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "A. Moschitti. 2004. A study on convolution kernels for shallow semantic parsing. In Proceedings of the 42nd Annual Meeting of the Association for Compu- tational Linguistics (ACL'04), pages 335-342.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Improving machine learning approaches to coreference resolution",
                "authors": [
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Ng",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Cardie",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "104--111",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "V. Ng and C. Cardie. 2002. Improving machine learn- ing approaches to coreference resolution. In Pro- ceedings of the 40th Annual Meeting of the Associa- tion for Computational Linguistics, pages 104-111, Philadelphia.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "A machine learning approach to coreference resolution of noun phrases",
                "authors": [
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Soon",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ng",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Lim",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Computational Linguistics",
                "volume": "27",
                "issue": "4",
                "pages": "521--544",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "W. Soon, H. Ng, and D. Lim. 2001. A machine learning approach to coreference resolution of noun phrases. Computational Linguistics, 27(4):521- 544.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "The Nature of Statistical Learning Theory",
                "authors": [
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Vapnik",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "V. Vapnik. 1995. The Nature of Statistical Learning Theory. Springer.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Improving pronoun resolution by incorporating coreferential information of candidates",
                "authors": [
                    {
                        "first": "X",
                        "middle": [],
                        "last": "Yang",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Su",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Zhou",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Tan",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of 42th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "127--134",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "X. Yang, J. Su, G. Zhou, and C. Tan. 2004. Improv- ing pronoun resolution by incorporating coreferen- tial information of candidates. In Proceedings of 42th Annual Meeting of the Association for Compu- tational Linguistics, pages 127-134, Barcelona.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "Kernel methods for relation extraction",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Zelenko",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Aone",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Richardella",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Journal of Machine Learning Research",
                "volume": "3",
                "issue": "6",
                "pages": "1083--1106",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. Zelenko, C. Aone, and A. Richardella. 2003. Ker- nel methods for relation extraction. Journal of Ma- chine Learning Research, 3(6):1083 -1106.",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "Extracting relations with integrated information using kernel methods",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Zhao",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Grishman",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of 43rd Annual Meeting of the Association for Computational Linguistics (ACL05)",
                "volume": "",
                "issue": "",
                "pages": "419--426",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Zhao and R. Grishman. 2005. Extracting rela- tions with integrated information using kernel meth- ods. In Proceedings of 43rd Annual Meeting of the Association for Computational Linguistics (ACL05), pages 419-426.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "text": "Learning curves of systems with different features",
                "type_str": "figure",
                "num": null
            },
            "TABREF0": {
                "content": "<table><tr><td>: Feature set for the baseline pronoun res-</td></tr><tr><td>olution system</td></tr><tr><td>salience measures have to be assigned manually.</td></tr></table>",
                "text": "",
                "type_str": "table",
                "num": null,
                "html": null
            },
            "TABREF2": {
                "content": "<table><tr><td>, Hobbs'</td></tr></table>",
                "text": "",
                "type_str": "table",
                "num": null,
                "html": null
            },
            "TABREF4": {
                "content": "<table><tr><td/><td colspan=\"2\">NWire</td><td colspan=\"2\">NPaper</td><td colspan=\"2\">BNews</td></tr><tr><td>Type</td><td colspan=\"6\">person neuter person neuter person neuter</td></tr><tr><td colspan=\"2\">(Number of Prons) (171)</td><td>(142)</td><td>(250)</td><td>(149)</td><td>(153)</td><td>(118)</td></tr><tr><td>NORM</td><td>81.9</td><td>65.5</td><td>80.0</td><td>73.2</td><td>74.5</td><td>73.7</td></tr><tr><td>S Simple</td><td>81.9</td><td>62.7</td><td>83.2</td><td>81.9</td><td>82.4</td><td>82.2</td></tr><tr><td>NORM+S Simple</td><td>87.1</td><td>69.7</td><td>83.6</td><td>81.2</td><td>86.9</td><td>76.3</td></tr></table>",
                "text": "The resolution results for pronouns with antecedent in different sentences apart",
                "type_str": "table",
                "num": null,
                "html": null
            },
            "TABREF6": {
                "content": "<table><tr><td>Feature</td><td>Parser</td><td colspan=\"3\">NWire NPaper BNews</td></tr><tr><td>S Simple</td><td>Charniak00 Collins99</td><td>73.2 75.1</td><td>82.7 83.2</td><td>82.3 80.4</td></tr><tr><td colspan=\"2\">NORM+ Charniak00</td><td>79.2</td><td>82.7</td><td>82.3</td></tr><tr><td>S Simple</td><td>Collins99</td><td>80.8</td><td>81.5</td><td>82.3</td></tr></table>",
                "text": "Comparison of the structured feature and the flat features extracted from parse trees",
                "type_str": "table",
                "num": null,
                "html": null
            }
        }
    }
}