{
    "paper_id": "U07-1011",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T03:08:54.264055Z"
    },
    "title": "Experiments in Mutual Exclusion Bootstrapping",
    "authors": [
        {
            "first": "Tara",
            "middle": [],
            "last": "Murphy",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Sydney NSW 2006",
                "location": {
                    "country": "Australia"
                }
            },
            "email": ""
        },
        {
            "first": "James",
            "middle": [
                "R"
            ],
            "last": "Curran",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Sydney NSW 2006",
                "location": {
                    "country": "Australia"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Mutual Exclusion Bootstrapping (MEB) was designed to overcome the problem of semantic drift suffered by iterative bootstrapping, where the meaning of extracted terms quickly drifts from the original seed terms (Curran et al., 2007). MEB works by extracting mutually exclusive classes in parallel which constrain each other. In this paper we explore the strengths and limitations of MEB by applying it to two novel lexical-semantic extraction tasks: extracting bigram named entities and WordNet lexical file classes (Fellbaum, 1998) from the Google Web 1T 5-grams.",
    "pdf_parse": {
        "paper_id": "U07-1011",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Mutual Exclusion Bootstrapping (MEB) was designed to overcome the problem of semantic drift suffered by iterative bootstrapping, where the meaning of extracted terms quickly drifts from the original seed terms (Curran et al., 2007). MEB works by extracting mutually exclusive classes in parallel which constrain each other. In this paper we explore the strengths and limitations of MEB by applying it to two novel lexical-semantic extraction tasks: extracting bigram named entities and WordNet lexical file classes (Fellbaum, 1998) from the Google Web 1T 5-grams.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Extracting lexical semantic resources from text with minimal supervision is critical to overcoming the knowledge bottleneck in Natural Language Processing (NLP) tasks ranging from Word Sense Disambiguation to Question Answering.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Template-based extraction is attractive because it is reasonably efficient, works on small and large datasets, and requires minimal linguistic preprocessing, making it fairly language independent. Hearst (1992) proposed template-based extraction for identifying hyponyms using templates like X, Y, and/or other Z where X and Y are hyponyms of Z. Riloff and Shepherd (1997) proposed iterative bootstrapping where frequent neighbours to terms from a given semantic class are extracted in multiple bootstrap iterations. Roark and Charniak (1998) improved its accuracy by optimising the bootstrapping parameters. In mutual bootstrapping (Riloff and Jones, 1999) the terms, and the contexts they occur in, are extracted. Similar approaches have been used in Information Extraction (IE) for identifying company headquarters (Agichtein et al., 2000) and acronym expansions (Sundaresan and Yi, 2000) .",
                "cite_spans": [
                    {
                        "start": 346,
                        "end": 372,
                        "text": "Riloff and Shepherd (1997)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 517,
                        "end": 542,
                        "text": "Roark and Charniak (1998)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 633,
                        "end": 657,
                        "text": "(Riloff and Jones, 1999)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 818,
                        "end": 842,
                        "text": "(Agichtein et al., 2000)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 866,
                        "end": 891,
                        "text": "(Sundaresan and Yi, 2000)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In Mutual Exclusion Bootstrapping (MEB), we assume the semantic classes partition terms into disjoint sets, that is, the classes are mutually exclusive (Curran et al., 2007) . Each class is extracted in parallel using separate bootstrapping loops that each race to collect terms and contexts. Although this assumption is clearly false, it significantly reduces the extraction errors of existing approaches.",
                "cite_spans": [
                    {
                        "start": 152,
                        "end": 173,
                        "text": "(Curran et al., 2007)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "This paper presents two applications of MEB that allow some insight into MEB's strengths and limitations. First, we extend MEB to extracting bigram BBN named entity types (Weischedel and Brunstein, 2005) . We discover that both unigram and bigram MEB are very sensitive to the context window surrounding the extracted terms. Surprisingly, MEB is insensitive to the order the semantic classes are presented and the noise in the terms themselves.",
                "cite_spans": [
                    {
                        "start": 171,
                        "end": 203,
                        "text": "(Weischedel and Brunstein, 2005)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Second, we extract common nouns using 25 semantic classes defined by the WordNet lexical files (Fellbaum, 1998) . We use a closed vocabulary of WordNet unigram nouns, so the evaluation can be performed automatically against WordNet. We find that MEB performs well on classes with narrow definitions and thus more coherent contexts, such as animal, but performs poorly on classes like cognition. We also find that increasing the number of seed terms increases the accuracy significantly.",
                "cite_spans": [
                    {
                        "start": 95,
                        "end": 111,
                        "text": "(Fellbaum, 1998)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Mutual bootstrapping (Riloff and Jones, 1999) has the advantage that it can identify new templates or contexts, which in turn identify new terms, significantly increasing recall. Unfortunately, erroneously adding a term with a different predominant sense or a context that weakly constrains the terms, quickly leads to semantic drift, where erroneous terms or contexts infect the semantic class.",
                "cite_spans": [
                    {
                        "start": 21,
                        "end": 45,
                        "text": "(Riloff and Jones, 1999)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
            {
                "text": "Mutual Exclusion Bootstrapping (MEB) attempts to minimise semantic drift in both the terms and contexts (Curran et al., 2007) . It does this by extracting all of the semantic classes in parallel, using an independent bootstrapping loop for each class, with the constraint that a term or context must only be used by one class. We assume that each term has only one sense and that each context only extracts terms with one sense, that is, the semantic classes are mutually exclusive with respect to terms and contexts.",
                "cite_spans": [
                    {
                        "start": 104,
                        "end": 125,
                        "text": "(Curran et al., 2007)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
            {
                "text": "This assumption is far from realistic, but it is very effective at reducing the degree of semantic drift. For many terms, especially the bigram named entities, there is a clearly dominant semantic class. However, for some pairs of semantic classes, e.g. nationalities and languages, there is a significant lexical overlap and so they are far from mutually exclusive.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
            {
                "text": "The MEB algorithm is shown in Algorithm 1. In each iteration, contexts and then terms are added to each semantic class. If more than one class attempts to extract a context or term in the current iteration then it is eliminated, leading to mutual exclusion between the semantic classes. The terms and contexts are ranked in the same way as Riloff and Jones (1999) , our only addition in MEB is the parallel mutual exclusion constraint.",
                "cite_spans": [
                    {
                        "start": 340,
                        "end": 363,
                        "text": "Riloff and Jones (1999)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
            {
                "text": "Mutual exclusion is very strict and many terms and contexts are discarded. This is not a major issue when precision is paramount and we are using a large dataset, e.g. Web 1T, but it can be problematic on smaller datasets. It is a significant problem when the semantic classes are far from mutually exclusive because many viable contexts are rejected when the terms they extract are polysemous, even though the contexts themselves reliably select one sense.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
            {
                "text": "MEB is potentially sensitive to the order the contexts and terms are added to semantic classes, since once they are added to a class they cannot be added in : Seed word lists S k \u2200 categories k in : Raw contexts C and terms T in : # terms N T and contexts N C per iteration out: Term T k and context C k lists \u2200 categories k T k \u2190\u2212 S k \u2200 categories k; foreach iteration do foreach c \u2208 C do count # times c occurs with each t \u2208 T k ; discard c if occurs with multiple classes; foreach class k do sort set of c by above occurrence counts; add top N C contexts to C k ; foreach t \u2208 T do count # times t occurs with each c \u2208 C k ; discard t if occurs with multiple classes; foreach class k do sort set of t by above occurrence counts; add top N T terms to T k ;",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
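            {
                "text": "To make the control flow concrete, the following is a minimal Python sketch of the context-selection half of one MEB iteration (the term-selection half is symmetric). It is an illustration only, not the authors' space-optimised C++ implementation; the names term_lists, cooccurs and n_c are hypothetical. term_lists maps each class to its current term set, and cooccurs maps each context to the set of terms it was seen with.\n\nfrom collections import defaultdict\n\ndef grow_contexts(term_lists, cooccurs, n_c):\n    # For each context, count how many known terms of each class it extracts.\n    votes = defaultdict(dict)  # context -> {class: count}\n    for ctx, terms in cooccurs.items():\n        for klass, known in term_lists.items():\n            hits = len(terms & known)\n            if hits:\n                votes[ctx][klass] = hits\n    chosen = defaultdict(list)\n    for ctx, by_class in votes.items():\n        if len(by_class) > 1:\n            continue  # mutual exclusion: contested contexts are discarded\n        (klass, hits), = by_class.items()\n        chosen[klass].append((hits, ctx))\n    # Each class keeps its top n_c contexts by occurrence count.\n    return {k: [c for _, c in sorted(v, reverse=True)[:n_c]]\n            for k, v in chosen.items()}",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },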
            {
                "text": "Algorithm 1: Mutual Exclusion Bootstrapping elsewhere (by the mutual exclusion assumption). In this sense, the individual bootstrapping loops compete in parallel to reach a term or context first, and claim it for themselves. Polysemous terms may be added to just one semantic class if it is not identified by contexts from multiple semantic classes simultaneously, and this also applies for contexts. There is no guarantee that the predominant sense of a term will be reached first, although if it is significantly more frequent, it is likely to be reached first since it will appear in more contexts.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
            {
                "text": "3 Using the Google Web 1T n-grams Riloff and Jones (1999) used contexts extracted from POS tagged and chunked text by AutoSlog-TS (Riloff, 1996) . Our goal was to keep MEB language independent to maintain this key advantage of template-based approaches. We also wanted to demonstrate that MEB scales efficiently to extremely large datasets and vocabularies. Google has collected the Web 1T corpus (Brants and Franz, 2006) , which consists of unigram to 5gram counts calculated over 1 trillion words of web text collected during January 2006. The text was tokenised using Penn Treebank tokenisation, except that words are usually split on hyphens; and dates, email addresses, and URLs are kept as single tokens. Sentence boundaries were detected using sta-tistical techniques. The individual words in the ngrams occurred \u2265200 times, otherwise they were replaced with <UNK>. Each n-gram appears \u226540 times. There is 25GB of compressed data.",
                "cite_spans": [
                    {
                        "start": 34,
                        "end": 57,
                        "text": "Riloff and Jones (1999)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 130,
                        "end": 144,
                        "text": "(Riloff, 1996)",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 397,
                        "end": 421,
                        "text": "(Brants and Franz, 2006)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
            {
                "text": "We use the 3-, 4-, or 5-grams from Web 1T as our raw data, depending on the experiment. The middle token (for unigrams) or tokens (for bigrams) form the term and the one or two tokens on either side form the context. This context definition is quite language independent (except for languages without word segmentation). Unfortunately, we can only extract terms consisting of one or two words, and the contexts are noisier than those extracted from parsed text, cf. Curran (2004) .",
                "cite_spans": [
                    {
                        "start": 466,
                        "end": 479,
                        "text": "Curran (2004)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
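            {
                "text": "As an illustrative sketch (assuming Web 1T lines of the form 'w1 w2 w3 w4 w5<TAB>count'; the helper name is ours), extracting a unigram term and its two-token-either-side context from a 5-gram looks like this:\n\ndef unigram_term_context(line):\n    ngram, count = line.rsplit('\\t', 1)\n    tokens = ngram.split()\n    assert len(tokens) == 5\n    term = tokens[2]  # the middle token is the term\n    context = (tokens[0], tokens[1], tokens[3], tokens[4])  # two tokens either side\n    return term, context, int(count)\n\nFor bigrams, tokens[2:4] would form the term, leaving only a single context token on each side of a 5-gram, which is why bigram contexts are necessarily smaller.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },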
            {
                "text": "For the bigram experiments we follow the process described in Curran et al. (2007) . We removed n-grams with non-titlecase middle token(s) because we only extract proper noun named entity types, and we removed all contexts containing numbers. For the WordNet experiments we only included n-grams where the middle token(s) were a term in WordNet. In every experiment, we eliminate contexts that only appear with one term, and thus terms that only appear in one context, since they cannot be reached.",
                "cite_spans": [
                    {
                        "start": 62,
                        "end": 82,
                        "text": "Curran et al. (2007)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
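            {
                "text": "A sketch of these filters, under the stated assumptions (term_tokens and context_tokens are token tuples from an n-gram split as above; the function name is ours):\n\ndef keep_for_ne_experiments(term_tokens, context_tokens):\n    # Named entity runs keep only titlecase middle token(s)...\n    if not all(t.istitle() for t in term_tokens):\n        return False\n    # ...and drop contexts containing numbers.\n    if any(any(ch.isdigit() for ch in t) for t in context_tokens):\n        return False\n    return True\n\nThe reachability filter (dropping contexts seen with only one term, and hence terms seen in only one context) would then be applied as a pass over the global co-occurrence counts.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },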
            {
                "text": "The size of the resulting dataset varied depending on the experiment from 176MB (for the bigrams heavily filtered using the t-test) to 1.2GB (for the bigrams with a window of one word either side and the WordNet experiments). All of the data must be loaded into memory and for the largest experiments this requires 1.6GB of RAM using our spaceoptimised C++ implementation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mutual Exclusion Bootstrapping",
                "sec_num": "2"
            },
            {
                "text": "In our first set of experiments we continue our previous work on proper-noun named entities. We based our semantic classes on the 29 entity types used to annotated the BBN Pronoun Coreference and Entity Type Corpus (Weischedel and Brunstein, 2005) . We ignored entity types that did not primarily include proper nouns, for example the DESCRIPTION types, CHEMICALS and QUANTITIES.",
                "cite_spans": [
                    {
                        "start": 215,
                        "end": 247,
                        "text": "(Weischedel and Brunstein, 2005)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Named Entity Classes",
                "sec_num": "4"
            },
            {
                "text": "For the unigram experiments we reused our previous classification where we ignored entity types that were almost exclusively multi-word terms, for example WORKS OF ART and LAWS. We also split the PERSON ",
                "cite_spans": [
                    {
                        "start": 196,
                        "end": 202,
                        "text": "PERSON",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Named Entity Classes",
                "sec_num": "4"
            },
            {
                "text": "Our evaluation followed the manual inspection process used in our previous experiments. To make this more efficient, we stored a cache of previous evaluator decisions for each class, so that once a decision had been made for a particular term in a particular class it would be made automatically in future instances. This dramatically reduces the effort re-quired for manual evaluation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Named Entity Evaluation",
                "sec_num": "5"
            },
            {
                "text": "Although the seed lists were mutually exclusive, for the purposes of evaluation, ambiguous words such as French were counted as correct if they appeared in either valid category (NORP or LANG).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Named Entity Evaluation",
                "sec_num": "5"
            },
            {
                "text": "If a single word was an clearly part of a multiword term we counted it as correct (e.g. Coast as a LOC) with the exception of the mixed unigrambigram experiments. If the word was not strongly indicative of a semantic class (e.g. The) it was not counted as correct. Mis-spellings of words (e.g. Januray) were also counted as correct. The extracted terms that were unrecognised by the human evaluator were checked using Wikipedia and Google.",
                "cite_spans": [
                    {
                        "start": 99,
                        "end": 103,
                        "text": "LOC)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Named Entity Evaluation",
                "sec_num": "5"
            },
            {
                "text": "We calculate accuracy at n -the percentage of correct terms in the top n ranked terms, following previous bootstrapping work. This is averaged over the semantic classes (Av(n)). We manually evaluated all semantic classes down to n = 50, which adequately discriminates between most configurations. We vary the number of seeds (nS), and terms (nT) and context (nC) added in each iteration.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Named Entity Evaluation",
                "sec_num": "5"
            },
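            {
                "text": "In code, the metric is simply (a sketch; ranked_by_class and gold_by_class are hypothetical names for each class's ranked output and its set of terms judged correct):\n\ndef accuracy_at_n(ranked, gold, n):\n    top = ranked[:n]\n    return 100.0 * sum(t in gold for t in top) / len(top)\n\ndef av_n(ranked_by_class, gold_by_class, n):\n    # Average accuracy at n over all semantic classes, i.e. Av(n).\n    scores = [accuracy_at_n(ranked_by_class[k], gold_by_class[k], n)\n              for k in ranked_by_class]\n    return sum(scores) / len(scores)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Named Entity Evaluation",
                "sec_num": "5"
            },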
            {
                "text": "Our initial expectation was that bigram named entities would be an easier task than unigram named entities because they had fewer senses and so better satisfied the mutual exclusion assumption. Also, we expected them to be easier to evaluate since they were less ambiguous. However, the results did not match our intuition and so we experimented with unigrams and bigrams to determine the cause.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Named Entity Experiments",
                "sec_num": "6"
            },
            {
                "text": "A major disadvantage for the bigram and longer ngram experiments is that the size of the context must be reduced to accommodate the term itself within a fixed sized n-gram (e.g. the Web 1T 5-grams). Even if longer n-grams were collected for bootstrapping, there would still be the problem of sparser counts (even from one trillion words).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Context Geometry",
                "sec_num": "6.1"
            },
            {
                "text": "We started by repeating our original unigram named entity experiments but this time we reduced the context window to one token on the left and/or right, as shown in Table 2 . Table 3 shows the impact of context geometry on unigram accuracy. Our previous best unigram results are UNI5GMS with 78%. Removing a token from the right context (UNI4LEFT) makes almost no difference to the results, but removing a token from the left context (UNI4RIGHT) makes an enormous difference (a loss of almost 30%). The effect of removing both (UNI3GMS) is slightly worse again. We should also note that the UNI3GMS and UNI4GMS experiments use the Web 1T 3-and 4gram data, so the counts are larger and more reliable, and the chance of shared contexts is greater. This suggests that the impact of removing the left context is even greater than these results indicate.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 165,
                        "end": 172,
                        "text": "Table 2",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 175,
                        "end": 182,
                        "text": "Table 3",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Context Geometry",
                "sec_num": "6.1"
            },
            {
                "text": "NAME TEMPLATE UNI5GMS w 1 w 2 X w 3 w 4 UNI4LEFT w 1 w 2 X w 3 UNI4RIGHT w 1 X w 2 w 3 UNI3GMS w 1 X w 2 BI5LEFT w 1 w 2 X X w 3 BI5RIGHT w 1 X X w 2 w 3 BI4GMS w 1 X X w 2",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Context Geometry",
                "sec_num": "6.1"
            },
            {
                "text": "We also considered the minimum number of contexts a term had to appear in to be included. Our previous experiments required two contexts -otherwise a term cannot be discovered. Increasing this cutoff to 10 made no significant difference.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Context Geometry",
                "sec_num": "6.1"
            },
            {
                "text": "The impact of context geometry on bigram accuracy is shown in 2.9 YES 75 3.0 Table 5 : Effects of category order on unigrams.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 77,
                        "end": 84,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Context Geometry",
                "sec_num": "6.1"
            },
            {
                "text": "One criticism of the MEB algorithm is that it may be highly dependant on the order in which the classes are considered. Because of the mutual exclusion (i.e. once a word has been assigned to a particular class, it can't be assigned to any other class) the class order clearly has the potential to impact the results.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Class Ordering and Stop Classes",
                "sec_num": "6.2"
            },
            {
                "text": "To test this we have run a set of ten unigram experiments with the classes arranged in random permutations. The results are shown in Table 5 . The standard deviation of the ten sets is 3.0, around a mean of 75. This shows that although it has some impact, MEB is reasonably robust to changes in the category order. The standard deviation from these experiments can be used as an indication of the scatter across the MEB experiments in general.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 133,
                        "end": 140,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Class Ordering and Stop Classes",
                "sec_num": "6.2"
            },
            {
                "text": "We also compared the accuracy of using, and not using, the stop classes, which are used to constrain specific semantic drift problems (Curran et al., 2007) . When the stop classes were used, they always appeared first in the same order, before the randomly permuted semantic classes. The difference in the means in Table 5 between the set with and without stop classes shows that using stop classes does improve the accuracy of MEB.",
                "cite_spans": [
                    {
                        "start": 134,
                        "end": 155,
                        "text": "(Curran et al., 2007)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 315,
                        "end": 322,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Class Ordering and Stop Classes",
                "sec_num": "6.2"
            },
            {
                "text": "Our experiments with unigrams in Curran et al. (2007) showed that the best results were obtained by adding 10 contexts per iteration of the bootstrapping process. We have repeated this experiment for bigrams, with the results shown in Table 6 .",
                "cite_spans": [
                    {
                        "start": 33,
                        "end": 53,
                        "text": "Curran et al. (2007)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 235,
                        "end": 242,
                        "text": "Table 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Number of Contexts",
                "sec_num": "6.3"
            },
            {
                "text": "These experiments show that that best results are obtained for numbers of contexts between 5 and 20. There is a significant drop-off in accuracy (\u223c 10 \u2212 20%) for values of nC less than 5 or greater than 20. This demonstrates a preference for having more evidence for new terms being reliable than for simply adding more terms in each iteration. It also shows that keeping the number of terms added per iteration (nT) and the number of contexts (nC) added per iteration reasonably well balanced is the nS nT nC Av(10) Av (50)  5  5  1  73  52  5  5  2  76  50  5  5  5  92  71  5  5  10  92  73  5  5  20  93  70  5  5  50  87  58  5  5 100  84  59   Table 6 : Effects of changing the number of contexts added per iteration.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 520,
                        "end": 657,
                        "text": "(50)  5  5  1  73  52  5  5  2  76  50  5  5  5  92  71  5  5  10  92  73  5  5  20  93  70  5  5  50  87  58  5  5 100  84  59   Table 6",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Number of Contexts",
                "sec_num": "6.3"
            },
            {
                "text": "best strategy. This makes sense if we consider the extreme cases: adding 5 terms and only 1 new context per iteration would mean that it was difficult for the system to expand into new space; adding 5 terms and 100 new contexts per iteration would mean that many of the contexts may not be representative of the contexts that those 5 terms appear in.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Number of Contexts",
                "sec_num": "6.3"
            },
            {
                "text": "One issue that arises when extracting bigrams (or longer n-grams) is the possibility that random combinations of tokens may be selected by chance in the MEB process. To investigate this we have carried out a series of experiments on data that was pre-filtered using collocation statistics. We filtered the Web 1T data so that we only kept bigrams that were significant collocations based on their frequency in the Web 1T corpus. We chose the t-test as our measure of significance as it is simple to calculate and we do not have any low frequency values (< 5) for which the t-test is known to perform badly. Our calculation follows Manning and Sch\u00fctze (1999, pg. 165 ). If f (w) and f (w 1 , w 2 ) are the unigram and bigram frequencies from Web 1T, then the t-test is:",
                "cite_spans": [
                    {
                        "start": 631,
                        "end": 665,
                        "text": "Manning and Sch\u00fctze (1999, pg. 165",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Filtering Using Collocations",
                "sec_num": "6.4"
            },
            {
                "text": "t = p(w 1 , w 2 ) \u2212 p(w 1 )p(w 2 ) p(w 1 ,w 2 ) N\u22121 (1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Filtering Using Collocations",
                "sec_num": "6.4"
            },
            {
                "text": "where N is the number of tokens. Using the Maximum Likelihood Estimates (MLE) we have:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Filtering Using Collocations",
                "sec_num": "6.4"
            },
            {
                "text": "EQUATION",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "EQUATION",
                        "ref_id": "EQREF",
                        "raw_str": "p(w 1 , w 2 ) = f (w 1 , w 2 ) N \u2212 1 (2) p(w) = f (w) N",
                        "eq_num": "(3)"
                    }
                ],
                "section": "Filtering Using Collocations",
                "sec_num": "6.4"
            },
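            {
                "text": "Putting equations (1)-(3) together, a sketch of the filter (the frequencies f_w1, f_w2, f_bigram and the token count n_tokens come from the Web 1T counts; the function name is ours):\n\nfrom math import sqrt\n\ndef t_score(f_w1, f_w2, f_bigram, n_tokens):\n    # MLE probabilities, equations (2) and (3).\n    p12 = f_bigram / (n_tokens - 1)\n    p1 = f_w1 / n_tokens\n    p2 = f_w2 / n_tokens\n    # t-test for collocations, equation (1).\n    return (p12 - p1 * p2) / sqrt(p12 / (n_tokens - 1))\n\nBigrams are then kept only if their t score meets the chosen cutoff (50, 100, 250 or 500 in Table 7).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Filtering Using Collocations",
                "sec_num": "6.4"
            },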
            {
                "text": "The results for cutoffs at different significance levels are shown in Table 7 . These experiments t nS nT nC Av(10) Av (50)  50  5  5 10  86  69  100 5  5 10  87  70  250 5  5 10  85  68  500 5  5 10  81  58   Table 7 : Effects of using only significant collocations. A value of 100 in column 1 means that only bigrams with a significance of t \u2265 100 were used.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 70,
                        "end": 77,
                        "text": "Table 7",
                        "ref_id": null
                    },
                    {
                        "start": 119,
                        "end": 217,
                        "text": "(50)  50  5  5 10  86  69  100 5  5 10  87  70  250 5  5 10  85  68  500 5  5 10  81  58   Table 7",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Filtering Using Collocations",
                "sec_num": "6.4"
            },
            {
                "text": "show that the filtering had no statistically significant result on the accuracy of MEB. In a sense, this is not surprising, as the MEB process of ranking new terms on the number of contexts they occur in is already performing a form of significance testing. However, filtering on collocations does have the advantage of significantly reducing the size of the vocabulary without a significant loss of accuracy at the Av(50) level. For example the number of unique bigram terms in the BI5LEFT experiments in previous sections is 1 858 097, compared to 482 053 for the t \u2265 100 filtered subset (\u223c 25%) and 87 537 for the t \u2265 250 filtered subset (\u223c 5%). This is particularly important when dealing with massive corpora.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Filtering Using Collocations",
                "sec_num": "6.4"
            },
            {
                "text": "Of greater interest than extracting unigrams or bigrams alone is the application of MEB to the general case of extracting n-grams of any length. Since the maximum length of the term and context in the Web 1T corpus is five tokens, and given the decline in accuracy that comes with reducing the length of the context (see Tables 3 and 4 ) it would be impractical to extract terms with more than two tokens.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 321,
                        "end": 335,
                        "text": "Tables 3 and 4",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Multi-word Expressions",
                "sec_num": "6.5"
            },
            {
                "text": "Hence our final experiment with proper noun named entity extraction combines the unigram and bigram data together. This serves as an initial test of extracting multi-word expressions as it is not specific to only unigrams or only bigrams. The data consists of that used for UNI4LEFT and BI5LEFT, so that the context surrounding the unigram or bigram has the same length and geometry.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Multi-word Expressions",
                "sec_num": "6.5"
            },
            {
                "text": "The categories we used for this experiment are those in Table 1 that are marked as suitable for both unigrams and bigrams. The results for this experiment are shown in Table 8 . These are comparable to our best results for bigram extraction. nS nT nC Av(10) Av(50) 5 5 10 84 69 ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 56,
                        "end": 63,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    },
                    {
                        "start": 168,
                        "end": 175,
                        "text": "Table 8",
                        "ref_id": "TABREF6"
                    }
                ],
                "eq_spans": [],
                "section": "Multi-word Expressions",
                "sec_num": "6.5"
            },
            {
                "text": "In our second set of experiments we investigate the application of MEB to common nouns. For these experiments we used the noun classes from WordNet, as described in the next section. We expected the performance on this task to be worse than for proper nouns for a number of reasons. Firstly, common nouns have a larger number of senses, on average, compared to proper nouns. This breaks the mutual exclusion assumption that is central to MEB's success. Secondly, common nouns are likely to occur in a wider range of contexts than many proper nouns. Thirdly, the common noun categories are more general and less well defined than for proper nouns, and abstract nouns are also likely to be harder to categorise than concrete nouns.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "WordNet Common Nouns",
                "sec_num": "7"
            },
            {
                "text": "One factor that favours common noun extraction is that the WordNet classes are designed to have reasonably complete coverage of the semantic space. This is not the case in the BBN named entity categories, which is one of the reasons why we introduced stop classes (Curran et al., 2007) . Table 9 compares the size of the initial dataset for the UNI5GMS experiments (Section 6) and the Word-Net common noun experiments. Even though we have \u223c 10 times fewer unique terms in the WordNet dataset, the number of unique term-context combinations is double that in the UNI5GMS dataset. The total size of the dataset used for the common noun experiments is 1.2GB.",
                "cite_spans": [
                    {
                        "start": 264,
                        "end": 285,
                        "text": "(Curran et al., 2007)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 288,
                        "end": 295,
                        "text": "Table 9",
                        "ref_id": "TABREF7"
                    }
                ],
                "eq_spans": [],
                "section": "WordNet Common Nouns",
                "sec_num": "7"
            },
            {
                "text": "For common nouns we used 25 noun categories from WordNet 3.0. These come from the broad seman- Table 10 : Noun categories in WordNet and the number of words, unigrams and bigrams in each.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 95,
                        "end": 103,
                        "text": "Table 10",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "WordNet Categories",
                "sec_num": "7.1"
            },
            {
                "text": "tic classes employed by lexicographers in the initial phase of inserting words into the WORDNET hierarchy, called lexicographer files (lex files). For the noun hierarchy, there are 25 lex files and a file containing the top level nodes in the hierarchy called Tops. Lex files form a set of coarse-grained sense distinctions within WORDNET. These categories and the number of WordNet words in each category are shown in Table 7 .1.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 419,
                        "end": 426,
                        "text": "Table 7",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "WordNet Categories",
                "sec_num": "7.1"
            },
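            {
                "text": "For readers wanting to reproduce the category vocabularies, a sketch using NLTK's WordNet interface (an assumption on our part; the experiments used WordNet 3.0 directly). Synset.lexname() returns the lexicographer file, e.g. 'noun.animal'.\n\nfrom collections import defaultdict\nfrom nltk.corpus import wordnet as wn\n\ncategories = defaultdict(set)\nfor synset in wn.all_synsets('n'):   # noun synsets only\n    lexfile = synset.lexname()       # e.g. 'noun.animal'\n    for lemma in synset.lemmas():\n        if '_' not in lemma.name():  # keep unigrams only\n            categories[lexfile].add(lemma.name().lower())",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "WordNet Categories",
                "sec_num": "7.1"
            },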
            {
                "text": "These experiments only involved unigrams seen in WordNet and hence we could evaluate directly against WordNet as a complete gold standard. We extracted the unigrams from all of the noun categories in WordNet. We then filtered the Web 1T corpus to extract only contexts where a WordNet unigram was the central token. The rest of the filtering, nS nT nC Av(10) Av (50)  5  5 10  29  22  10  5 10  51  43  20  5 10  67  52  100 5 10 73 59 Table 11 : Effects of number of seed words.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 362,
                        "end": 429,
                        "text": "(50)  5  5 10  29  22  10  5 10  51  43  20  5 10  67  52  100 5 10",
                        "ref_id": "TABREF1"
                    },
                    {
                        "start": 436,
                        "end": 444,
                        "text": "Table 11",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "WordNet Evaluation",
                "sec_num": "7.2"
            },
            {
                "text": "evaluation and scoring details follow the principles described in Section 5. Each proposed term was marked as correct if it appeared in that WordNet semantic category. The advantage of a closed system is the ease of evaluating the results. However, an obvious disadvantage is that the system cannot be marked correct for valid unigrams it discovers in a category, that are not listed under that category in WordNet. A full manual evaluation may produce better results.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "WordNet Evaluation",
                "sec_num": "7.2"
            },
            {
                "text": "Creating seed lists using the Web 1T frequencies, as we had done in previous experiments, was complicated by skew towards web-related senses. For example, thumbnail was the 5th most frequent word in the body category and site was the 2nd most frequent word in the location category. In the number of seeds experiments we chose the seeds based on their frequency alone, but in the remaining experiments we manually created seed lists.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "WordNet Experiments",
                "sec_num": "8"
            },
            {
                "text": "We use the n most frequent words that were unique to each category as seeds, regardless of whether they have obvious web-related senses. The results for increasing the number of seed words are shown in Table 11 . Note that the seed words are not included in the accuracy calculation. The limited number of terms in some categories (in particular, motive) causes a decrease in accuracy when more seeds are used because many of the correct proposed synonyms are now seed words.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 202,
                        "end": 210,
                        "text": "Table 11",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Number of Seed Words",
                "sec_num": "8.1"
            },
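            {
                "text": "A sketch of this seed-selection rule (categories maps each class to its WordNet word set and freq holds Web 1T unigram frequencies; both names are ours):\n\ndef pick_seeds(categories, freq, n):\n    seeds = {}\n    for klass, words in categories.items():\n        # Only words unique to this category are eligible as seeds.\n        others = set().union(*(w for k, w in categories.items() if k != klass))\n        unique = [w for w in words if w not in others]\n        seeds[klass] = sorted(unique, key=lambda w: freq.get(w, 0), reverse=True)[:n]\n    return seeds",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Number of Seed Words",
                "sec_num": "8.1"
            },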
            {
                "text": "There is a substantial increase in accuracy as the number of seeds is increased. This shows that even though the choice of seeds is far from optimal, and is strongly affected by interference, the results are still reasonable as long as a large number of seed words is used. Table 12 : Results for a selection of high and low performing common noun categories. The mean was calculated across all the semantic classes. The other parameters were (nS, nT, nC) = (5, 5, 10).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 274,
                        "end": 282,
                        "text": "Table 12",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Number of Seed Words",
                "sec_num": "8.1"
            },
            {
                "text": "To compare performance across semantic classes, we manually selected 5 seed words from the 20 most frequent words in each category (as measured in the Web 1T corpus). This allowed us to excluded words which we knew to have web-related senses that would dominate on the Web 1T data. The accuracy obtained was 44%, which is substantially lower than for the named entity unigram experiments (maximum 78%). However, the variation in performance across the categories was extremely high, as demonstrated in Table 12 . Some categories, such as cognition are extremely difficult.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 502,
                        "end": 510,
                        "text": "Table 12",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Comparison of Semantic Classes",
                "sec_num": "8.2"
            },
            {
                "text": "This demonstrates that MEB is very good at extracting certain kinds of lexical semantic knowledge -primarily for categories that are very well defined, with frequent terms that appear in fairly constrained or idiomatic contexts, for example animals and food. For these categories, MEB performed just as well on common nouns as it did on many of the proper noun named entity categories.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparison of Semantic Classes",
                "sec_num": "8.2"
            },
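            {
                "text": "As a concrete illustration of the seed selection procedure above (our sketch, not the authors' code; freqs and category_words are assumed inputs), the following Python fragment produces the 20-word candidate pool from which the 5 seeds were picked by hand:\n\nfrom collections import Counter\n\ndef seed_candidates(category_words, freqs, k=20):\n    # Rank the category's words by corpus frequency (counts that\n    # would come from the Web 1T corpus) and keep the top k.\n    counts = Counter({w: freqs.get(w, 0) for w in category_words})\n    return [w for w, _ in counts.most_common(k)]\n\nA human annotator then selects 5 seeds from this pool, skipping any word whose dominant web sense differs from the intended category sense.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparison of Semantic Classes",
                "sec_num": "8.2"
            },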
            {
                "text": "We have presented two novel applications of Mutual Exclusion Bootstrapping (MEB): extracting bigram named entities and common nouns from WordNet. We confirmed that MEB is sensitive to the geometry of the context window surrounding the extracted terms. As expected, a larger context leads to higher accuracy, but interestingly, this is almost entirely due to extra context on the left of the target term. Overall, this makes bigram and longer n-gram ex-traction more difficult on fixed-sized window data, such as the Web 1T corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": null
            },
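            {
                "text": "To make the geometry point concrete, this sketch (our reading of the fixed-window setup, not the paper's code) shows how a single Web 1T 5-gram yields a context template for a unigram target; the filler token X marks the target slot:\n\ndef context_template(ngram, target):\n    # Replace the target token with a filler X; the remaining\n    # tokens form the context. A target at the right edge of the\n    # window leaves four words of left context, the geometry that\n    # proved most informative; at the left edge, four words of\n    # right context.\n    return ' '.join('X' if t == target else t for t in ngram.split())\n\nprint(context_template('the quick brown fox jumps', 'fox'))\n# -> 'the quick brown X jumps'\n\nA bigram target occupies two of the five window positions, leaving at most three words of left context, which is one reason longer targets are harder on fixed-size windows.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": null
            },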
            {
                "text": "Surprisingly, we discovered that MEB is relatively insensitive to the order the semantic classes are presented and to noise in the possible terms themselves.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": null
            },
            {
                "text": "We applied MEB to common nouns using 25 semantic classes defined by the WordNet lexical files. We performed automatic evaluation using a closed vocabulary and found that MEB performed well on classes with narrower definitions such as animal, but poorly on classes such as cognition. This is partly due to the concrete categories having more coherent contexts. We found that increasing the number of seed terms improved the accuracy, even with poor quality seed terms.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": null
            },
            {
                "text": "We now plan to experiment with loosening the mutual exclusion assumption to allow for some overlap between categories. There are many possibilities for improving the performance of MEB on common nouns -here we have presented only a preliminary analysis of the WordNet results. We also plan to experiment with text other than the Web 1T corpus so that we can test whether allowing wider contexts will further improve performance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": null
            },
            {
                "text": "The experiments we have presented in this paper have demonstrated that MEB is an efficient and accurate method of extracting semantic classes over both unigram and bigram named entities. We have also demonstrated its potential for extracting semantic classes from WordNet for common nouns.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "Both authors were funded on this work under ARC Discovery grants DP0453131 and DP0665973. We would like to thank the anonymous reviewers for their useful feedback.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Combining strategies for extracting relations from text collections",
                "authors": [
                    {
                        "first": "Eugene",
                        "middle": [],
                        "last": "Agichtein",
                        "suffix": ""
                    },
                    {
                        "first": "Eleazar",
                        "middle": [],
                        "last": "Eskin",
                        "suffix": ""
                    },
                    {
                        "first": "Luis",
                        "middle": [],
                        "last": "Gravano",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Eugene Agichtein, Eleazar Eskin, and Luis Gravano. 2000. Combining strategies for extracting relations from text collections. Technical Report CUCS-006- 00, Department of Computer Science, Columbia Uni- versity, New York, March.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Web 1T 5-gram version 1",
                "authors": [
                    {
                        "first": "Thorsten",
                        "middle": [],
                        "last": "Brants",
                        "suffix": ""
                    },
                    {
                        "first": "Alex",
                        "middle": [],
                        "last": "Franz",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Thorsten Brants and Alex Franz. 2006. Web 1T 5-gram version 1. Technical Report LDC2006T13, Linguistic Data Consortium.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Minimising semantic drift with mutual exclusion bootstrapping",
                "authors": [
                    {
                        "first": "James",
                        "middle": [
                            "R"
                        ],
                        "last": "Curran",
                        "suffix": ""
                    },
                    {
                        "first": "Tara",
                        "middle": [],
                        "last": "Murphy",
                        "suffix": ""
                    },
                    {
                        "first": "Bernhard",
                        "middle": [],
                        "last": "Scholz",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of the Conference of the Pacific Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "19--21",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "James R. Curran, Tara Murphy, and Bernhard Scholz. 2007. Minimising semantic drift with mutual exclu- sion bootstrapping. In Proceedings of the Confer- ence of the Pacific Association for Computational Lin- guistics, pages 172-180, Melbourne, Australia, 19-21 September.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "From Distributional to Semantic Similarity",
                "authors": [
                    {
                        "first": "James",
                        "middle": [
                            "R"
                        ],
                        "last": "Curran",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "James R. Curran. 2004. From Distributional to Seman- tic Similarity. Ph.D. thesis, University of Edinburgh, Edinburgh, UK.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "WordNet: an electronic lexical database",
                "authors": [],
                "year": 1998,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Cristiane Fellbaum, editor. 1998. WordNet: an elec- tronic lexical database. The MIT Press, Cambridge, MA USA.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Automatic acquisition of hyponyms from large text corpora",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Marti",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Hearst",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "Proceedings of the 14th international conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "23--28",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Marti A. Hearst. 1992. Automatic acquisition of hy- ponyms from large text corpora. In Proceedings of the 14th international conference on Computational Lin- guistics, pages 539-545, Nantes, France, 23-28 July.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Foundations of Statistical Natural Language Processing",
                "authors": [
                    {
                        "first": "Chris",
                        "middle": [],
                        "last": "Manning",
                        "suffix": ""
                    },
                    {
                        "first": "Hinrich",
                        "middle": [],
                        "last": "Sch\u00fctze",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Chris Manning and Hinrich Sch\u00fctze. 1999. Founda- tions of Statistical Natural Language Processing. MIT Press, Cambridge, MA.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Learning dictionaries for information extraction by multi-level bootstrapping",
                "authors": [
                    {
                        "first": "Ellen",
                        "middle": [],
                        "last": "Riloff",
                        "suffix": ""
                    },
                    {
                        "first": "Rosie",
                        "middle": [],
                        "last": "Jones",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Proceedings of the Sixteenth National Conference on Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "18--22",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ellen Riloff and Rosie Jones. 1999. Learning dictio- naries for information extraction by multi-level boot- strapping. In Proceedings of the Sixteenth National Conference on Artificial Intelligence, pages 474-479, Orlando, FL USA, 18-22 July.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "A corpus-based approach for building semantic lexicons",
                "authors": [
                    {
                        "first": "Ellen",
                        "middle": [],
                        "last": "Riloff",
                        "suffix": ""
                    },
                    {
                        "first": "Jessica",
                        "middle": [],
                        "last": "Shepherd",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proceedings of the Second Conference on Empirical Methods in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "1--2",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ellen Riloff and Jessica Shepherd. 1997. A corpus-based approach for building semantic lexicons. In Proceed- ings of the Second Conference on Empirical Meth- ods in Natural Language Processing, pages 117-124, Providence, 1-2 August.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Automatically generating extraction patterns from untagged text",
                "authors": [
                    {
                        "first": "Ellen",
                        "middle": [],
                        "last": "Riloff",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proceedings of the Thirteenth National Conference on Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "1044--1049",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ellen Riloff. 1996. Automatically generating extrac- tion patterns from untagged text. In Proceedings of the Thirteenth National Conference on Artificial Intel- ligence, pages 1044-1049.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Nounphrase co-occurrence statistic for semi-automatic semantic lexicon construction",
                "authors": [
                    {
                        "first": "Brian",
                        "middle": [],
                        "last": "Roark",
                        "suffix": ""
                    },
                    {
                        "first": "Eugene",
                        "middle": [],
                        "last": "Charniak",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 17th International Conference on Computational Linguistics and the 36th annual meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "10--14",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Brian Roark and Eugene Charniak. 1998. Noun- phrase co-occurrence statistic for semi-automatic se- mantic lexicon construction. In Proceedings of the 17th International Conference on Computational Lin- guistics and the 36th annual meeting of the Associa- tion for Computational Linguistics, pages 1110-1116, Montr\u00e9al, Qu\u00e9bec, Canada, 10-14 August.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Mining the web for relations",
                "authors": [
                    {
                        "first": "Neel",
                        "middle": [],
                        "last": "Sundaresan",
                        "suffix": ""
                    },
                    {
                        "first": "Jeonghee",
                        "middle": [],
                        "last": "Yi",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 9th International World Wide Web Conference",
                "volume": "",
                "issue": "",
                "pages": "15--19",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Neel Sundaresan and Jeonghee Yi. 2000. Mining the web for relations. In Proceedings of the 9th Inter- national World Wide Web Conference, Amsterdam, Netherlands, 15-19 May.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "BBN pronoun coreference and entity type corpus",
                "authors": [
                    {
                        "first": "Ralph",
                        "middle": [],
                        "last": "Weischedel",
                        "suffix": ""
                    },
                    {
                        "first": "Ada",
                        "middle": [],
                        "last": "Brunstein",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ralph Weischedel and Ada Brunstein. 2005. BBN pro- noun coreference and entity type corpus. Technical Report LDC2005T33, Linguistic Data Consortium.",
                "links": null
            }
        },
        "ref_entries": {
            "TABREF0": {
                "content": "<table><tr><td colspan=\"3\">LABEL UNI BI DESCRIPTION</td></tr><tr><td>NAME</td><td/><td>\u2022 Person: name</td></tr><tr><td/><td/><td>'Katie Holmes' 'Adam Smith'</td></tr><tr><td>FEM</td><td>\u2022</td><td>Person: female first name</td></tr><tr><td/><td/><td>Mary Patricia Linda Elizabeth</td></tr><tr><td>MALE</td><td>\u2022</td><td>Person: male first name</td></tr><tr><td/><td/><td>James John Robert Michael William</td></tr><tr><td>LAST</td><td>\u2022</td><td>Person: last name</td></tr><tr><td/><td/><td>Smith Johnson Williams Jones Brown</td></tr><tr><td>TITLE</td><td colspan=\"2\">\u2022 \u2022 Honorific title</td></tr><tr><td/><td/><td>President Dr Lord Miss Major</td></tr><tr><td>NORP</td><td colspan=\"2\">\u2022 \u2022 Nationality, Religion, Political (adj)</td></tr><tr><td/><td/><td>Republican Christian 'South African'</td></tr><tr><td>FAC</td><td colspan=\"2\">\u2022 \u2022 Facility: names of man-made structures</td></tr><tr><td/><td/><td>Broadway Legoland 'Golden Gate'</td></tr><tr><td>ORG</td><td colspan=\"2\">\u2022 \u2022 Organisation: e.g. companies, gov.</td></tr><tr><td/><td/><td>Intel Microsoft 'American Express'</td></tr><tr><td>GPE</td><td colspan=\"2\">\u2022 \u2022 Geo-political entity</td></tr><tr><td/><td/><td>Canada China London 'Los Angeles'</td></tr></table>",
                "type_str": "table",
                "num": null,
                "text": "class into MALE and FEMALE first names",
                "html": null
            },
            "TABREF1": {
                "content": "<table/>",
                "type_str": "table",
                "num": null,
                "text": "The semantic classes used for the proper noun unigram (Column 2) and bigram (Column 3) experiments. Bigram examples are shown in quotes.and LAST names to investigate more fine-grained distinctions for this class.For the bigram experiments, we kept a single class NAME for person name, and reintroduced the LAW and EVENT classes. Most classes are common to both the unigram and bigram experiments. As in our previous experiments, some classes were easier to evaluate manually because we were only extracting unigrams, whilst others were more difficult. Similar difficulties exist in the bigram classes as well. The complete list of semantic classes used in the named entity experiments are summarised inTable 1.",
                "html": null
            },
            "TABREF2": {
                "content": "<table><tr><td/><td colspan=\"4\">nS nT nC Av(10) Av(50)</td></tr><tr><td>UNI5GMS</td><td>5</td><td>5 10</td><td>90</td><td>78</td></tr><tr><td>UNI4LEFT</td><td>5</td><td>5 10</td><td>86</td><td>74</td></tr><tr><td>UNI4RIGHT</td><td>5</td><td>5 10</td><td>76</td><td>49</td></tr><tr><td>UNI3GMS</td><td>5</td><td>5 10</td><td>74</td><td>48</td></tr></table>",
                "type_str": "table",
                "num": null,
                "text": "Unigram and bigram Web 1T templates.",
                "html": null
            },
            "TABREF3": {
                "content": "<table><tr><td/><td colspan=\"4\">nS nT nC Av(10) Av(50)</td></tr><tr><td>BI5LEFT</td><td>5</td><td>5 10</td><td>92</td><td>68</td></tr><tr><td>BI5RIGHT</td><td>5</td><td>5 10</td><td>83</td><td>51</td></tr><tr><td>BI4GMS</td><td>5</td><td>5 10</td><td>77</td><td>48</td></tr></table>",
                "type_str": "table",
                "num": null,
                "text": "Effect of context geometry on unigrams.",
                "html": null
            },
            "TABREF4": {
                "content": "<table/>",
                "type_str": "table",
                "num": null,
                "text": "",
                "html": null
            },
            "TABREF5": {
                "content": "<table/>",
                "type_str": "table",
                "num": null,
                "text": "The penalty for removing some left context was not as great for bigrams, dropping from 68% with BI5GMS to 48% with BI4GMS. The remaining unigram experiments use UNI5GMS and the bigram experiments use BI5LEFT. STOP mean Av(50) \u03c3 Av(50)",
                "html": null
            },
            "TABREF6": {
                "content": "<table><tr><td/><td>UNI5GMS</td><td>WordNet</td></tr><tr><td>terms</td><td>263 613</td><td>29 157</td></tr><tr><td>contexts</td><td colspan=\"2\">10 449 412 18 832 474</td></tr><tr><td colspan=\"3\">terms-contexts 42 039 483 88 178 856</td></tr></table>",
                "type_str": "table",
                "num": null,
                "text": "Results for extracting bi-and unigrams",
                "html": null
            },
            "TABREF7": {
                "content": "<table/>",
                "type_str": "table",
                "num": null,
                "text": "Comparison of the datasets used in the UNI5GMS and WordNet experiments. The number of unique terms, unique contexts and unique termcontext combinations is shown.",
                "html": null
            }
        }
    }
}