File size: 62,864 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
{
    "paper_id": "P01-1005",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:30:13.408024Z"
    },
    "title": "Scaling to Very Very Large Corpora for Natural Language Disambiguation",
    "authors": [
        {
            "first": "Michele",
            "middle": [],
            "last": "Banko",
            "suffix": "",
            "affiliation": {},
            "email": "mbanko@microsoft.com"
        },
        {
            "first": "Eric",
            "middle": [],
            "last": "Brill",
            "suffix": "",
            "affiliation": {},
            "email": "brill@microsoft.com"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "The amount of readily available on-line text has reached hundreds of billions of words and continues to grow. Yet for most core natural language tasks, algorithms continue to be optimized, tested and compared after training on corpora consisting of only one million words or less. In this paper, we evaluate the performance of different learning methods on a prototypical natural language disambiguation task, confusion set disambiguation, when trained on orders of magnitude more labeled data than has previously been used. We are fortunate that for this particular application, correctly labeled training data is free. Since this will often not be the case, we examine methods for effectively exploiting very large corpora when labeled data comes at a cost.",
    "pdf_parse": {
        "paper_id": "P01-1005",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "The amount of readily available on-line text has reached hundreds of billions of words and continues to grow. Yet for most core natural language tasks, algorithms continue to be optimized, tested and compared after training on corpora consisting of only one million words or less. In this paper, we evaluate the performance of different learning methods on a prototypical natural language disambiguation task, confusion set disambiguation, when trained on orders of magnitude more labeled data than has previously been used. We are fortunate that for this particular application, correctly labeled training data is free. Since this will often not be the case, we examine methods for effectively exploiting very large corpora when labeled data comes at a cost.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Machine learning techniques, which automatic ally learn linguistic information from online text corpora, have been applied to a number of natural language problems throughout the last decade. A large percentage of papers published in this area involve comparisons of different learning approaches trained and tested with commonly used corpora. While the amount of available online text has been increasing at a dramatic rate, the size of training corpora typically used for learning has not. In part, this is due to the standardization of data sets used within the field, as well as the potentially large cost of annotating data for those learning methods that rely on labeled text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The empirical NLP community has put substantial effort into evaluating performance of a large number of machine learning methods over fixed, and relatively small, data sets. Yet since we now have access to significantly more data, one has to wonder what conclusions that have been drawn on small data sets may carry over when these learning methods are trained using much larger corpora.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this paper, we present a study of the effects of data size on machine learning for natural language disambiguation. In particular, we study the problem of selection among confusable words, using orders of magnitude more training data than has ever been applied to this problem. First we show learning curves for four different machine learning algorithms. Next, we consider the efficacy of voting, sample selection and partially unsupervised learning with large training corpora, in hopes of being able to obtain the benefits that come from significantly larger training corpora without incurring too large a cost.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Confusion set disambiguation is the problem of choosing the correct use of a word, given a set of words with which it is commonly confused. Example confusion sets include: {principle , principal}, {then, t han}, {to,two,t oo}, and {weather,whether}. Numerous methods have been presented for confusable disambiguation. The more recent set of techniques includes mult iplicative weightupdate algorithms (Golding and Roth, 1998) , latent semantic analysis (Jones and Martin, 1997) , transformation-based learning (Mangu and Brill, 1997) , differential grammars (Powers, 1997) , decision lists (Yarowsky, 1994) , and a variety of Bayesian classifiers (Gale et al., 1993 , Golding, 1995 , Golding and Schabes, 1996 . In all of these approaches, the problem is formulated as follows:",
                "cite_spans": [
                    {
                        "start": 401,
                        "end": 425,
                        "text": "(Golding and Roth, 1998)",
                        "ref_id": null
                    },
                    {
                        "start": 453,
                        "end": 477,
                        "text": "(Jones and Martin, 1997)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 510,
                        "end": 533,
                        "text": "(Mangu and Brill, 1997)",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 558,
                        "end": 572,
                        "text": "(Powers, 1997)",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 590,
                        "end": 606,
                        "text": "(Yarowsky, 1994)",
                        "ref_id": "BIBREF21"
                    },
                    {
                        "start": 647,
                        "end": 665,
                        "text": "(Gale et al., 1993",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 666,
                        "end": 681,
                        "text": ", Golding, 1995",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 682,
                        "end": 709,
                        "text": ", Golding and Schabes, 1996",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Confusion Set Disambiguation",
                "sec_num": "2"
            },
            {
                "text": "Given a specific confusion set (e.g. {to,two,too}), all occurrences of confusion set members in the test set are replaced by a marker; everywhere the system sees this marker, it must decide which member of the confusion set to choose.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Confusion Set Disambiguation",
                "sec_num": "2"
            },
            {
                "text": "Confusion set disambiguation is one of a class of natural language problems involving disambiguation from a relatively small set of alternatives based upon the string context in which the ambiguity site appears. Other such problems include word sense disambiguation, part of speech tagging and some formulations of phrasal chunking. One advantageous aspect of confusion set disambiguation, which allows us to study the effects of large data sets on performance, is that labeled training data is essentially free, since the correct answer is surface apparent in any collection of reasonably well-edited text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Confusion Set Disambiguation",
                "sec_num": "2"
            },
            {
                "text": "This work was partially motivated by the desire to develop an improved grammar checker. Given a fixed amount of time, we considered what would be the most effective way to focus our efforts in order to attain the greatest performance improvement. Some possibilities included modifying standard learning algorithms, exploring new learning techniques, and using more sophisticated features. Before exploring these somewhat expensive paths, we decided to first see what happened if we simply trained an e xisting method with much more data. This led to the exploration of learning curves for various machine learning algorithms : winnow 1 , perceptron, na\u00efve Bayes, and a very simple memory-based learner. For the first three learners, we used the standard colle ction of features employed for this problem: the set of words within a window of the target word, and collocations containing words and/or parts of We collected a 1-billion-word training corpus from a variety of English texts, including news articles, scientific abstracts, government transcripts, literature and other varied forms of prose. This training corpus is three orders of magnitude greater than the largest training corpus previously used for this problem. We used 1 million words of Wall Street Journal text as our test set, and no data from the Wall Street Journal was used when constructing the training corpus. Each learner was trained at several cutoff points in the training corpus, i.e. the first one million words, the first five million words, and so on, until all one billion words were used for training. In order to avoid training biases that may result from merely concatenating the different data sources to form a larger training corpus, we constructed each consecutive training corpus by probabilistically sampling sentences from the different sources weighted by the size of each source.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning Curve Expe riments",
                "sec_num": "3"
            },
            {
                "text": "In Figure 1 , we show learning curves for each learner, up to one billion words of training data. Each point in the graph is the average performance over ten confusion sets for that size training corpus. Note that the curves appear to be log-linear even out to one billion words.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 3,
                        "end": 11,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Learning Curve Expe riments",
                "sec_num": "3"
            },
            {
                "text": "Of course for many problems, additional training data has a non-zero cost. However, these results suggest that we may want to reconsider the trade-off between spending time and money on algorithm development versus spending it on corpus development. At least for the problem of confusable disambiguation, none of the learners tested is close to asymptoting in performance at the training corpus size commonly employed by the field.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning Curve Expe riments",
                "sec_num": "3"
            },
            {
                "text": "Such gains in accuracy, however, do not come for free. Figure 2 shows the size of learned representations as a function of training data size. For some applications, this is not necessarily a concern. But for others, where space comes at a premium, obtaining the gains that come with a billion words of training data may not be viable without an effort made to compress information. In such cases, one could look at numerous methods for compressing data (e.g. Dagan and Engleson, 1995, Weng, et al, 1998) .",
                "cite_spans": [
                    {
                        "start": 460,
                        "end": 469,
                        "text": "Dagan and",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 470,
                        "end": 504,
                        "text": "Engleson, 1995, Weng, et al, 1998)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 55,
                        "end": 63,
                        "text": "Figure 2",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Learning Curve Expe riments",
                "sec_num": "3"
            },
            {
                "text": "Voting has proven to be an effective technique for improving classifier accuracy for many applications, including part-of-speech tagging (van Halteren, et al, 1998) , parsing (Henderson and Brill, 1999) , and word sense disambiguation (Pederson, 2000) . By training a set of classifiers on a single training corpus and then combining their outputs in classification, it is often possible to achieve a target accuracy with less labeled training data than would be needed if only one cla ssifier was being used. Voting can be effective in reducing both the bias of a particular training corpus and the bias of a specific learner. When a training corpus is very small, there is much more room for these biases to surface and therefore for voting to be effective. But does voting still offer performance gains when classifiers are trained on much larger corpora? The complementarity between two learners was defined by Brill and Wu (1998) in order to quantify the percentage of time when one system is wrong, that another system is correct, and therefore providing an upper bound on combination accuracy. As training size increases significantly, we would expect complementarity between classifiers to decrease. This is due in part to the fact that a l arger training corpus will reduce the data set variance and any bias arising from this. Also, some of the differences between classifiers might be due to how they handle a sparse training set. As a result of comparing a sample of two learners as a function of increasingly large training sets, we see in Table 1 that complementarity does indeed decrease as training size increases.",
                "cite_spans": [
                    {
                        "start": 137,
                        "end": 164,
                        "text": "(van Halteren, et al, 1998)",
                        "ref_id": "BIBREF19"
                    },
                    {
                        "start": 175,
                        "end": 202,
                        "text": "(Henderson and Brill, 1999)",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 235,
                        "end": 251,
                        "text": "(Pederson, 2000)",
                        "ref_id": null
                    },
                    {
                        "start": 915,
                        "end": 934,
                        "text": "Brill and Wu (1998)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 1553,
                        "end": 1560,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The Efficacy of Voting",
                "sec_num": "4"
            },
            {
                "text": "Training Size (words) Complementarity(L1,L2) 10 6 0.2612 10 7 0.2410 10 8 0.1759 10 9 0.1612 Table 1 . Complementarity Next we tested whether this decrease in complementarity meant that voting loses its effectiveness as the training set increases. To examine the impact of voting when using a significantly larger training corpus, we ran 3 out of the 4 learners on our set of 10 confusable pairs, excluding the memory-based learner. Voting was done by combining the normalized score each learner assigned to a classification choice. In Figure 3 , we show the accuracy obtained from voting, along with the single best learner accuracy at each training set size. We see that for very small corpora, voting is beneficial, resulting in better performance than any single classifier. Beyond 1 million words, little is gained by voting, and indeed on the largest training sets voting actually hurts accuracy. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 93,
                        "end": 100,
                        "text": "Table 1",
                        "ref_id": null
                    },
                    {
                        "start": 536,
                        "end": 544,
                        "text": "Figure 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The Efficacy of Voting",
                "sec_num": "4"
            },
            {
                "text": "While the observation that learning curves are not asymptoting even with orders of magnitude more training data than is currently used is very exciting, this result may have somewhat limited ramifications. V ery few problems exist for which annotated data of this size is available for free. Surely we cannot reasonably expect that the manual annotation of one billion words along with corresponding parse trees will occur any time soon (but see (Banko and Brill 2001) for a discussion that this might not be completely infeasible). Despite this pitfall, there are techniques one can use to try to obtain the benefits of considerably larger training corpora without incurring significant additional costs. In the sections that follow, we study two such solutions: active learning and unsupervised learning.",
                "cite_spans": [
                    {
                        "start": 446,
                        "end": 468,
                        "text": "(Banko and Brill 2001)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "When Annotated Data Is Not Free",
                "sec_num": "5"
            },
            {
                "text": "Active learning involves intelligently selecting a portion of samples for annotation from a pool of as-yet unannotated training samples. Not all samples in a training set are equally useful. By concentrating human annotation efforts on the samples of greatest utility to the machine learning algorithm, it may be possible to attain better performance for a fixed annotation cost than if samples were chosen randomly for human annotation. Most active learning approaches work by first training a seed learner (or family of learners) and then running the learner(s) over a set of unlabeled samples.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Learning",
                "sec_num": "5.1"
            },
            {
                "text": "A sample is presumed to be more useful for training the more uncertain its classification label is. Uncertainty can be judged by the relative weights assigned to different labels by a single classifier (Lewis and Catlett, 1994) . Another approach, committee-based sampling, first creates a committee of classifie rs and then judges classification uncertainty according to how much the learners differ among label assignments. For example, Dagan and Engelson (1995) describe a committee-based sampling technique where a part of speech tagger is trained using an annotated seed corpus. A family of taggers is then generated by randomly permuting the tagger probabilities, and the disparity among tags output by the committee members is used as a measure of classification uncertainty. Sentences for human annotation are drawn, biased to prefer those containing high uncertainty instances.",
                "cite_spans": [
                    {
                        "start": 202,
                        "end": 227,
                        "text": "(Lewis and Catlett, 1994)",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 439,
                        "end": 464,
                        "text": "Dagan and Engelson (1995)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Learning",
                "sec_num": "5.1"
            },
            {
                "text": "While active learning has been shown to work for a number of tasks, the majority of active learning experiments in natural language processing have been conducted using very small seed corpora and sets of unlabeled examples. Therefore, we wish to explore situations where we have, or can afford, a nonnegligible sized training corpus (such as for part-of-speech tagging) and have access to very large amounts of unlabeled data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Learning",
                "sec_num": "5.1"
            },
            {
                "text": "We can use bagging (Breiman, 1996) , a technique for generating a committee of classifiers, to assess the label uncertainty of a potential training instance. With bagging, a variant of the original training set is constructed by randomly sampling sentences with replacement from the source training set in order to produce N new training sets of size equal to the original. After the N models have been trained and run on the same test set, their classifications for each test sentence can be compared for classification agreement. The higher the disagreement between classifiers, the more useful it would be to have an instance We used the na\u00efve Bayes classifier, creating 10 classifiers each trained on bags generated from an initial one million words of labeled training data. We present the active learning algorithm we used below.",
                "cite_spans": [
                    {
                        "start": 19,
                        "end": 34,
                        "text": "(Breiman, 1996)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Learning",
                "sec_num": "5.1"
            },
            {
                "text": "Initialize: Training data consists of X words correctly labeled Iterate : 1) Generate a committee of classifiers using bagging on the training set 2) Run the committee on unlabeled portion of the training set 3) Choose M instances from the unlabeled set for labeling -pick the M/2 with the greatest vote entropy and then pick another M/2 randomly -and add to training set",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Learning",
                "sec_num": "5.1"
            },
            {
                "text": "We initially tried selecting the M most uncertain examples, but this resulted in a sample too biased toward the difficult instances. Instead we pick half of our samples for annotation randomly and the other half from those whose labels we are most uncertain of, as judged by the entropy of the votes assigned to the instance by the committee. This is, in effect, biasing our sample toward instances the classifiers are most uncertain of.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Learning",
                "sec_num": "5.1"
            },
            {
                "text": "We show the results from sample selection for confusion set disambiguation in Figure 4 . The line labeled \"sequential\" shows test set accuracy achieved for different percentages of the one billion word training set, where training instances are taken at random. We ran three active learning experiments, increasing the size of the total unlabeled training corpus from which we can pick samples to be annotated. In all three cases, sample selection outperforms sequential sampling.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 78,
                        "end": 86,
                        "text": "Figure 4",
                        "ref_id": "FIGREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Active Learning",
                "sec_num": "5.1"
            },
            {
                "text": "At the endpoint of each training run in the graph, the same number of samples has been annotated for training. However, we see that the larger the pool of candidate instances for annotation is, the better the resulting accuracy. By increasing the pool of unlabeled training instances for active learning, we can improve accuracy with only a fixed additional annotation cost. Thus it is possible to benefit from the availability of extremely large corpora without incurring the full costs of annotation, training time, and representation size.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Active Learning",
                "sec_num": "5.1"
            },
            {
                "text": "While the previous section shows that we can benefit from substantially larger training corpora without needing significant additional manual annotation, it would be ideal if we could improve classification accuracy using only our seed annotated corpus and the large unlabeled corpus, without requiring any additional hand labeling. In this section we turn to unsupervised learning in an attempt to achieve this goal. Numerous approaches have been explored for exploiting situations where some amount of annotated data is available and a much larger amount of data exists unannotated, e.g. Merialdo's HMM part-of-speech tagger training (1994), Charniak's parser retraining experiment (1996), Yarowsky's seeds for word sense disambiguation (1995) and Nigam et al's (1998) topic classifier learned in part from unlabelled documents. A nice discussion of this general problem can be found in Mitchell (1999).",
                "cite_spans": [
                    {
                        "start": 739,
                        "end": 745,
                        "text": "(1995)",
                        "ref_id": null
                    },
                    {
                        "start": 750,
                        "end": 770,
                        "text": "Nigam et al's (1998)",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 889,
                        "end": 904,
                        "text": "Mitchell (1999)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Weakly Supervised Learning",
                "sec_num": "5.2"
            },
            {
                "text": "The question we want to answer is whether there is something to be gained by combining unsupervised and supervised learning when we scale up both the seed corpus and the unlabeled corpus significantly. We can again use a committee of bagged classifiers, this time for unsupervised learning. Whereas with active learning we want to choose the most uncertain instances for human annotation, with unsupervised learning we want to choose the instances that have the highest probability of being correct for automatic labeling and inclusion in our labeled training data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Weakly Supervised Learning",
                "sec_num": "5.2"
            },
            {
                "text": "In Table 2 , we show the test set accuracy (averaged over the four most frequently occurring confusion pairs) as a function of the number of classifiers that agree upon the label of an instance. For this experiment, we trained a collection of 10 na\u00efve Bayes classifiers, using bagging on a 1-million-word seed corpus. As can be seen, the greater the classifier agreement, the more likely it is that a test sample has been correctly labeled. Since the instances in which all bags agree have the highest probability of being correct, we attempted to automatically grow our labeled training set using the 1-million-word labeled seed corpus along with the collection of na\u00efve Bayes classifiers described above. All instances from the remainder of the corpus on which all 10 classifiers agreed were selected, trusting the agreed-upon label. The classifiers were then retrained using the labeled seed corpus plus the new training material collected automatically during the previous step.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 3,
                        "end": 10,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Weakly Supervised Learning",
                "sec_num": "5.2"
            },
            {
                "text": "In Table 3 we show the results from these unsupervised learning experiments for two confusion sets. In both cases we gain from unsupervised training compared to using only the seed corpus, but only up to a point. At this point, test set accuracy begins to decline as additional training instances are automatically harvested. We are able to attain improvements in accuracy for free using unsupervised learning, but unlike our learning curve experiments using correctly labeled data, accuracy does not continue to improve with additional data. Table 3 . Committee-Based Unsupervised Learning Charniak (1996) ran an experiment in which he trained a parser on one million words of parsed data, ran the parser over an additional 30 million words, and used the resulting parses to reestimate model probabilities. Doing so gave a small improvement over just using the manually parsed data.",
                "cite_spans": [
                    {
                        "start": 591,
                        "end": 606,
                        "text": "Charniak (1996)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 3,
                        "end": 10,
                        "text": "Table 3",
                        "ref_id": null
                    },
                    {
                        "start": 543,
                        "end": 550,
                        "text": "Table 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Weakly Supervised Learning",
                "sec_num": "5.2"
            },
            {
                "text": "We repeated this experiment with our data, and show the outcome in Table 4 . Choosing only the labeled instances most likely to be correct as judged by a committee of classifiers results in higher accuracy than using all instances classified by a model trained with the labeled seed corpus. In applying unsupervised learning to improve upon a seed-trained method, we consistently saw an improvement in performance followed by a decline. This is likely due to eventually having reached a point where the gains from additional training data are offset by the sample bias in mining these instances. It may be possible to combine active learning with unsupervised learning as a way to reduce this sample bias and gain the benefits of both approaches.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 67,
                        "end": 74,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Weakly Supervised Learning",
                "sec_num": "5.2"
            },
            {
                "text": "In this paper, we have looked into what happens when we begin to take advantage of the large amounts of text that are now readily available. We have shown that for a prototypical natural language classification task, the performance of learners can benefit significantly from much larger training sets. We have also shown that both active learning and unsupervised learning can be used to attain at least some of the advantage that comes with additional training data, while minimizing the cost of additional human annotation. We propose that a logical next step for the research community would be to direct efforts towards increasing the size of annotated training collections, while deemphasizing the focus on comparing different learning techniques trained only on small training corpora. While it is encouraging that there is a vast amount of on-line text, much work remains to be done if we are to learn how best to exploit this resource to improve natural language processing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "6"
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Mitigating the Paucity of Data Problem",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Banko",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Brill",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Banko, M. and Brill, E. (2001). Mitigating the Paucity of Data Problem. Human Language Technology.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Bagging Predictors",
                "authors": [
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Breiman",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Machine Learning",
                "volume": "24",
                "issue": "",
                "pages": "123--140",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Breiman L., (1996). Bagging Predictors, Machine Learning 24 123-140.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Classifier combination for improved lexical disambiguation",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Brill",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Wu",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 17th International Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Brill, E. and Wu, J. (1998). Classifier combination for improved lexical disambiguation. In Proceedings of the 17th International Conference on Computational Linguistics.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Treebank Grammars, Proceedings AAAI-96",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Charniak",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Charniak, E. (1996). Treebank Grammars, Proceedings AAAI-96 , Menlo Park, Ca.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Committee-based sampling for training probabilistic classifiers",
                "authors": [
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Dagan",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Engelson",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proc. ML-95, the 12th Int. Conf. on Machine Learning",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dagan, I. and Engelson, S. (1995). Committee-based sampling for training probabilistic classifiers. In Proc. ML-95, the 12th Int. Conf. on Machine Learning.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "A method for disambiguating word senses in a large corpus",
                "authors": [
                    {
                        "first": "W",
                        "middle": [
                            "A"
                        ],
                        "last": "Gale",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [
                            "W"
                        ],
                        "last": "Church",
                        "suffix": ""
                    },
                    {
                        "first": "Yarowsky",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Computers and the Humanities",
                "volume": "26",
                "issue": "",
                "pages": "415--439",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gale, W. A., Church, K. W., and Yarowsky, D. (1993). A method for disambiguating word senses in a large corpus. Computers and the Humanities, 26:415--439.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "A Bayesian hybrid method for context-sensitive spelling correction",
                "authors": [
                    {
                        "first": "A",
                        "middle": [
                            "R"
                        ],
                        "last": "Golding",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proc. 3rd Workshop on Very Large Corpora",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Golding, A. R. (1995). A Bayesian hybrid method for context-sensitive spelling correction. In Proc. 3rd Workshop on Very Large Corpora, Boston, MA.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "A Winnow-Based Approach to Context-Sensitive Spelling Correction",
                "authors": [
                    {
                        "first": "A",
                        "middle": [
                            "R"
                        ],
                        "last": "Golding",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Roth",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Machine Learning",
                "volume": "34",
                "issue": "",
                "pages": "107--130",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Golding, A. R. and Roth, D.(1999), A Winnow- Based Approach to Context-Sensitive Spelling Correction. Machine Learning, 34:107--130.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Combining trigram-based and feature-based methods for context-sensitive spelling correction",
                "authors": [
                    {
                        "first": "A",
                        "middle": [
                            "R"
                        ],
                        "last": "Golding",
                        "suffix": ""
                    },
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Schabes",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proc. 34th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Golding, A. R. and Schabes, Y. (1996). Combining trigram-based and feature-based methods for context-sensitive spelling correction. In Proc. 34th Annual Meeting of the Association for Computational Linguistics, Santa Cruz, CA.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Exploiting diversity in natural language processing: combining parsers",
                "authors": [
                    {
                        "first": "J",
                        "middle": [
                            "C"
                        ],
                        "last": "Henderson",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Brill",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "1999 Joint Sigdat Conference on Empirical Methods in Natural Language Processing and Very Large Corpora. ACL",
                "volume": "",
                "issue": "",
                "pages": "187--194",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Henderson, J. C. and Brill, E . (1999). Exploiting diversity in natural language processing: combining parsers. In 1999 Joint Sigdat Conference on Empirical Methods in Natural Language Processing and Very Large Corpora. ACL, New Brunswick NJ. 187-194.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Contextual spelling correction using latent semantic analysis",
                "authors": [
                    {
                        "first": "M",
                        "middle": [
                            "P"
                        ],
                        "last": "Jones",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [
                            "H"
                        ],
                        "last": "Martin",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jones, M. P. and Martin, J. H. (1997). Contextual spelling correction using latent semantic analysis.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Proc. 5th Conference on Applied Natural Language Processing",
                "authors": [],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "In Proc. 5th Conference on Applied Natural Language Processing, Washington, DC.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Heterogeneous uncertainty sampling",
                "authors": [
                    {
                        "first": "D",
                        "middle": [
                            "D"
                        ],
                        "last": "Lewis",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Catlett",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proceedings of the Eleventh International Conference on Machine Learning",
                "volume": "",
                "issue": "",
                "pages": "148--156",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lewis , D. D., & Catlett, J. (1994). Heterogeneous uncertainty sampling. Proceedings of the Eleventh International Conference on Machine Learning (pp. 148--156). New Brunswick, NJ: Morgan Kaufmann.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Automatic rule acquisition for spelling correction",
                "authors": [
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Mangu",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Brill",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proc. 14th International Conference on Machine Learning",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mangu, L. and Brill, E. (1997). Automatic rule acquisition for spelling correction. In Proc. 14th International Conference on Machine Learning. Morgan Kaufmann.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Tagging English text with a probabilistic model",
                "authors": [
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Merialdo",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Computational Linguistics",
                "volume": "20",
                "issue": "2",
                "pages": "155--172",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Merialdo, B. (1994). Tagging English text with a probabilistic model. Computational Linguistics, 20(2):155--172.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "The role of unlabeled data in supervised learning",
                "authors": [
                    {
                        "first": "T",
                        "middle": [
                            "M"
                        ],
                        "last": "Mitchell",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Proceedings of the Sixth International Colloquium on Cognitive Science",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mitchell, T. M. (1999), The role of unlabeled data in supervised learning, in Proceedings of the Sixth International Colloquium on Cognitive Science, San Sebastian, Spain.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Learning to classify text from labeled and unlabeled documents",
                "authors": [
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Nigam",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Mccallum",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Thrun",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Mitchell",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the Fifteenth National Conference on Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Nigam, N., McCallum, A., Thrun, S., and Mitchell, T. (1998). Learning to classify text from labeled and unlabeled documents. In Proceedings of the Fifteenth National Conference on Artificial Intelligence. AAAI Press..",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "A simple approach to building ensembles of naive bayesian classifiers for word sense disambiguation",
                "authors": [
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Pedersen",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the First Meeting of the North American Chapter of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Pedersen, T. (2000). A simple approach to building ensembles of naive bayesian classifiers for word sense disambiguation. In Proceedings of the First Meeting of the North American Chapter of the Association for Computational Linguistics May 1- 3, 2000, Seattle, WA",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "Learning and application of differential grammars",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Powers",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proc. Meeting of the ACL Special Interest Group in Natural Language Learning",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Powers, D. (1997). Learning and application of differential grammars. In Proc. Meeting of the ACL Special Interest Group in Natural Language Learning, Madrid.",
                "links": null
            },
            "BIBREF19": {
                "ref_id": "b19",
                "title": "Improving data driven wordclass tagging by system combination",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Van Halteren",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Zavrel",
                        "suffix": ""
                    },
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Daelemans",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "COLING-ACL'98",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "van Halteren, H. Zavrel, J. and Daelemans, W. (1998). Improving data driven wordclass tagging by system combination. In COLING-ACL'98, pages 491497, Montreal, Canada.",
                "links": null
            },
            "BIBREF20": {
                "ref_id": "b20",
                "title": "Efficient lattice representation and generation",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Weng",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Stolcke",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Sankar",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proc. Intl. Conf. on Spoken Language Processing",
                "volume": "6",
                "issue": "",
                "pages": "2531--2534",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Weng, F., Stolcke, A, & Sankar, A (1998). Efficient lattice representation and generation. Proc. Intl. Conf. on Spoken Language Processing, vol. 6, pp. 2531-2534. Sydney, Australia.",
                "links": null
            },
            "BIBREF21": {
                "ref_id": "b21",
                "title": "Decision lists for lexical ambiguity resolution: Application to accent restoration in Spanish and French",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Yarowsky",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proc. 32nd Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yarowsky, D. (1994). Decision lists for lexical ambiguity resolution: Application to accent restoration in Spanish and French. In Proc. 32nd Annual Meeting of the Association for Computational Linguistics, Las Cruces, NM.",
                "links": null
            },
            "BIBREF22": {
                "ref_id": "b22",
                "title": "Unsupervised word sense disambiguation rivaling supervised methods",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Yarowsky",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proceedings of the 33rd Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "189--196",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yarowsky, D. (1995) Unsupervised word sense disambiguation rivaling supervised methods. In Proceedings of the 33rd Annual Meeting of the Association for Computational Linguistics. Cambridge, MA, pp. 189-196, 1995.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "Learning Curves for Confusion Set Disambiguation",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF1": {
                "text": "Representation Size vs. Training Corpus Size",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF2": {
                "text": "Figure 3. Voting Among Classifiers",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF3": {
                "text": "Active Learning with Large Corpora manually labeled.",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "TABREF0": {
                "content": "<table><tr><td/><td>1.00</td><td/><td/><td/></tr><tr><td/><td>0.95</td><td/><td/><td/></tr><tr><td/><td>0.90</td><td/><td/><td/></tr><tr><td>Test Accuracy</td><td>0.85</td><td/><td/><td/></tr><tr><td/><td>0.80</td><td/><td/><td/></tr><tr><td/><td/><td/><td/><td colspan=\"2\">Memory-Based</td></tr><tr><td/><td>0.75</td><td/><td/><td>Winnow</td></tr><tr><td/><td/><td/><td/><td colspan=\"2\">Perceptron</td></tr><tr><td/><td/><td/><td/><td colspan=\"2\">Na\u00efve Bayes</td></tr><tr><td/><td>0.70</td><td/><td/><td/></tr><tr><td/><td>0.1</td><td>1</td><td>10</td><td>100</td><td>1000</td></tr><tr><td/><td/><td/><td>Millions of Words</td><td/></tr></table>",
                "type_str": "table",
                "text": "Thanks to Dan Roth for making both Winnow and Perceptron available.speech. The memory-based learner used only the word before and word after as features.",
                "num": null,
                "html": null
            }
        }
    }
}