File size: 70,327 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
{
    "paper_id": "P06-1032",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:26:21.419958Z"
    },
    "title": "Correcting ESL Errors Using Phrasal SMT Techniques",
    "authors": [
        {
            "first": "Chris",
            "middle": [],
            "last": "Brockett",
            "suffix": "",
            "affiliation": {
                "laboratory": "Natural Language Processing Group Microsoft Research One Microsoft Way",
                "institution": "",
                "location": {
                    "postCode": "98005",
                    "settlement": "Redmond",
                    "region": "WA",
                    "country": "USA"
                }
            },
            "email": "chrisbkt@microsoft.com"
        },
        {
            "first": "William",
            "middle": [
                "B"
            ],
            "last": "Dolan",
            "suffix": "",
            "affiliation": {
                "laboratory": "Natural Language Processing Group Microsoft Research One Microsoft Way",
                "institution": "",
                "location": {
                    "postCode": "98005",
                    "settlement": "Redmond",
                    "region": "WA",
                    "country": "USA"
                }
            },
            "email": ""
        },
        {
            "first": "Michael",
            "middle": [],
            "last": "Gamon",
            "suffix": "",
            "affiliation": {
                "laboratory": "Natural Language Processing Group Microsoft Research One Microsoft Way",
                "institution": "",
                "location": {
                    "postCode": "98005",
                    "settlement": "Redmond",
                    "region": "WA",
                    "country": "USA"
                }
            },
            "email": "mgamon@microsoft.com"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper presents a pilot study of the use of phrasal Statistical Machine Translation (SMT) techniques to identify and correct writing errors made by learners of English as a Second Language (ESL). Using examples of mass noun errors found in the Chinese Learner Error Corpus (CLEC) to guide creation of an engineered training set, we show that application of the SMT paradigm can capture errors not well addressed by widely-used proofing tools designed for native speakers. Our system was able to correct 61.81% of mistakes in a set of naturallyoccurring examples of mass noun errors found on the World Wide Web, suggesting that efforts to collect alignable corpora of pre-and post-editing ESL writing samples offer can enable the development of SMT-based writing assistance tools capable of repairing many of the complex syntactic and lexical problems found in the writing of ESL learners.",
    "pdf_parse": {
        "paper_id": "P06-1032",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper presents a pilot study of the use of phrasal Statistical Machine Translation (SMT) techniques to identify and correct writing errors made by learners of English as a Second Language (ESL). Using examples of mass noun errors found in the Chinese Learner Error Corpus (CLEC) to guide creation of an engineered training set, we show that application of the SMT paradigm can capture errors not well addressed by widely-used proofing tools designed for native speakers. Our system was able to correct 61.81% of mistakes in a set of naturallyoccurring examples of mass noun errors found on the World Wide Web, suggesting that efforts to collect alignable corpora of pre-and post-editing ESL writing samples offer can enable the development of SMT-based writing assistance tools capable of repairing many of the complex syntactic and lexical problems found in the writing of ESL learners.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Every day, in schools, universities and businesses around the world, in email and on blogs and websites, people create texts in languages that are not their own, most notably English. Yet, for writers of English as a Second Language (ESL), useful editorial assistance geared to their needs is surprisingly hard to come by. Grammar checkers such as that provided in Microsoft Word have been designed primarily with native speakers in mind. Moreover, despite growing demand for ESL proofing tools, there has been remarkably little progress in this area over the last decade. Research into computer feedback for ESL writers remains largely focused on smallscale pedagogical systems implemented within the framework of CALL (Computer Aided Language Learning) (Reuer 2003; Vanderventer Faltin, 2003) , while commercial ESL grammar checkers remain brittle and difficult to customize to meet the needs of ESL writers of different first-language (L1) backgrounds and skill levels.",
                "cite_spans": [
                    {
                        "start": 755,
                        "end": 767,
                        "text": "(Reuer 2003;",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 768,
                        "end": 794,
                        "text": "Vanderventer Faltin, 2003)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Some researchers have begun to apply statistical techniques to identify learner errors in the context of essay evaluation (Chodorow & Leacock, 2000; Lonsdale & Strong-Krause, 2003) , to detect non-native text (Tomokiyo & Jones, 2001) , and to support lexical selection by ESL learners through first-language translation (Liu et al., 2000) . However, none of this work appears to directly address the more general problem of how to robustly provide feedback to ESL writers-and for that matter non-native writers in any second language-in a way that is easily tailored to different L1 backgrounds and secondlanguage (L2) skill levels.",
                "cite_spans": [
                    {
                        "start": 122,
                        "end": 148,
                        "text": "(Chodorow & Leacock, 2000;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 149,
                        "end": 180,
                        "text": "Lonsdale & Strong-Krause, 2003)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 209,
                        "end": 233,
                        "text": "(Tomokiyo & Jones, 2001)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 320,
                        "end": 338,
                        "text": "(Liu et al., 2000)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this paper, we show that a noisy channel model instantiated within the paradigm of Statistical Machine Translation (SMT) (Brown et al., 1993) can successfully provide editorial assistance for non-native writers. In particular, the SMT approach provides a natural mechanism for suggesting a correction, rather than simply stranding the user with a flag indicating that the text contains an error. Section 2 further motivates the approach and briefly describes our SMT system. Section 3 discusses the data used in our experiment, which is aimed at repairing a common type of ESL error that is not well-handled by current grammar checking technology: mass/count noun confusions. Section 4 presents experimental results, along with an analysis of errors produced by the system. Finally we present discussion and some future directions for investigation.",
                "cite_spans": [
                    {
                        "start": 124,
                        "end": 144,
                        "text": "(Brown et al., 1993)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "A major difficulty for ESL proofing is that errors of grammar, lexical choice, idiomaticity, and style rarely occur in isolation. Instead, any given sentence produced by an ESL learner may involve a complex combination of all these error types. It is difficult enough to design a proofing tool that can reliably correct individual errors; the simultaneous combination of multiple errors is beyond the capabilities of current proofing tools designed for native speakers. Consider the following example, written by a Korean speaker and found on the World Wide Web, which involves the misapplication of countability to a mass noun:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Beyond Grammar Checking",
                "sec_num": "2.1"
            },
            {
                "text": "And I knew many informations about Christmas while I was preparing this article.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Beyond Grammar Checking",
                "sec_num": "2.1"
            },
            {
                "text": "The grammar and spelling checkers in Microsoft Word 2003 correctly suggest many much and informations information. Accepting these proposed changes, however, does not render the sentence entirely native-like. Substituting the word much for many leaves the sentence stilted in a way that is probably undetectable to an inexperienced non-native speaker, while the use of the word knew represents a lexical selection error that falls well outside the scope of conventional proofing tools. A better rewrite might be:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Beyond Grammar Checking",
                "sec_num": "2.1"
            },
            {
                "text": "And I learned a lot of information about Christmas while I was preparing this article.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Beyond Grammar Checking",
                "sec_num": "2.1"
            },
            {
                "text": "or, even more colloquially:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Beyond Grammar Checking",
                "sec_num": "2.1"
            },
            {
                "text": "And I learned a lot about Christmas while I was preparing this article",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Beyond Grammar Checking",
                "sec_num": "2.1"
            },
            {
                "text": "Repairing the error in the original sentence, then, is not a simple matter of fixing an agreement marker or substituting one determiner for another. Instead, wholesale replacement of the phrase knew many informations with the phrase learned a lot is needed to produce idiomatic-sounding output. Seen in these terms, the process of mapping from a raw, ESLauthored string to its colloquial equivalent looks remarkably like translation. Our goal is to show that providing editorial assistance for writers should be viewed as a special case of translation. Rather than learning how strings in one language map to strings in another, however, \"translation\" now involves learning how systematic patterns of errors in ESL learners' English map to corresponding patterns in native English",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Beyond Grammar Checking",
                "sec_num": "2.1"
            },
            {
                "text": "If ESL error correction is seen as a translation task, the task can be treated as an SMT problem using the noisy channel model of (Brown et al., 1993) : here the L2 sentence produced by the learner can be regarded as having been corrupted by noise in the form of interference from his or her L1 model and incomplete language models internalized during language learning. The task, then, is to reconstruct a corresponding valid sentence of L2 (target). Accordingly, we can seek to probabilistically identify the optimal correct target sentence(s) T* of an ESL input sentence S by applying the familiar SMT formula:",
                "cite_spans": [
                    {
                        "start": 130,
                        "end": 150,
                        "text": "(Brown et al., 1993)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Noisy Channel Model of ESL Errors",
                "sec_num": "2.2"
            },
            {
                "text": "( ) { } { } ) P( ) | P( max arg | P max arg * T T S S T T T T = =",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Noisy Channel Model of ESL Errors",
                "sec_num": "2.2"
            },
            {
                "text": "In the context of this model, editorial assistance becomes a matter of identifying those segments of the optimal target sentence or sentences that differ from the writer's original input and displaying them to the user. In practice, the patterns of errors produced by ESL writers of specific L1 backgrounds can be captured in the channel model as an emergent property of training data consisting ESL sentences aligned with their corrected edited counterparts. The highest frequency errors and infelicities should emerge as targets for replacement, while lesser frequency or idiosyncratic problems will in general not surface as false flags.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Noisy Channel Model of ESL Errors",
                "sec_num": "2.2"
            },
            {
                "text": "In this paper, we explore the use of a large-scale production statistical machine translation system to correct a class of ESL errors. A detailed description of the system can be found in and . In keeping with current best practices in SMT, our system is a phrasal machine translation system that attempts to learn mappings between \"phrases\" (which may not correspond to linguistic units) rather than individual words. What distinguishes this system from other phrasal SMT systems is that rather than aligning simple sequences of words, it maps small phrasal \"treelets\" generated by a dependency parse to corresponding strings in the target. This \"Tree-To-String\" model holds promise in that it allows us to potentially benefit from being able to access a certain amount of structural information during translation, without necessarily being completely tied to the need for a fully-well-formed linguistic analysis of the input-an important consideration when it is sought to handle ungrammatical or otherwise illformed ESL input, but also simultaneously to capture relationships not involving contiguous strings, for example determiner-noun relations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Implementation",
                "sec_num": "2.3"
            },
            {
                "text": "In our pilot study, this system was employed without modification to the system architecture. The sole adjustment made was to have both Source (erroneous) and Target (correct) sentences tokenized using an English language tokenizer. N-best results for phrasal alignment and ordering models in the decoder were optimized by lambda training via Maximum Bleu, along the lines described in (Och, 2003) .",
                "cite_spans": [
                    {
                        "start": 386,
                        "end": 397,
                        "text": "(Och, 2003)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Implementation",
                "sec_num": "2.3"
            },
            {
                "text": "In this paper, we focus on countability errors associated with mass nouns. This class of errors (involving nouns that cannot be counted, such as information, pollution, and homework) is characteristically encountered in ESL writing by native speakers of several East Asian languages (Dalgish, 1983; Hua & Lee, 2004) . 1 We began by identifying a list of English nouns that are frequently involved in mass/count errors in by writing by Chinese ESL learners, by taking the intersection of words which:",
                "cite_spans": [
                    {
                        "start": 283,
                        "end": 298,
                        "text": "(Dalgish, 1983;",
                        "ref_id": null
                    },
                    {
                        "start": 299,
                        "end": 315,
                        "text": "Hua & Lee, 2004)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 318,
                        "end": 319,
                        "text": "1",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Identifying Mass Nouns",
                "sec_num": "3.1"
            },
            {
                "text": "\u2022 occurred in either the Longman Dictionary of Contemporary English or the American Heritage Dictionary with a mass sense",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Identifying Mass Nouns",
                "sec_num": "3.1"
            },
            {
                "text": "\u2022 were involved in n \u2265 2 mass/count errors in the Chinese Learner English Corpus CLEC (Gui and Yang, 2003) , either tagged as a mass noun error or else with an adjacent tag indicating an article error. 2",
                "cite_spans": [
                    {
                        "start": 86,
                        "end": 106,
                        "text": "(Gui and Yang, 2003)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Identifying Mass Nouns",
                "sec_num": "3.1"
            },
            {
                "text": "This procedure yielded a list of 14 words: knowledge, food, homework, fruit, news, color, nutrition, equipment, paper, advice, haste, information, lunch, and tea. 3 Countability errors involving these words are scattered across 46 sentences in the CLEC corpus.",
                "cite_spans": [
                    {
                        "start": 54,
                        "end": 164,
                        "text": "food, homework, fruit, news, color, nutrition, equipment, paper, advice, haste, information, lunch, and tea. 3",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Identifying Mass Nouns",
                "sec_num": "3.1"
            },
            {
                "text": "For a baseline representing the level of writing assistance currently available to the average ESL writer, we submitted these sentences to the proofing tools in Microsoft Word 2003. The spelling and grammar checkers correctly identified 21 of the 46 relevant errors, proposed one incorrect substitution (a few advice a few advices), and failed to flag the remaining 25 errors. With one exception, the proofing tools successfully detected as spelling errors incorrect plurals on lexical items that permit only mass noun interpretations (e.g., informations), but ignored plural forms like fruits and papers even when contextually inappropriate. The proofing tools in Word 2003 also detected singular determiner mismatches with obligatory plural forms (e.g. a news).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Identifying Mass Nouns",
                "sec_num": "3.1"
            },
            {
                "text": "The errors identified in these sentences provided an informal template for engineering the data in our training set, which was created by manipulating well-formed, edited English sentences. Raw data came from a corpus of ~484.6 million words of Reuters Limited newswire articles, released between 1995 and 1998, combined with a ~7,175,000-word collection of articles from multiple news sources from 2004-2005. The resulting dataset was large enough to ensure that all targeted forms occurred with some frequency. From this dataset we culled about 346,000 sentences containing examples of the 14 targeted words. We then used hand-constructed regular expressions to convert these sentences into mostly-ungrammatical strings that exhibited characteristics of the CLEC data, for example:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "\u2022 much many: much advice many advice",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "\u2022 some a/an: some advice an advice",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "\u2022 conversions to plurals: much good advice many good advices",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "\u2022 deletion of counters: piece(s)/ item(s)/sheet(s) of)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "\u2022 insertion of determiners These were produced in multiple combinations for broad coverage, for example: I'm not trying to give you legal advice.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "\u2022 I'm not trying to give you a legal advice.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "\u2022 I'm not trying to give you the legal advice.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "\u2022 I'm not trying to give you the legal advices.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "A total of 24128 sentences from the news data were \"lesioned\" in this manner to create a set of 65826 sentence pairs. To create a balanced training set that would not introduce too many artifacts of the substitution (e.g., many should not always be recast as much just because that is the only mapping observed in the training data), we randomly created an equivalent number of identity-mapped pairs from the 346,000 examples, with each sentence mapping to itself.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "Training sets of various sizes up to 45,000 pairs were then randomly extracted from the lesioned and non-lesioned pairs so that data from both sets occurred in roughly equal proportions. Thus the 45K data set contains approximately 22,500 lesioned examples. An additional 1,000 randomly selected lesioned sentences were set aside for lambda training the SMT system's ordering and replacement models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": "3.2"
            },
            {
                "text": "The amount of tagged data in CLEC is too small to yield both development and test sets from the same data. In order to create a test set, we had a third party collect 150 examples of the 14 words from English websites in China. After minor cleanup to eliminate sentences irrelevant to the task, 4 we ended up with 123 example sentences to use as test set. The test examples vary widely in style, from the highly casual to more formal public announcements. Thirteen examples were determined to contain no errors relevant to our experiment, but were retained in the data. 5 Table 1 shows per-sentence results of translating the test set on systems built with training data sets of various sizes (given in thousands of sentence pairs). Numbers for the proofing tools in Word 2003 are presented by way of comparison, with the caveat that these tools have been intentionally implemented conservatively so as not to potentially irritate native users with false flags. For our purposes, a replacement string is viewed as correct if, in the view of a native speaker who might be helping an ESL writer, the replacement would appear more natural and hence potentially useful as a suggestion in the context of that sentence taken in isolation. Number disagreement on subject and verb were ignored for the purposes of this evaluation, since these errors were not modeled when we introduced lesions into the data. A correction counted as Whole if the system produced a contextually plausible substitution meeting two criteria: 1) number and 2) determiner/quantifier selection (e.g., many informations much information). Transformations involving bare singular targets (e.g., the fruits fruit) also counted as Whole. \nPartial corrections are those where only one of the two criteria was met and part of the desired correction was missing (e.g., an 4 In addition to eliminating cases that only involved subject-verb number agreement, we excluded a small amount of spam-like word salad, several instances of the word homework being misused to mean \"work done out of the home\", and one misidentified quotation from Scott's Ivanhoe. 5 This test set may be downloaded at http://research.microsoft.com/research/downloads an equipment versus the targeted bare noun equipment). Incorrect substitutions and newly injected erroneous material anywhere in the sentence counted as New Errors, even if the proposed replacement were otherwise correct. However, changes in upper and lower case and punctuation were ignored.",
                "cite_spans": [
                    {
                        "start": 1833,
                        "end": 1834,
                        "text": "4",
                        "ref_id": null
                    },
                    {
                        "start": 2114,
                        "end": 2115,
                        "text": "5",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 572,
                        "end": 579,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Test Data",
                "sec_num": "4.1"
            },
            {
                "text": "The 55.28% per-sentence score for Whole matches in the system trained on the 45K data set means that it correctly proposed full corrections in 61.8% of locations where corrections needed to be made. The percentage of Missed errors, i.e., targeted errors that were ignored by the system, is correspondingly low. On the 45K training data set, the system performs nearly on a par with Word in terms of not inducing corrections on forms that did not require replacement, as shown in the Correctly Left column. The dip in accuracy in the 30K sentence pair training set is an artifact of our extraction methodology: the relatively small lexical set that we are addressing here appears to be oversensitive to random variation in the engineered training data. This makes it difficult to set a meaningful lower bound on the amount of training data that might be needed for adequate coverage. Nonetheless, it is evident from the table, that given sufficient data, SMT techniques can successfully offer corrections for a significant percentage of cases of the phenomena in question. Table 2 shows some sample inputs together with successful corrections made by the system. Table 3 illustrates a case where two valid corrections are found in the 5-best ranked translations; intervening candidates were identical with the top-ranked candidate. Table 1 also indicates that errors associated with the SMT system itself are encouragingly few. A small number of errors in word order were found, one of which resulted in a severely garbled sentence in the 45K data set. In general, the percentage of this type of error declines consistently with growth of the training data size. Linearity of the training data may play a role, since the sentence pairs differ by only a few words. On the whole, however, we expect the system's order model to benefit from more training data.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 1072,
                        "end": 1079,
                        "text": "Table 2",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 1162,
                        "end": 1169,
                        "text": "Table 3",
                        "ref_id": null
                    },
                    {
                        "start": 1331,
                        "end": 1338,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "4.2"
            },
            {
                "text": "The most frequent single class of newly introduced error relates to sporadic substitution of the word their for determiners a/the. This is associated with three words, lunch, tea, and haste, and is the principal contributor to the lower percentages in the Correctly Left bin, as compared with Word. This overgeneralization error reflects our attempt to engineer the discontinuous mapping the X of them \u2192 their X, motivated by examples like the following, encountered in the CLEC dataset:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Error Analysis",
                "sec_num": "4.3"
            },
            {
                "text": "Shanghai residents can buy the fruits for a cheaper price than before.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input",
                "sec_num": null
            },
            {
                "text": "Shanghai residents can buy fruit for a cheaper price than before .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Replacement",
                "sec_num": null
            },
            {
                "text": "Thank u for giving me so many advice.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input",
                "sec_num": null
            },
            {
                "text": "Replacement thank u for giving me so much advice . The inability of our translation system to handle such discontinuities in a unitary manner reflects the limited ability of current SMT modeling techniques to capture long-distance effects. Similar alternations are rife in bilingual data, e.g., ne\u2026pas in French (Fox, 2002) and separable prefixes in German (Collins et al. 2005) . As SMT models become more adept at modeling long-distance effects in a principled manner, monolingual proofing will benefit as well. The Missed category is heterogeneous. The SMT system has an inherent bias against deletion, with the result that unwanted determiners tended not to be deleted, especially in the smaller training sets.",
                "cite_spans": [
                    {
                        "start": 312,
                        "end": 323,
                        "text": "(Fox, 2002)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 357,
                        "end": 378,
                        "text": "(Collins et al. 2005)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input",
                "sec_num": null
            },
            {
                "text": "Other errors related to coverage in the development data set. Several occurrences of greengrocer's apostrophes (tea's, equipment's) caused correction failures: these were not anticipated when engineering the training data. Likewise, the test data presented several malformed quantifiers and quantifier-like phrases (plenty tea \u2192 plenty of tea, a lot information \u2192 a lot of information, few information \u2192 too little information) that had been unattested in the development set. Examples such as these highlight the difficulty in obtaining complete coverage when using handcrafted techniques, whether to engineer errors, as in our case, or to handcraft targeted correction solutions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input",
                "sec_num": null
            },
            {
                "text": "The system performed poorly on words that commonly present both mass and count noun senses in ways that are apt to confuse L2 writers. One problematic case was paper. The following sentences, for example, remained uncorrected:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input",
                "sec_num": null
            },
            {
                "text": "He published many paper in provincial and national publication.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input",
                "sec_num": null
            },
            {
                "text": "He has published thirty-two pieces of papers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input",
                "sec_num": null
            },
            {
                "text": "Large amounts of additional training data would doubtless be helpful in providing contextual resolutions to the problems. Improved alignment models may also play a role here in capturing complex structures of the kind represented by constructions involving counters.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input",
                "sec_num": null
            },
            {
                "text": "The artificially-engineered training data that we relied on for our experiments proved surprisingly useful in modeling real errors made by nonnative speakers. However, this is obviously a less than ideal data source, since the errors introduced by regular expressions are homogeneously distributed in a way that naturally-occurring errors are not, creating artifacts that undoubtedly impair our SMT models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "Artificial data of this sort may be useful as proof of concept, but hand engineering such data plainly does not present a viable path to developing real world applications. In order to be able to handle the rich panoply of errors and error interactions encountered in the text of second language learners large quantities of naturallyoccurring \"before\" and \"after\" texts will need to be collected. By way of illustration, Table 4 shows the output of results of \"translating\" our test data into more natural English by hand and dumping the pre-and post-editing pairs to the 45K training set. 6 Although we were unable to exactly recover the target sentences, inspection showed that 25 sentences had improved, some significantly, as Table 4 shows. Under the right conditions, the SMT system can capture contextual morphological alternations (nutrition/nutritious), together with complex mappings represented by the dependencies learn \uf0df knowledge \uf0df many (ESL) and 6 Since a single example of each pair was insufficient to override the system's inherent bias towards unigram mappings, 5 copies of each pair were appended to the training data.",
                "cite_spans": [
                    {
                        "start": 961,
                        "end": 962,
                        "text": "6",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 422,
                        "end": 429,
                        "text": "Table 4",
                        "ref_id": null
                    },
                    {
                        "start": 731,
                        "end": 738,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "And we can learn many knowledge or new information from TV Candidate 1: And we can learn much knowledge or new information from TV",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input:",
                "sec_num": null
            },
            {
                "text": "And we can learn a lot of knowledge or new information from TV Table 3 . Multiple replacement candidates generated by 45K training set gain \uf0df knowledge \uf0df a lot of (English). In a rule-based correction system, an immense amount of hand-coding would be required to handle even a small subset of the potential range of such mismatches between learner and native-like English. This knowledge, we believe, is best acquired from data.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 63,
                        "end": 70,
                        "text": "Table 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Candidate 5:",
                "sec_num": null
            },
            {
                "text": "Given a sufficiently large corpus of aligned sentences containing error patterns produced by ESL writers of the same L1 background and their corrected counterparts we expect eventually to be able to capture the rich complexity of non-native error within a noisy-channel based SMT model.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Need for Data Collection",
                "sec_num": "5.1"
            },
            {
                "text": "As a practical matter, however, parallel data of the kind needed is far from easy to come by. This does not mean, however, that such data does not exist. The void left by commercial grammar checkers is filled, largely unobserved, by a number of services that provide editorial assistance, ranging from foreign language teachers, to language helpdesks in multinational corporations, to mentoring services for conferences. Translation bureaus frequently offer editing services for nonnative speakers. Yet, unlike translation, the \"before\" and \"after\" texts are rarely recycled in a form that can be used to build translation models. Although collecting this data will involve a large investment in time, effort, and infrastructure, a serious effort along these lines is likely to prove fruitful in terms of making it possible to apply the SMT paradigm to ESL error correction.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Need for Data Collection",
                "sec_num": "5.1"
            },
            {
                "text": "One challenge faced by the SMT model is the extremely high quality that will need to be attained before a system might be usable. Since it is highly undesirable that learners should be presented with inaccurate feedback that they may not have the experience or knowledge to assess, the quality bar imposed on error correction is far higher than is that tolerated in machine translation. Exploration of error correction and writing assistance using SMT models may thus prove an important venue for testing new SMT models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Feedback to SMT",
                "sec_num": "5.2"
            },
            {
                "text": "Statistical Machine Translation has provided a hugely successful research paradigm within the field of natural language processing over the last decade. One of the major advantages of using SMT in ESL writing assistance is that it can be expected to benefit automatically from any progress made in SMT itself. In fact, the approach presented here benefits from all the advantages of statistical machine translation. Since the architecture is not dependent on hard-to-maintain rules or regular expressions, little or no linguistic expertise will be required in developing and maintaining applications. As with SMT, this expertise is pushed into the data component, to be handled by instructors and editors, who do not need programming or scripting skills.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Advantages of the SMT Approach",
                "sec_num": "5.3"
            },
            {
                "text": "We expect it to be possible, moreover, once parallel data becomes available, to quickly ramp up new systems to accommodate the needs of",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Advantages of the SMT Approach",
                "sec_num": "5.3"
            },
            {
                "text": "And we can learn many knowledge or new information from TV.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input sentence",
                "sec_num": null
            },
            {
                "text": "and we can learn much knowledge or new information from TV .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "45K system output",
                "sec_num": null
            },
            {
                "text": "we can gain a lot of knowledge or new information from TV .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "45K + translation system output",
                "sec_num": null
            },
            {
                "text": "The following is one of the homework for last week.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input sentence",
                "sec_num": null
            },
            {
                "text": "45K system output the following is one of their homework for last week .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input sentence",
                "sec_num": null
            },
            {
                "text": "45K + translation system output the following is one of the homework assignments for last week .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Input sentence",
                "sec_num": null
            },
            {
                "text": "i like mushroom,its very nutrition 45K system output i like mushroom , its very nutrition 45K + translation system output i like mushroom , its very nutritious Table 4 . Contextual corrections before and after adding \"translations\" to 45K training data learners with different first-language backgrounds and different skill levels and to writing assistance for learners of L2s other than English. It is also likely that this architecture may have applications in pedagogical environments and as a tool to assist editors and instructors who deal regularly with ESL texts, much in the manner of either Human Assisted Machine Translation or Machine Assisted Human Translation. We also believe that this same architecture could be extended naturally to provide grammar and style tools for native writers.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 160,
                        "end": 167,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Input sentence",
                "sec_num": null
            },
            {
                "text": "In this pilot study we have shown that SMT techniques have potential to provide error correction and stylistic writing assistance to L2 learners. The next step will be to obtain a large dataset of pre-and post-editing ESL text with which to train a model that does not rely on engineered data. A major purpose of the present study has been to determine whether our hypothesis is robust enough to warrant the cost and effort of a collection or data creation effort.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Directions",
                "sec_num": "6"
            },
            {
                "text": "Although we anticipate that it will take a significant lead time to assemble the necessary aligned data, once a sufficiently large corpus is in hand, we expect to begin exploring ways to improve our SMT system by tailoring it more specifically to the demands of editorial assistance. In particular, we expect to be looking into alternative word alignment models and possibly enhancing our system's decoder using some of the richer, more structured language models that are beginning to emerge.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Directions",
                "sec_num": "6"
            },
            {
                "text": "These constructions are also problematic for handcrafted MT systems (Bond et al., 1994). 2 CLEC tagging is not comprehensive; some common mass noun errors (e.g., make a good progress) are not tagged in this corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "Terms that also had a function word sense, such as will, were eliminated for this experiment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "The authors have benefited extensively from discussions with Casey Whitelaw when he interned at Microsoft Research during the summer of 2005. We also thank the Butler Hill Group for collecting the examples in our test set.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Countability and Number in Japanese-to-English Machine Translation. COLING-94",
                "authors": [
                    {
                        "first": "Francis",
                        "middle": [],
                        "last": "Bond",
                        "suffix": ""
                    },
                    {
                        "first": "Kentaro",
                        "middle": [],
                        "last": "Ogura",
                        "suffix": ""
                    },
                    {
                        "first": "Satoru",
                        "middle": [],
                        "last": "Ikehara",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Bond, Francis, Kentaro Ogura and Satoru Ikehara. 1994. Countability and Number in Japanese-to- English Machine Translation. COLING-94.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "The Mathematics of Statistical Machine Translation",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Peter",
                        "suffix": ""
                    },
                    {
                        "first": "Stephen",
                        "middle": [
                            "A Della"
                        ],
                        "last": "Brown",
                        "suffix": ""
                    },
                    {
                        "first": "Robert",
                        "middle": [
                            "L"
                        ],
                        "last": "Pietra",
                        "suffix": ""
                    },
                    {
                        "first": "Vincent",
                        "middle": [
                            "J Della"
                        ],
                        "last": "Mercer",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Pietra",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Computational Linguistics",
                "volume": "19",
                "issue": "2",
                "pages": "263--311",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Peter E Brown, Stephen A. Della Pietra, Robert L. Mercer, and Vincent J. Della Pietra. 1993. The Mathematics of Statistical Machine Translation. Computational Linguistics, Vol. 19(2): 263-311.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "An Unsupervised Method for Detecting Grammatical Errors",
                "authors": [
                    {
                        "first": "Martin",
                        "middle": [],
                        "last": "Chodorow",
                        "suffix": ""
                    },
                    {
                        "first": "Claudia",
                        "middle": [],
                        "last": "Leacock",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Martin Chodorow and Claudia Leacock. 2000. An Unsupervised Method for Detecting Grammatical Errors. NAACL 2000.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Clause Restructuring for Statistical machine Translation",
                "authors": [
                    {
                        "first": "Michael",
                        "middle": [],
                        "last": "Collins",
                        "suffix": ""
                    },
                    {
                        "first": "Philipp",
                        "middle": [],
                        "last": "Koehn",
                        "suffix": ""
                    },
                    {
                        "first": "Ivona",
                        "middle": [],
                        "last": "Ku\u010derov\u00e1",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "531--540",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Michael Collins, Philipp Koehn and Ivona Ku\u010derov\u00e1. 2005. Clause Restructuring for Statistical machine Translation. ACL 2005, 531-540.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Phrasal Cohesion and Statistical Machine Translation",
                "authors": [
                    {
                        "first": "Heidi",
                        "middle": [
                            "J"
                        ],
                        "last": "Fox",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "EMNLP",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Heidi J. Fox. 2002. Phrasal Cohesion and Statistical Machine Translation. EMNLP 2002.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Chinese ESL Learners' Understanding of the English Count-Mass Distinction",
                "authors": [
                    {
                        "first": "Hua",
                        "middle": [],
                        "last": "Dongfan",
                        "suffix": ""
                    },
                    {
                        "first": "Thomas Hun-Tak",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of the 7th Generative Approaches to Second Language Acquisition Conference",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hua Dongfan and Thomas Hun-Tak Lee. 2004. Chi- nese ESL Learners' Understanding of the English Count-Mass Distinction. In Proceedings of the 7th Generative Approaches to Second Language Ac- quisition Conference (GASLA 2004).",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "PENS: A Machineaided English Writing System for Chinese Users",
                "authors": [
                    {
                        "first": "Ting",
                        "middle": [],
                        "last": "Liu",
                        "suffix": ""
                    },
                    {
                        "first": "Ming",
                        "middle": [],
                        "last": "Zhou",
                        "suffix": ""
                    },
                    {
                        "first": "Jianfeng",
                        "middle": [],
                        "last": "Gao",
                        "suffix": ""
                    },
                    {
                        "first": "Endong",
                        "middle": [],
                        "last": "Xun",
                        "suffix": ""
                    },
                    {
                        "first": "Changning",
                        "middle": [],
                        "last": "Huang",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ting Liu, Ming Zhou, Jianfeng Gao, Endong Xun, and Changning Huang. 2000. PENS: A Machine- aided English Writing System for Chinese Users. ACL 2000.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Automated Rating of ESL Essays",
                "authors": [
                    {
                        "first": "Deryle",
                        "middle": [],
                        "last": "Lonsdale",
                        "suffix": ""
                    },
                    {
                        "first": "Diane",
                        "middle": [],
                        "last": "Strong-Krause",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the HLT/NAACL Workshop: Building Educational Applications Using Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Deryle Lonsdale and Diane Strong-Krause. 2003. Automated Rating of ESL Essays. In Proceedings of the HLT/NAACL Workshop: Building Educa- tional Applications Using Natural Language Proc- essing.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Microsoft Research Treelet Translation System: IWSLT Evaluation",
                "authors": [
                    {
                        "first": "Arul",
                        "middle": [],
                        "last": "Menezes",
                        "suffix": ""
                    },
                    {
                        "first": "Chris",
                        "middle": [],
                        "last": "Quirk",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of the International Workshop on Spoken Language Translation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Arul Menezes, and Chris Quirk. 2005. Microsoft Re- search Treelet Translation System: IWSLT Evalua- tion. Proceedings of the International Workshop on Spoken Language Translation.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Minimum error rate training in statistical machine translation",
                "authors": [
                    {
                        "first": "Franz Josef",
                        "middle": [],
                        "last": "Och",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Franz Josef Och, 2003. Minimum error rate training in statistical machine translation. ACL 2003.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Improved Statistical Alignment Models",
                "authors": [
                    {
                        "first": "Franz Josef",
                        "middle": [],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "Hermann",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Franz Josef Och and Hermann Ney. 2000. Improved Statistical Alignment Models. ACL 2000.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Dependency Tree Translation: Syntactically Informed Phrasal SMT",
                "authors": [
                    {
                        "first": "Chris",
                        "middle": [],
                        "last": "Quirk",
                        "suffix": ""
                    },
                    {
                        "first": "Arul",
                        "middle": [],
                        "last": "Menezes",
                        "suffix": ""
                    },
                    {
                        "first": "Colin",
                        "middle": [],
                        "last": "Cherry",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Chris Quirk, Arul Menezes, and Colin Cherry. 2005. Dependency Tree Translation: Syntactically In- formed Phrasal SMT. ACL 2005.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Error Recognition and Feedback with Lexical Functional Grammar",
                "authors": [
                    {
                        "first": "Veit",
                        "middle": [],
                        "last": "Reuer",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "CALICO Journal",
                "volume": "20",
                "issue": "3",
                "pages": "497--512",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Veit Reuer. 2003. Error Recognition and Feedback with Lexical Functional Grammar. CALICO Jour- nal, 20(3): 497-512.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "You're not from round here, are you? Naive Bayes Detection of Non-Native Utterance Text",
                "authors": [
                    {
                        "first": "Laura",
                        "middle": [],
                        "last": "Mayfield Tomokiyo",
                        "suffix": ""
                    },
                    {
                        "first": "Rosie",
                        "middle": [],
                        "last": "Jones",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Laura Mayfield Tomokiyo and Rosie Jones. 2001. You're not from round here, are you? Naive Bayes Detection of Non-Native Utterance Text. NAACL 2001.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Natural language processing tools for computer assisted language learning",
                "authors": [
                    {
                        "first": "Anne",
                        "middle": [],
                        "last": "Vandeventer Faltin",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Linguistik online",
                "volume": "17",
                "issue": "03",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Anne Vandeventer Faltin. 2003. Natural language processing tools for computer assisted language learning. Linguistik online 17, 5/03 (http:// www.linguistik-online.de/17_03/vandeventer.html)",
                "links": null
            }
        },
        "ref_entries": {
            "TABREF0": {
                "content": "<table><tr><td>Data Size</td><td>Whole</td><td>Partial</td><td colspan=\"2\">Correctly Left New Error</td><td>Missed</td><td>Word Order Error</td></tr><tr><td>45K</td><td>55.28</td><td>0.81</td><td>8.13</td><td>12.20</td><td>21.14</td><td>1.63</td></tr><tr><td>30K</td><td>36.59</td><td>4.07</td><td>7.32</td><td>16.26</td><td>32.52</td><td>3.25</td></tr><tr><td>15K</td><td>47.15</td><td>2.44</td><td>5.69</td><td>11.38</td><td>29.27</td><td>4.07</td></tr><tr><td>cf. Word</td><td>29.27</td><td>0.81</td><td>10.57</td><td>1.63</td><td>57.72</td><td>N/A</td></tr></table>",
                "text": "Replacement percentages (per sentence basis) using different training data sets equipments",
                "num": null,
                "html": null,
                "type_str": "table"
            },
            "TABREF2": {
                "content": "<table><tr><td>In this equal world, lots of</td></tr><tr><td>people are still concerned</td></tr><tr><td>on the colors of them \u2026</td></tr></table>",
                "text": "Sample corrections, using 45K engineered training data",
                "num": null,
                "html": null,
                "type_str": "table"
            }
        }
    }
}