 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
    "paper_id": "P03-1010",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:13:42.125783Z"
    },
    "title": "Reliable Measures for Aligning Japanese-English News Articles and Sentences",
    "authors": [
        {
            "first": "Masao",
            "middle": [],
            "last": "Utiyama",
            "suffix": "",
            "affiliation": {
                "laboratory": "Communications Research Laboratory",
                "institution": "",
                "location": {
                    "addrLine": "3-5 Hikari-dai, Seika-cho, Souraku-gun",
                    "postCode": "619-0289",
                    "settlement": "Kyoto",
                    "country": "Japan"
                }
            },
            "email": "mutiyama@crl.go.jp"
        },
        {
            "first": "Hitoshi",
            "middle": [],
            "last": "Isahara",
            "suffix": "",
            "affiliation": {
                "laboratory": "Communications Research Laboratory",
                "institution": "",
                "location": {
                    "addrLine": "3-5 Hikari-dai, Seika-cho, Souraku-gun",
                    "postCode": "619-0289",
                    "settlement": "Kyoto",
                    "country": "Japan"
                }
            },
            "email": "isahara@crl.go.jp"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We have aligned Japanese and English news articles and sentences to make a large parallel corpus. We first used a method based on cross-language information retrieval (CLIR) to align the Japanese and English articles and then used a method based on dynamic programming (DP) matching to align the Japanese and English sentences in these articles. However, the results included many incorrect alignments. To remove these, we propose two measures (scores) that evaluate the validity of alignments. The measure for article alignment uses similarities in sentences aligned by DP matching and that for sentence alignment uses similarities in articles aligned by CLIR. They enhance each other to improve the accuracy of alignment. Using these measures, we have successfully constructed a largescale article and sentence alignment corpus available to the public.",
    "pdf_parse": {
        "paper_id": "P03-1010",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We have aligned Japanese and English news articles and sentences to make a large parallel corpus. We first used a method based on cross-language information retrieval (CLIR) to align the Japanese and English articles and then used a method based on dynamic programming (DP) matching to align the Japanese and English sentences in these articles. However, the results included many incorrect alignments. To remove these, we propose two measures (scores) that evaluate the validity of alignments. The measure for article alignment uses similarities in sentences aligned by DP matching and that for sentence alignment uses similarities in articles aligned by CLIR. They enhance each other to improve the accuracy of alignment. Using these measures, we have successfully constructed a largescale article and sentence alignment corpus available to the public.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "A large-scale Japanese-English parallel corpus is an invaluable resource in the study of natural language processing (NLP) such as machine translation and cross-language information retrieval (CLIR). It is also valuable for language education. However, no such corpus has been available to the public.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We recently have obtained a noisy parallel corpus of Japanese and English newspapers consisting of issues published over more than a decade and have tried to align their articles and sentences. We first aligned the articles using a method based on CLIR (Collier et al., 1998; Matsumoto and Tanaka, 2002) and then aligned the sentences in these articles by using a method based on dynamic programming (DP) matching (Gale and Church, 1993; Utsuro et al., 1994) . However, the results included many incorrect alignments due to noise in the corpus.",
                "cite_spans": [
                    {
                        "start": 253,
                        "end": 275,
                        "text": "(Collier et al., 1998;",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 276,
                        "end": 303,
                        "text": "Matsumoto and Tanaka, 2002)",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 414,
                        "end": 437,
                        "text": "(Gale and Church, 1993;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 438,
                        "end": 458,
                        "text": "Utsuro et al., 1994)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "To remove these, we propose two measures (scores) that evaluate the validity of article and sentence alignments. Using these, we can selectively extract valid alignments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this paper, we first discuss the basic statistics on the Japanese and English newspapers. We next explain methods and measures used for alignment. We then evaluate the effectiveness of the proposed measures. Finally, we show that our aligned corpus has attracted people both inside and outside the NLP community.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The English articles as of mid-July 1996 have tags indicating whether they are translated from Japanese articles or not, though they don't have explicit links to the original Japanese articles. Consequently, we only used the translated English articles for the article alignment. The number of English articles used was 35,318, which is 68 percent of all of the articles. On the other hand, the English articles before mid-July 1996 do not have such tags. So we used all the articles for the period. The number of them was 59,086. We call the set of articles before mid -July 1996 -July \"1989 -July -1996 and call the set of articles after mid -July 1996 -July \"1996 -July -2001 If an English article is a translation of a Japanese article, then the publication date of the Japanese article will be near that of the English article. So we searched for the original Japanese articles within 2 days before and after the publication of each English article, i.e., the corresponding article of an English article was searched for from the Japanese articles of 5 days' issues. The average number of English articles per day was 24 and that of Japanese articles per 5 days was 1,532 for 1989 -1996 . For 1996 -2001 , the average number of English articles was 18 and that of Japanese articles was 2,885. As there are many candidates for alignment with English articles, we need a reliable measure to estimate the validity of article alignments to search for appropriate Japanese articles from these ambiguous matches.",
                "cite_spans": [
                    {
                        "start": 570,
                        "end": 580,
                        "text": "-July 1996",
                        "ref_id": null
                    },
                    {
                        "start": 581,
                        "end": 592,
                        "text": "-July \"1989",
                        "ref_id": null
                    },
                    {
                        "start": 593,
                        "end": 604,
                        "text": "-July -1996",
                        "ref_id": null
                    },
                    {
                        "start": 644,
                        "end": 654,
                        "text": "-July 1996",
                        "ref_id": null
                    },
                    {
                        "start": 655,
                        "end": 666,
                        "text": "-July \"1996",
                        "ref_id": null
                    },
                    {
                        "start": 667,
                        "end": 678,
                        "text": "-July -2001",
                        "ref_id": null
                    },
                    {
                        "start": 1181,
                        "end": 1185,
                        "text": "1989",
                        "ref_id": null
                    },
                    {
                        "start": 1186,
                        "end": 1191,
                        "text": "-1996",
                        "ref_id": null
                    },
                    {
                        "start": 1192,
                        "end": 1202,
                        "text": ". For 1996",
                        "ref_id": null
                    },
                    {
                        "start": 1203,
                        "end": 1208,
                        "text": "-2001",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Correct article alignment does not guarantee the existence of one-to-one correspondence between English and Japanese sentences in article alignment because literal translations are exceptional. Original Japanese articles may be restructured to conform to the style of English newspapers, additional descriptions may be added to fill cultural gaps, and detailed descriptions may be omitted. A typical example of a restructured English and Japanese article pair is:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Part of an English article: e1 Two bullet holes were found at the home of Kengo Tanaka, 65, president of Bungei Shunju, in Akabane, Tokyo, by his wife Kimiko, 64, at around 9 a.m. Monday. /e1 e2 Police suspect right-wing activists, who have mounted criticism against articles about the Imperial family appearing in the Shukan Bunshun, the publisher's weekly magazine, were responsible for the shooting. /e2 e3 Police received an anonymous phone call shortly after 1 a.m. Monday by a caller who reported hearing gunfire near Tanaka's residence. /e3 e4 Police found nothing after investigating the report, but later found a bullet in the Tanakas' bedroom, where they were sleeping at the time of the shooting. /e4",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Part of a literal translation of a Japanese article: j1 At about 8:55 a.m. on the 29th, Kimiko Tanaka, 64, the wife of Bungei Shunju's president Kengo Tanaka, 65, found bullet holes on the eastern wall of their two-story house at 4 Akabane Nishi, Kitaku, Tokyo. /j1 j2 As a result of an investigation, the officers of the Akabane police station found two holes on the exterior wall of the bedroom and a bullet in the bedroom. /j2 j3 After receiving an anonymous phone call shortly after 1 a.m. saying that two or three gunshots were heard near Tanaka's residence, police officers hurried to the scene for investigation, but no bullet holes were found. /j3 j4 When gunshots were heard, Mr. and Mrs. Tanaka were sleeping in the bedroom. /j4 j5 Since Shukan Bunshun, a weekly magazine published by Bungei Shunju, recently ran an article criticizing the Imperial family, Akabane police suspect rightwing activists who have mounted criticism against the recent article to be responsible for the shooting and have been investigating the incident. /j5",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "where there is a three-to-four correspondence between {e1, e3, e4} and {j1, j2, j3, j4}, together with a one-to-one correspondence between e2 and j5.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Such sentence matches are of particular interest to researchers studying human translations and/or stylistic differences between English and Japanese newspapers. However, their usefulness as resources for NLP such as machine translation is limited for the time being. It is therefore important to extract sentence alignments that are as literal as possible. To achieve this, a reliable measure of the validity of sentence alignments is necessary.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We adopt a standard strategy to align articles and sentences. First, we use a method based on CLIR to align Japanese and English articles (Collier et al., 1998; Matsumoto and Tanaka, 2002) and then a method based on DP matching to align Japanese and English sentences (Gale and Church, 1993; Utsuro et al., 1994) in these articles. As each of these methods uses existing NLP techniques, we describe them briefly focusing on basic similarity measures, which we will compare with our proposed measures in Section 5.",
                "cite_spans": [
                    {
                        "start": 138,
                        "end": 160,
                        "text": "(Collier et al., 1998;",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 161,
                        "end": 188,
                        "text": "Matsumoto and Tanaka, 2002)",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 268,
                        "end": 291,
                        "text": "(Gale and Church, 1993;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 292,
                        "end": 312,
                        "text": "Utsuro et al., 1994)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Basic Alignment Methods",
                "sec_num": "3"
            },
            {
                "text": "We first convert each of the Japanese articles into a set of English words. We use ChaSen 1 to segment each of the Japanese articles into words. We next extract content words, which are then translated into English words by looking them up in the EDR Japanese-English bilingual dictionary, 2 EDICT, and ENAMDICT, 3 which have about 230,000, 100,000, and 180,000 entries, respectively. We select two English words for each of the Japanese words using simple heuristic rules based on the frequencies of English words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Translation of words",
                "sec_num": null
            },
            {
                "text": "We use each of the English articles as a query and search for the Japanese article that is most similar to the query article. The similarity between an English article and a (word-based English translation of) Japanese article is measured by BM25 (Robertson and Walker, 1994) . BM25 and its variants have been proven to be quite efficient in information retrieval. Readers are referred to papers by the Text REtrieval Conference (TREC) 4 , for example.",
                "cite_spans": [
                    {
                        "start": 247,
                        "end": 275,
                        "text": "(Robertson and Walker, 1994)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": "The definition of BM25 is:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": "BM25(J, E) = T \u2208E w (1) (k 1 + 1)tf K + tf (k 3 + 1)qtf k 3 + qtf",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": "where J is the set of translated English words of a Japanese article and E is the set of words of an English article. The words are stemmed and stop words are removed.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": "T is a word contained in E.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": "w (1) is the weight of T , w (1) = log (N \u2212n+0.5) (n+0.5) . N is the number of Japanese articles to be searched.",
                "cite_spans": [
                    {
                        "start": 2,
                        "end": 5,
                        "text": "(1)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": "n is the number of articles containing T .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": "K is k 1 ((1 \u2212 b) + b dl avdl )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": ". k 1 , b and k 3 are parameters set to 1, 1, and 1000, respectively. dl is the document length of J and avdl is the average document length in words. tf is the frequency of occurrence of T in J. qtf is the frequency of T in E.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": "To summarize, we first translate each of the Japanese articles into a set of English words. We then use each of the English articles as a query and search for the most similar Japanese article in terms of BM25 and assume that it corresponds to the English article.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Article retrieval",
                "sec_num": null
            },
            {
                "text": "The sentences 5 in the aligned Japanese and English articles are aligned by a method based on DP matching (Gale and Church, 1993; Utsuro et al., 1994) .",
                "cite_spans": [
                    {
                        "start": 106,
                        "end": 129,
                        "text": "(Gale and Church, 1993;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 130,
                        "end": 150,
                        "text": "Utsuro et al., 1994)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence alignment",
                "sec_num": "3.2"
            },
            {
                "text": "We allow 1-to-n or n-to-1 (1 \u2264 n \u2264 6) alignments when aligning the sentences. Readers are referred to Utsuro et al. (1994) for a concise description of the algorithm. Here, we only discuss the similarities between Japanese and English sentences for alignment. Let J i and E i be the words of Japanese and English sentences for i-th alignment. The similarity 6 between J i and E i is:",
                "cite_spans": [
                    {
                        "start": 102,
                        "end": 122,
                        "text": "Utsuro et al. (1994)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence alignment",
                "sec_num": "3.2"
            },
            {
                "text": "SIM(J i , E i ) = co(J i \u00d7 E i ) + 1 l(J i ) + l(E i ) \u2212 2co(J i \u00d7 E i ) + 2 where l(X) = x\u2208X f (x) f (x) is the frequency of x in the sentences. co(J i \u00d7 E i ) = (j,e)\u2208J i \u00d7E i min(f (j), f (e)) J i \u00d7 E i = {(j, e)|j \u2208 J i , e \u2208 E i } and J i \u00d7 E i is",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence alignment",
                "sec_num": "3.2"
            },
            {
                "text": "a one-to-one correspondence between Japanese and English words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence alignment",
                "sec_num": "3.2"
            },
            {
                "text": "J i and E i are obtained as follows.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence alignment",
                "sec_num": "3.2"
            },
            {
                "text": "We use ChaSen to morphologically analyze the Japanese sentences and extract content words, which consists of J i . We use Brill's tagger (Brill, 1992) to POS-tag the English sentences, extract content words, and use Word-Net's library 7 to obtain lemmas of the words, which consists of E i . We use simple heuristics to obtain J i \u00d7 E i , i.e., a one-to-one correspondence between the words in J i and E i , by looking up Japanese-English and English-Japanese dictionaries made up by combining entries in the EDR Japanese-English bilingual dictionary and the EDR English-Japanese bilingual dictionary. Each of the constructed dictionaries has over 300,000 entries. We evaluated the implemented program against a corpus consisting of manually aligned Japanese and English sentences. The source texts were Japanese white papers (JEIDA, 2000) . The style of translation was generally literal reflecting the nature of government documents. We used 12 pairs of texts for evaluation. The average number of Japanese sentences per text was 413 and that of English sentences was 495.",
                "cite_spans": [
                    {
                        "start": 137,
                        "end": 150,
                        "text": "(Brill, 1992)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 826,
                        "end": 839,
                        "text": "(JEIDA, 2000)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence alignment",
                "sec_num": "3.2"
            },
            {
                "text": "The recall, R, and precision, P , of the program against this corpus were R = 0.982 and P = 0.986, respectively, where The number of pairs in a one-to-n alignment is n. For example, if sentences {J 1 } and {E 1 , E 2 , E 3 } are aligned, then three pairs J 1 , E 1 , J 1 , E 2 , and J 1 , E 3 are obtained.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence alignment",
                "sec_num": "3.2"
            },
            {
                "text": "This recall and precision are quite good considering the relatively large differences in the language structures between Japanese and English.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence alignment",
                "sec_num": "3.2"
            },
            {
                "text": "We use BM25 and SIM to evaluate the similarity in articles and sentences, respectively. These measures, however, cannot be used to reliably discriminate between correct and incorrect alignments as will be discussed in Section 5. This motivated us to devise more reliable measures based on basic similarities.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reliable Measures",
                "sec_num": "4"
            },
            {
                "text": "BM25 measures the similarity between two bags of words. It is not sensitive to differences in the order of sentences between two articles. To remedy this, we define a measure that uses the similarities in sentence alignments in the article alignment. We define AVSIM(J, E) as the similarity between Japanese article, J, and English article, E:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reliable Measures",
                "sec_num": "4"
            },
            {
                "text": "AVSIM(J, E) = (\u2211 k=1..m SIM(J k , E k )) / m, where (J 1 , E 1 ), (J 2 , E 2 ), . . . , (J m , E m )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reliable Measures",
                "sec_num": "4"
            },
            {
                "text": "are the sentence alignments obtained by the method described in Section 3.2. The sentence alignments in a correctly aligned article alignment should have more similarity than the ones in an incorrectly aligned article alignment. Consequently, article alignments with high AVSIM are likely to be correct. Our sentence alignment program aligns sentences accurately if the English sentences are literal translations of the Japanese as discussed in Section 3.2. However, the relation between English news sentences and Japanese news sentences are not literal translations. Thus, the results for sentence alignments include many incorrect alignments. To discriminate between correct and incorrect alignments, we take advantage of the similarity in article alignments containing sentence alignments so that the sentence alignments in a similar article alignment will have a high value. We define",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reliable Measures",
                "sec_num": "4"
            },
            {
                "text": "SntScore(J i , E i ) = AVSIM(J, E) \u00d7 SIM(J i , E i ). SntScore(J i , E i ) is the similarity in the i-th alignment, (J i , E i )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reliable Measures",
                "sec_num": "4"
            },
            {
                "text": ", in article alignment J and E. When we compare the validity of two sentence alignments in the same article alignment, the rank order of sentence alignments obtained by applying SntScore is the same as that of SIM because they share a common AVSIM. However, when we compare the validity of two sentence alignments in different article alignments, SntScore prefers the sentence alignment with the more similar (high AVSIM) article alignment even if their SIM has the same value, while SIM cannot discriminate between the validity of two sentence alignments if their SIM has the same value. Therefore, SntScore is more appropriate than SIM if we want to compare sentence alignments in different article alignments, because, in general, a sentence alignment in a reliable article alignment is more reliable than one in an unreliable article alignment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reliable Measures",
                "sec_num": "4"
            },
            {
                "text": "The next section compares the effectiveness of AVSIM to that of BM25, and that of SntScore to that of SIM.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reliable Measures",
                "sec_num": "4"
            },
            {
                "text": "Here, we discuss the results of evaluating article and sentence alignments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of Alignment",
                "sec_num": "5"
            },
            {
                "text": "We first estimate the precision of article alignments by using randomly sampled alignments. Next, we sort them in descending order of BM25 and AVSIM to see whether these measures can be used to provide correct alignments with a high ranking. Finally, we show that the absolute values of AVSIM correspond well with human judgment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of article alignment",
                "sec_num": "5.1"
            },
            {
                "text": "Each English article was aligned with a Japanese article with the highest BM25. We sampled 100 article alignments from each of 1996-2001 and 1989-1996 . We then classified the samples into four categories: \"A\", \"B\", \"C\", and \"D\". \"A\" means that there was more than 50% to 60% overlap in the content of articles. \"B\" means more than 20% to 30% and less than 50% to 60% overlap. \"D\" means that there was no overlap at all. \"C\" means that alignment was not included in \"A\",\"B\" or \"D\". We regard alignments that were judged to be A or B to be suitable for NLP because of their relatively large overlap. 1996-2001 1989-1996 The results of evaluations are in Table 1 . 8 Here, \"ratio\" means the ratio of the number of articles judged to correspond to the respective category against the total number of articles. For example, 0.59 in line \"A\" of 1996-2001 means that 59 out of 100 samples were evaluated as A. \"Lower\" and \"upper\" mean the lower and upper bounds of the 95% confidence interval for ratio.",
                "cite_spans": [
                    {
                        "start": 127,
                        "end": 150,
                        "text": "1996-2001 and 1989-1996",
                        "ref_id": null
                    },
                    {
                        "start": 599,
                        "end": 618,
                        "text": "1996-2001 1989-1996",
                        "ref_id": null
                    },
                    {
                        "start": 663,
                        "end": 664,
                        "text": "8",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 653,
                        "end": 660,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Randomly sampled article alignments",
                "sec_num": null
            },
            {
                "text": "The table shows that the precision (= sum of the ratios of A and B) for 1996-2001 was higher than that for 1989-1996. They were 0.71 for 1996-2001 and 0.44 for 1989-1996. This is because the English articles from 1996-2001 were translations of Japanese articles, while those from 1989-1996 were not necessarily translations as explained in Section 2. Although the precision for 1996-2001 was higher than that for 1989-1996, it is still too low to use them as NLP resources. In other words, the article alignments included many incorrect alignments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Randomly sampled article alignments",
                "sec_num": null
            },
            {
                "text": "We want to extract alignments which will be evaluated as A or B from these noisy alignments. To do this, we have to sort all alignments according to some measures that determine their validity and extract highly ranked ones. To achieve this, AVSIM is more reliable than BM25 as is explained below.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Randomly sampled article alignments",
                "sec_num": null
            },
            {
                "text": "We sorted the same alignments in Table 1 in decreasing order of AVSIM and BM25. Alignments judged to be A or B were regarded as correct. The number, N, of correct alignments and precision, P, up to each rank are shown in Table 2 . 1996-2001 1989-1996 From the table, we can conclude that AVSIM ranks correct alignments higher than BM25. Its greater accuracy indicates that it is important to take similarities in sentence alignments into account when estimating the validity of article alignments. Table 2 shows that AVSIM is reliable in ranking correct and incorrect alignments. This section reveals that not only rank order but also absolute values of AVSIM are reliable for discriminating between correct and incorrect alignments. That is, they correspond well with human evaluations. This means that a threshold value is set for each of 1996-2001 and 1989-1996 so that valid alignments can be extracted by selecting alignments whose AVSIM is larger than the threshold.",
                "cite_spans": [
                    {
                        "start": 231,
                        "end": 250,
                        "text": "1996-2001 1989-1996",
                        "ref_id": null
                    },
                    {
                        "start": 841,
                        "end": 864,
                        "text": "1996-2001 and 1989-1996",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 33,
                        "end": 40,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    },
                    {
                        "start": 221,
                        "end": 228,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    },
                    {
                        "start": 498,
                        "end": 505,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Sorted alignments: AVSIM vs. BM25",
                "sec_num": null
            },
            {
                "text": "We used the same data in Table 1 to calculate statistics on AVSIM. They are shown in Tables 3  and 4 for 1996-2001 and 1989-1996, respectively (1996) (1997) (1998) (1999) (2000) (2001) In these tables, \"N\" means the number of alignments against the corresponding human judgment. (1989) (1990) (1991) (1992) (1993) (1994) (1995) (1996) \"Av.\" means the average value of AVSIM. \"Lower\" and \"upper\" mean the lower and upper bounds of the 95% confidence interval for the average. \"Th.\" means the threshold for AVSIM that can be used to discriminate between the alignments estimated to be the corresponding evaluations. For example, in Table 3, evaluations A and B are separated by 0.168. These thresholds were identified through linear discriminant analysis. The asterisks \"**\" and \"*\" in the \"sig.\" column mean that the difference in averages for AVSIM is statistically significant at 1% and 5% based on a one-sided Welch test.",
                "cite_spans": [
                    {
                        "start": 105,
                        "end": 142,
                        "text": "1996-2001 and 1989-1996, respectively",
                        "ref_id": null
                    },
                    {
                        "start": 143,
                        "end": 149,
                        "text": "(1996)",
                        "ref_id": null
                    },
                    {
                        "start": 150,
                        "end": 156,
                        "text": "(1997)",
                        "ref_id": null
                    },
                    {
                        "start": 157,
                        "end": 163,
                        "text": "(1998)",
                        "ref_id": null
                    },
                    {
                        "start": 164,
                        "end": 170,
                        "text": "(1999)",
                        "ref_id": null
                    },
                    {
                        "start": 171,
                        "end": 177,
                        "text": "(2000)",
                        "ref_id": null
                    },
                    {
                        "start": 178,
                        "end": 184,
                        "text": "(2001)",
                        "ref_id": null
                    },
                    {
                        "start": 279,
                        "end": 285,
                        "text": "(1989)",
                        "ref_id": null
                    },
                    {
                        "start": 286,
                        "end": 292,
                        "text": "(1990)",
                        "ref_id": null
                    },
                    {
                        "start": 293,
                        "end": 299,
                        "text": "(1991)",
                        "ref_id": null
                    },
                    {
                        "start": 300,
                        "end": 306,
                        "text": "(1992)",
                        "ref_id": null
                    },
                    {
                        "start": 307,
                        "end": 313,
                        "text": "(1993)",
                        "ref_id": null
                    },
                    {
                        "start": 314,
                        "end": 320,
                        "text": "(1994)",
                        "ref_id": null
                    },
                    {
                        "start": 321,
                        "end": 327,
                        "text": "(1995)",
                        "ref_id": null
                    },
                    {
                        "start": 328,
                        "end": 334,
                        "text": "(1996)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 25,
                        "end": 32,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    },
                    {
                        "start": 85,
                        "end": 104,
                        "text": "Tables 3  and 4 for",
                        "ref_id": "TABREF5"
                    }
                ],
                "eq_spans": [],
                "section": "AVSIM and human judgment",
                "sec_num": null
            },
            {
                "text": "In these tables, except for the differences in the averages for B and C in Table 4 , all differences in averages are statistically significant. This indicates that AVSIM can discriminate between differences in judgment. In other words, the AVSIM values correspond well with human judgment. We then tried to determine why B and C in Table 4 were not separated by inspecting the article alignments and found that alignments evaluated as C in Table 4 had relatively large overlaps compared with alignments judged as C in Table 3 . It was more difficult to distinguish B or C in Table 4 than in Table 3 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 75,
                        "end": 82,
                        "text": "Table 4",
                        "ref_id": "TABREF7"
                    },
                    {
                        "start": 332,
                        "end": 339,
                        "text": "Table 4",
                        "ref_id": "TABREF7"
                    },
                    {
                        "start": 440,
                        "end": 447,
                        "text": "Table 4",
                        "ref_id": "TABREF7"
                    },
                    {
                        "start": 518,
                        "end": 525,
                        "text": "Table 3",
                        "ref_id": "TABREF5"
                    },
                    {
                        "start": 575,
                        "end": 598,
                        "text": "Table 4 than in Table 3",
                        "ref_id": "TABREF5"
                    }
                ],
                "eq_spans": [],
                "section": "AVSIM and human judgment",
                "sec_num": null
            },
            {
                "text": "We next classified all article alignments in 1996-2001 and 1989-1996 based on the thresholds in Tables 3 and 4. The numbers of alignments are in Table  5 . It shows that the number of alignments estimated to be A or B was 46738 (= 31495 + 15243). We regard about 47,000 article alignments to be sufficiently large to be useful as a resource for NLP such as bilingual lexicon acquisition and for language education. 1996-2001 1989-1996 In summary, AVSIM is more reliable than BM25 and corresponds well with human judgment. By using thresholds, we can extract about 47,000 article alignments which are estimated to be A or B evaluations.",
                "cite_spans": [
                    {
                        "start": 45,
                        "end": 68,
                        "text": "1996-2001 and 1989-1996",
                        "ref_id": null
                    },
                    {
                        "start": 404,
                        "end": 434,
                        "text": "education. 1996-2001 1989-1996",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 145,
                        "end": 153,
                        "text": "Table  5",
                        "ref_id": "TABREF9"
                    }
                ],
                "eq_spans": [],
                "section": "AVSIM and human judgment",
                "sec_num": null
            },
            {
                "text": "Sentence alignments in article alignments have many errors even if they have been obtained from correct article alignments due to free translation as discussed in Section 2. To extract only correct alignments, we sorted whole sentence alignments in whole article alignments in decreasing order of SntScore and selected only the higher ranked sentence alignments so that the selected alignments would be sufficiently precise to be useful as NLP resources.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of sentence alignment",
                "sec_num": "5.2"
            },
            {
                "text": "The number of whole sentence alignments was about 1,300,000. The most important category for sentence alignment is one-to-one. Thus, we want to discard as many errors in this category as possible. In the first step, we classified whole oneto-one alignments into two classes: the first consisted of alignments whose Japanese and English sentences ended with periods, question marks, exclamation marks, or other readily identifiable characteristics. We call this class \"one-to-one\". The second class consisted of the one-to-one alignments not belonging to the first class. The alignments in this class, together with the whole one-to-n alignments, are called \"one-to-many\". One-to-one had about 640,000 alignments and one-to-many had about 660,000 alignments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of sentence alignment",
                "sec_num": "5.2"
            },
            {
                "text": "We first evaluated the precision of one-to-one alignments by sorting them in decreasing order of SntScore. We randomly extracted 100 samples from each of 10 blocks ranked at the top-300,000 alignments. (A block had 30,000 alignments.) We classified these 1000 samples into two classes: The first was \"match\" (A), the second was \"not match\" (D). We judged a sample as \"A\" if the Japanese and English sentences of the sample shared a common event (approximately a clause). \"D\" consisted of the samples not belonging to \"A\". The results of evaluation are in Table 6. 9  range # of A's # of D's  1 -100  0  30001 -99  1  60001 -99  1  90001 -97  3  120001 -96  4  150001 -92  8  180001 -82  18  210001 -74  26  240001 -47  53  270001 -30  70   Table 6 : One-to-one: Rank vs. judgment",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 555,
                        "end": 747,
                        "text": "Table 6. 9  range # of A's # of D's  1 -100  0  30001 -99  1  60001 -99  1  90001 -97  3  120001 -96  4  150001 -92  8  180001 -82  18  210001 -74  26  240001 -47  53  270001 -30  70   Table 6",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of sentence alignment",
                "sec_num": "5.2"
            },
            {
                "text": "This table shows that the number of A's decreases rapidly as the rank increases. This means that SntScore ranks appropriate one-to-one alignments highly. The table indicates that the top-150,000 oneto-one alignments are sufficiently reliable. 10 The ratio of A's in these alignments was 0.982.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of sentence alignment",
                "sec_num": "5.2"
            },
            {
                "text": "We then evaluated precision for one-to-many alignments by sorting them in decreasing order of SntScore. We classified one-to-many into three categories: \"1-90000\", \"90001-180000\", and \"180001-270000\", each of which was covered by the range of SntScore of one-to-one that was presented in Table  6 . We randomly sampled 100 one-to-many alignments from these categories and judged them to be A or D (see Table 7 ). Table 7 indicates that the 38,090 alignments in the range from \"1-90000\" are sufficiently reliable.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 288,
                        "end": 296,
                        "text": "Table  6",
                        "ref_id": null
                    },
                    {
                        "start": 402,
                        "end": 409,
                        "text": "Table 7",
                        "ref_id": null
                    },
                    {
                        "start": 413,
                        "end": 420,
                        "text": "Table 7",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of sentence alignment",
                "sec_num": "5.2"
            },
            {
                "text": "range # of one-to-many # of A's # of D 's  1 -38090  98  2  90001 -59228  87  13  180001 -71711  61  39   Table 7 : One-to-many: Rank vs. judgment Tables 6 and 7 show that we can extract valid alignments by sorting alignments according to SntScore and by selecting only higher ranked sentence alignments.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 39,
                        "end": 113,
                        "text": "'s  1 -38090  98  2  90001 -59228  87  13  180001 -71711  61  39   Table 7",
                        "ref_id": "TABREF1"
                    },
                    {
                        "start": 147,
                        "end": 161,
                        "text": "Tables 6 and 7",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of sentence alignment",
                "sec_num": "5.2"
            },
            {
                "text": "Overall, evaluations between the first and second check were consistent.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of sentence alignment",
                "sec_num": "5.2"
            },
            {
                "text": "10 The notion of \"appropriate (correct) sentence alignment\" depends on applications. Machine translation, for example, may require more precise (literal) alignment. To get literal alignments beyond a sharing of a common event, we will select a set of alignments from the top of the sorted alignments that satisfies the required literalness. This is because, in general, higher ranked alignments are more literal translations, because those alignments tend to have many one-to-one corresponding words and to be contained in highly similar article alignments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of sentence alignment",
                "sec_num": "5.2"
            },
            {
                "text": "We compared SntScore with SIM and found that SntScore is more reliable than SIM in discriminating between correct and incorrect alignments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparison with SIM",
                "sec_num": null
            },
            {
                "text": "We first sorted the one-to-one alignments in decreasing order of SIM and randomly sampled 100 alignments from the top-150,000 alignments. We classified the samples into A or D. The number of A's was 93, and that of D's was 7. The precision was 0.93. However, in Table 6 , the number of A's was 491 and D's was 9, for the 500 samples extracted from the top-150,000 alignments. The precision was 0.982. Thus, the precision of SntScore was higher than that of SIM and this difference is statistically significant at 1% based on a one-sided proportional test.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 262,
                        "end": 269,
                        "text": "Table 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Comparison with SIM",
                "sec_num": null
            },
            {
                "text": "We then sorted the one-to-many alignments by SIM and sampled 100 alignments from the top 38,090 and judged them. There were 89 A's and 11 D's. The precision was 0.89. However, in Table 7, there were 98 A's and 2 D's for samples from the top 38,090 alignments. The precision was 0.98. This difference is also significant at 1% based on a one-sided proportional test.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparison with SIM",
                "sec_num": null
            },
            {
                "text": "Thus, SntScore is more reliable than SIM. This high precision in SntScore indicates that it is important to take the similarities of article alignments into account when estimating the validity of sentence alignments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparison with SIM",
                "sec_num": null
            },
            {
                "text": "Much work has been done on article alignment. Collier et al. (1998) compared the use of machine translation (MT) with the use of bilingual dictionary term lookup (DTL) for news article alignment in Japanese and English. They revealed that DTL is superior to MT at high-recall levels. That is, if we want to obtain many article alignments, then DTL is more appropriate than MT. In a preliminary experiment, we also compared MT and DTL for the data in Table  1 and found that DTL was superior to MT. 11 These experimental results indicate that DTL is more appropriate than MT in article alignment. Matsumoto and Tanaka (2002) attempted to align Japanese and English news articles in the Nikkei Industrial Daily. Their method achieved a 97% precision in aligning articles, which is quite high. They also applied their method to NHK broadcast news. However, they obtained a lower precision of 69.8% for the NHK corpus. Thus, the precision of their method depends on the corpora. Therefore, it is not clear whether their method would have achieved a high accuracy in the Yomiuri corpus treated in this paper.",
                "cite_spans": [
                    {
                        "start": 46,
                        "end": 67,
                        "text": "Collier et al. (1998)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 596,
                        "end": 623,
                        "text": "Matsumoto and Tanaka (2002)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 450,
                        "end": 458,
                        "text": "Table  1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "6"
            },
            {
                "text": "There are two significant differences between our work and previous works.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "6"
            },
            {
                "text": "(1) We have proposed AVSIM, which uses similarities in sentences aligned by DP matching, as a reliable measure for article alignment. Previous works, on the other hand, have used measures based on bag-of-words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "6"
            },
            {
                "text": "(2) A more important difference is that we have actually obtained not only article alignments but also sentence alignments on a large scale. In addition to that, we are distributing the alignment data for research and educational purposes. This is the first attempt at a Japanese-English bilingual corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "6"
            },
            {
                "text": "As of late-October 2002, we have been distributing the alignment data discussed in this paper for research and educational purposes. 12 All the information on the article and sentence alignments are numerically encoded so that users who have the Yomiuri data can recover the results of alignments. The data also contains the top-150,000 one-to-one sentence alignments and the top-30,000 one-to-many sentence alignments as raw sentences. The Yomiuri Shimbun generously allowed us to distribute them for research and educational purposes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Availability",
                "sec_num": "7"
            },
            {
                "text": "We have sent over 30 data sets to organizations on their request. About half of these were NLPrelated. The other half were linguistics-related. A few requests were from high-school and junior-highschool teachers of English. A psycho-linguist was also included. It is obvious that people from both inside and outside the NLP community are interested 12 http://www.crl.go.jp/jt/a132/members/mutiyama/jea/index.html in this Japanese-English alignment data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Availability",
                "sec_num": "7"
            },
            {
                "text": "We have proposed two measures for extracting valid article and sentence alignments. The measure for article alignment uses similarities in sentences aligned by DP matching and that for sentence alignment uses similarities in articles aligned by CLIR. They enhance each other and allow valid article and sentence alignments to be reliably extracted from an extremely noisy Japanese-English parallel corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "8"
            },
            {
                "text": "We are distributing the alignment data discussed in this paper so that it can be used for research and educational purposes. It has attracted the attention of people both inside and outside the NLP community.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "8"
            },
            {
                "text": "We have applied our measures to a Japanese and English bilingual corpus and these are language independent. It is therefore reasonable to expect that they can be applied to any language pair and still retain good performance, particularly since their effectiveness has been demonstrated in such a disparate language pair as Japanese and English.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "8"
            },
            {
                "text": "http://chasen.aist-nara.ac.jp/ 2 http://www.iijnet.or.jp/edr/ 3 http://www.csse.monash.edu.au/\u02dcjwb/edict.html",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "http://trec.nist.gov/5 We split the Japanese articles into sentences by using simple heuristics and split the English articles into sentences by using MXTERMINATOR(Reynar and Ratnaparkhi, 1997).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "SIM(J i , E i ) is different from the similarity function used inUtsuro et al. (1994). We use SIM because it performed well in a preliminary experiment.7 http://www.cogsci.princeton.edu/\u02dcwn/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "The evaluations were done by the authors. We double checked the sample articles from 1996-2001. Our second checks are presented inTable 1. The ratio of categories in the first check were A=0.62, B=0.09, C=0.09, and D=0.20. Comparing these figures with those inTable 1, we concluded that first and second evaluations were consistent.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "Evaluations were done by the authors. We double checked all samples. In the 100 samples, there were a maximum of two or three where the first and second evaluations were different.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "We translated the English articles into Japanese with an MT system. We then used the translated English articles as queries and searched the database consisting of Japanese articles. The direction of translation was opposite to the one described in Section 3.1. Therefore this comparison is not as objective as it could be. However, it gives us some idea into a comparison of MT and DTL.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "A simple rule-based part of speech tagger",
                "authors": [
                    {
                        "first": "Eric",
                        "middle": [],
                        "last": "Brill",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "ANLP-92",
                "volume": "",
                "issue": "",
                "pages": "152--155",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Eric Brill. 1992. A simple rule-based part of speech tagger. In ANLP-92, pages 152-155.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Machine translation vs. dictionary term translation -a comparison for English-Japanese news article alignment",
                "authors": [
                    {
                        "first": "Nigel",
                        "middle": [],
                        "last": "Collier",
                        "suffix": ""
                    },
                    {
                        "first": "Hideki",
                        "middle": [],
                        "last": "Hirakawa",
                        "suffix": ""
                    },
                    {
                        "first": "Akira",
                        "middle": [],
                        "last": "Kumano",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "COLING-ACL'98",
                "volume": "",
                "issue": "",
                "pages": "263--267",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Nigel Collier, Hideki Hirakawa, and Akira Kumano. 1998. Ma- chine translation vs. dictionary term translation -a com- parison for English-Japanese news article alignment. In COLING-ACL'98, pages 263-267.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "A program for aligning sentences in bilingual corpora",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "William",
                        "suffix": ""
                    },
                    {
                        "first": "Kenneth",
                        "middle": [
                            "W"
                        ],
                        "last": "Gale",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Church",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Computational Linguistics",
                "volume": "19",
                "issue": "1",
                "pages": "75--102",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "William A. Gale and Kenneth W. Church. 1993. A program for aligning sentences in bilingual corpora. Computational Linguistics, 19(1):75-102.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Japan Electronic Industry Development Association JEIDA",
                "authors": [],
                "year": 2000,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Japan Electronic Industry Development Association JEIDA. 2000. Sizen Gengo Syori-ni Kan-suru Tyousa Houkoku-syo (Report on natural language processing systems).",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Automatic alignment of Japanese and English newspaper articles using an MT system and a bilingual company name dictionary",
                "authors": [
                    {
                        "first": "Kenji",
                        "middle": [],
                        "last": "Matsumoto",
                        "suffix": ""
                    },
                    {
                        "first": "Hideki",
                        "middle": [],
                        "last": "Tanaka",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "LREC-2002",
                "volume": "",
                "issue": "",
                "pages": "480--484",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kenji Matsumoto and Hideki Tanaka. 2002. Automatic align- ment of Japanese and English newspaper articles using an MT system and a bilingual company name dictionary. In LREC-2002, pages 480-484.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "A maximum entropy approach to identifying sentence boundaries",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Jeffrey",
                        "suffix": ""
                    },
                    {
                        "first": "Adwait",
                        "middle": [],
                        "last": "Reynar",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Ratnaparkhi",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jeffrey C. Reynar and Adwait Ratnaparkhi. 1997. A maxi- mum entropy approach to identifying sentence boundaries. In ANLP-97.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Some simple effective approximations to the 2-Poisson model for probabilistic weighted retrieval",
                "authors": [
                    {
                        "first": "S",
                        "middle": [
                            "E"
                        ],
                        "last": "Robertson",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Walker",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "SIGIR'94",
                "volume": "",
                "issue": "",
                "pages": "232--241",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. E. Robertson and S. Walker. 1994. Some simple effec- tive approximations to the 2-Poisson model for probabilistic weighted retrieval. In SIGIR'94, pages 232-241.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Bilingual text matching using bilingual dictionary and statistics",
                "authors": [
                    {
                        "first": "Takehito",
                        "middle": [],
                        "last": "Utsuro",
                        "suffix": ""
                    },
                    {
                        "first": "Hiroshi",
                        "middle": [],
                        "last": "Ikeda",
                        "suffix": ""
                    },
                    {
                        "first": "Masaya",
                        "middle": [],
                        "last": "Yamane",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "COLING'94",
                "volume": "",
                "issue": "",
                "pages": "1076--1082",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Takehito Utsuro, Hiroshi Ikeda, Masaya Yamane, Yuji Mat- sumoto, and Makoto Nagao. 1994. Bilingual text match- ing using bilingual dictionary and statistics. In COLING'94, pages 1076-1082.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "num": null,
                "type_str": "figure",
                "text": "R = number of correctly aligned sentence pairs total number of sentence pairs aligned in corpus P = number of correctly aligned sentence pairs total number of sentence pairs proposed by program"
            },
            "TABREF1": {
                "type_str": "table",
                "content": "<table/>",
                "num": null,
                "text": "Ratio of article alignments",
                "html": null
            },
            "TABREF3": {
                "type_str": "table",
                "content": "<table/>",
                "num": null,
                "text": "",
                "html": null
            },
            "TABREF5": {
                "type_str": "table",
                "content": "<table/>",
                "num": null,
                "text": "",
                "html": null
            },
            "TABREF7": {
                "type_str": "table",
                "content": "<table/>",
                "num": null,
                "text": "Statistics on AVSIM",
                "html": null
            },
            "TABREF9": {
                "type_str": "table",
                "content": "<table/>",
                "num": null,
                "text": "",
                "html": null
            }
        }
    }
}