File size: 56,149 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
{
    "paper_id": "P97-1017",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:15:21.028430Z"
    },
    "title": "Machine Transliteration",
    "authors": [
        {
            "first": "Kevin",
            "middle": [],
            "last": "Knight",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Southern California Marina del Rey",
                "location": {
                    "postCode": "90292",
                    "region": "CA"
                }
            },
            "email": "knight@isi.edu"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "It is challenging to translate names and technical terms across languages with different alphabets and sound inventories. These items are commonly transliterated, i.e., replaced with approximate phonetic equivalents. For example, computer in English comes out as ~ i/l:::'=-~-(konpyuutaa) in Japanese. Translating such items from Japanese back to English is even more challenging, and of practical interest, as transliterated items make up the bulk of text phrases not found in bilingual dictionaries. We describe and evaluate a method for performing backwards transliterations by machine. This method uses a generative model, incorporating several distinct stages in the transliteration process.",
    "pdf_parse": {
        "paper_id": "P97-1017",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "It is challenging to translate names and technical terms across languages with different alphabets and sound inventories. These items are commonly transliterated, i.e., replaced with approximate phonetic equivalents. For example, computer in English comes out as ~ i/l:::'=-~-(konpyuutaa) in Japanese. Translating such items from Japanese back to English is even more challenging, and of practical interest, as transliterated items make up the bulk of text phrases not found in bilingual dictionaries. We describe and evaluate a method for performing backwards transliterations by machine. This method uses a generative model, incorporating several distinct stages in the transliteration process.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Translators must deal with many problems, and one of the most frequent is translating proper names and technical terms. For language pairs like Spanish/English, this presents no great challenge: a phrase like Antonio Gil usually gets translated as Antonio Gil. However, the situation is more complicated for language pairs that employ very different alphabets and sound systems, such as Japanese/English and Arabic/English. Phonetic translation across these pairs is called transliteration. We will look at Japanese/English transliteration in this paper. Japanese frequently imports vocabulary from other languages, primarily (but not exclusively) from English. It has a special phonetic alphabet called katakana, which is used primarily (but not exclusively) to write down foreign names and loanwords. To write a word like golf bag in katakana, some compromises must be made. For example, Japanese has no distinct L and R sounds: the two English sounds collapse onto the same Japanese sound. A similar compromise must be struck for English H and F. Also, Japanese generally uses an alternating consonant-vowel structure, making it impossible to pronounce LFB without intervening vowels. Katakana writing is a syllabary rather than an alphabet--there is one symbol for ga (~I), another for gi (4e), another for gu (P'), etc. So the way to write gol]bag in katakana is =~'~ 7 ~ ~, ~, roughly pronounced goruhubaggu. Here are a few more examples:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "\u2022 \"J~ vY v (anj ira jyonson) Notice how the transliteration is more phonetic than orthographic; the letter h in Johnson does not produce any katakana. Also, a dot-separator (.) is used to separate words, but not consistently. And transliteration is clearly an information-losing operation: aisukuriimu loses the distinction between ice cream and I scream.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "Transliteration is not trivial to automate, but we will be concerned with an even more challenging problem--going from katakana back to English, i.e., back-transliteration. Automating backtransliteration has great practical importance in Japanese/English machine translation. Katakana phrases are the largest source of text phrases that do not appear in bilingual dictionaries or training corpora (a.k.a. \"not-found words\"). However, very little computational work has been done in this area; (Yamron et al., 1994) briefly mentions a patternmatching approach, while (Arbabi et al., 1994) discuss a hybrid neural-net/expert-system approach to (forward) transliteration.",
                "cite_spans": [
                    {
                        "start": 493,
                        "end": 514,
                        "text": "(Yamron et al., 1994)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 566,
                        "end": 587,
                        "text": "(Arbabi et al., 1994)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "The information-losing aspect of transliteration makes it hard to invert. Here are some problem instances, taken from actual newspaper articles: 1",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "ITexts used in ARPA Machine Translation evaluations, November 1994. English translations appear later in this paper.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "Here are a few observations about backtransliteration:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "\u2022 Back-transliteration is less forgiving than transliteration. There are many ways to write an English word like switch in katakana, all equally valid, but we do not have this flexibility in the reverse direction. For example, we cannot drop the t in switch, nor can we write arture when we mean archer.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "\u2022 Back-transliteration is harder than romanization, which is a (frequently invertible) transformation of a non-roman alphabet into roman letters. There are several romanization schemes for katakana writing--we have already been using one in our examples. Katakana Writing follows Japanese sound patterns closely, so katakana often doubles as a Japanese pronunciation guide. However, as we shall see, there are many spelling variations that complicate the mapping between Japanese sounds and katakana writing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "\u2022 Finally, not all katakana phrases can be \"sounded out\" by back-transliteration. Some phrases are shorthand, e.g., r] _ 7\" ~ (uaapuro) should be translated as word processing. Others are onomatopoetic and difficult to translate. These cases must be solved by techniques other than those described here.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "The most desirable feature of an automatic backtransliterator is accuracy. If possible, our techniques should also be:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "\u2022 portable to new language pairs like Arabic/English with minimal effort, possibly reusing resources.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "\u2022 robust against errors introduced by optical character recognition.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "\u2022 relevant to speech recognition situations in which the speaker has a heavy foreign accent.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "\u2022 able to take textual (topical/syntactic) context into account, or at least be able to return a ranked list of possible English translations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "Like most problems in computational linguistics, this one requires full world knowledge for a 100% solution. Choosing between Katarina and Catalina (both good guesses for ~' ~ ~ \")-) might even require detailed knowledge of geography and figure skating. At that level, human translators find the problem quite difficult as well. so we only aim to match or possibly exceed their performance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Angela Johnson TvzJ~",
                "sec_num": null
            },
            {
                "text": "Bilingual glossaries contain many entries mapping katakana phrases onto English phrases, e.g.: (aircraft carrier --, ~ T ~ ~ 7 I. ~ ~ ~3 7\" ). It is possible to automatically analyze such pairs to gain enough knowledge to accurately map new katakana phrases that come along, and learning approach travels well to other languages pairs. However, a naive approach to finding direct correspondences between English letters and katakana symbols suffers from a number of problems. One can easily wind up with a system that proposes iskrym as a back-transliteration of aisukuriimu. Taking letter frequencies into account improves this to a more plausible-looking isclim. Moving to real words may give is crime: the i corresponds to ai, the s corresponds to su, etc. Unfortunately, the correct answer here is ice cream. After initial experiments along these lines, we decided to step back and build a generative model of the transliteration process, which goes like this:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Modular Learning Approach",
                "sec_num": "2"
            },
            {
                "text": "1. An English phrase is written.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Modular Learning Approach",
                "sec_num": "2"
            },
            {
                "text": "2. A translator pronounces it in English.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Modular Learning Approach",
                "sec_num": "2"
            },
            {
                "text": "Japanese sound inventory.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The pronunciation is modified to fit the",
                "sec_num": "3."
            },
            {
                "text": "4. The sounds are converted into katakana.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The pronunciation is modified to fit the",
                "sec_num": "3."
            },
            {
                "text": "This divides our problem into five sub-problems. Fortunately, there are techniques for coordinating solutions to such sub-problems, and for using generative models in the reverse direction. These techniques rely on probabilities and Bayes' Rule. Suppose we build an English phrase generator that produces word sequences according to some probability distribution P(w). And suppose we build an English pronouncer that takes a word sequence and assigns it a set of pronunciations, again probabilistically, according to some P(plw). Given a pronunciation p, we may want to search for the word sequence w that maximizes P(wtp ). Bayes' Rule lets us equivalently maximize P(w). P(plw). exactly the two distributions we have modeled.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Katakana is written.",
                "sec_num": "5."
            },
            {
                "text": "Extending this notion, we settled down to build five probability distributions:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Katakana is written.",
                "sec_num": "5."
            },
            {
                "text": "1. P(w) --generates written English word sequences. 2. P(elw) --pronounces English word sequences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Katakana is written.",
                "sec_num": "5."
            },
            {
                "text": "3. P(jle) --converts English sounds into Japanese sounds.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Katakana is written.",
                "sec_num": "5."
            },
            {
                "text": "4. P(k[j) ~ converts Japanese sounds to katakana writing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Katakana is written.",
                "sec_num": "5."
            },
            {
                "text": "Given a katakana string o observed by OCR, we want to find the English word sequence w that maximizes the sum, over all e, j, and k, of P(w) \u2022 P(e[w). P(jle)\" P(kJj). P(olk)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "P(o{k) ~ introduces misspellings caused by optical character recognition (OCR).",
                "sec_num": "5."
            },
            {
                "text": "Following (Pereira et al., 1994 ; Pereira and Riley, 1996), we implement P(w) in a weighted finite-state acceptor (WFSA) and we implement the other distributions in weighted finite-state transducers (WF-STs). A WFSA is an state/transition diagram with weights and symbols on the transitions, making some output sequences more likely than others. A WFST is a WFSA with a pair of symbols on each transition, one input, and one output. Inputs and outputs may include the empty symbol e. Also following (Pereira and Riley, 1996) , we have implemented a general composition algorithm for constructing an integrated model P(zlz) from models P(~IY) and P(ylz), treating WFSAs as WFSTs with identical inputs and outputs. We use this to combine an observed katakana string with each of the models in turn. The result is a large WFSA containing all possible English translations. We use Dijkstra's shortest-path algorithm (Dijkstra, 1959) to extract the most probable one.",
                "cite_spans": [
                    {
                        "start": 10,
                        "end": 31,
                        "text": "(Pereira et al., 1994",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 499,
                        "end": 524,
                        "text": "(Pereira and Riley, 1996)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "P(o{k) ~ introduces misspellings caused by optical character recognition (OCR).",
                "sec_num": "5."
            },
            {
                "text": "The approach is modular. We can test each engine independently and be confident that their results are combined correctly. We do no pruning, so the final WFSA contains every solution, however unlikely. The only approximation is the Viterbi one, which searches for the best path through a WFSA instead of the best sequence (i.e., the same sequence does not receive bonus points for appearing more than once).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "P(o{k) ~ introduces misspellings caused by optical character recognition (OCR).",
                "sec_num": "5."
            },
            {
                "text": "This section describes how we designed and built each of our five models. For consistency, we continue to print written English word sequences in italics (golf ball), English sound sequences in all capitals (G AA L F B A0 L). Japanese sound sequences in lower case (g o r u h u b o o r u)and katakana sequences naturally ( =':t. 7 .~-~).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Probabilistic Models",
                "sec_num": "3"
            },
            {
                "text": "The first model generates scored word sequences, the idea being that ice cream should score higher than ice creme, which should score higher than nice kreem. We adopted a simple unigram scoring method that multiplies the scores of the known words and phrases in a sequence. Our 262,000-entry frequency list draws its words and phrases from the Wall Street Journal corpus, an online English name list, and an online gazeteer of place names.\" A portion of the WFSA looks like this:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word Sequences",
                "sec_num": "3.1"
            },
            {
                "text": "los / 0.000087 federal / O.O013~",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word Sequences",
                "sec_num": "3.1"
            },
            {
                "text": "angele s~ ~ month 10.000992",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word Sequences",
                "sec_num": "3.1"
            },
            {
                "text": "An ideal word sequence model would look a bit different. It would prefer exactly those strings which are actually grist for Japanese transliteratots. For example, people rarely transliterate auxiliary verbs, but surnames are often transliterated.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word Sequences",
                "sec_num": "3.1"
            },
            {
                "text": "We have approximated such a model by removing high-frequency words like has, an, are, am, were, their, and does, plus unlikely words corresponding to Japanese sound bites, like coup and oh.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word Sequences",
                "sec_num": "3.1"
            },
            {
                "text": "We also built a separate word sequence model containing only English first and last names. If we know (from context) that the transliterated phrase is a personal name, this model is more precise.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word Sequences",
                "sec_num": "3.1"
            },
            {
                "text": "The next WFST converts English word sequences into English sound sequences. We use the English phoneme inventory from the online CMU Pronunciation Dictionary, 3 minus the stress marks. This gives a total of 40 sounds, including 14 vowel sounds (e.g., AA, AE, UW), 25 consonant sounds (e.g., K, 1tlt, It), plus our special symbol (PAUSE). The dictionary has pronunciations for 110,000 words, and we organized a phoneme-tree based WFST from it: E:E :E E:IH \u00a2;::K Note that we insert an optional PAUSE between word pronunciations. Due to memory limitations, we only used the 50,000 most frequent words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Words to English Sounds",
                "sec_num": "3.2"
            },
            {
                "text": "We originally thought to build a general letterto-sound WFST, on the theory that while wrong (overgeneralized) pronunciations might occasionally be generated, Japanese transliterators also mispronounce words. However, our letter-to-sound WFST did not match the performance of Japanese translit-2Available from the ACL Dat~ Collection Initiative. 3ht%p ://~ww. speech, cs. cmu. edu/cgi-bin/cmudict. erators, and it turns out that mispronunciations are modeled adequately in the next stage of the cascade.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Words to English Sounds",
                "sec_num": "3.2"
            },
            {
                "text": "Next, we map English sound sequences onto Japanese sound sequences. This is an inherently information-losing process, as English R and L sounds collapse onto Japanese r, the 14 English vowel sounds collapse onto the 5 Japanese vowel sounds, etc. We face two immediate problems:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "1. What is the target Japanese sound inventory?",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "2. How can we build a WFST to perform the sequence mapping?",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "An obvious target inventory is the Japanese syllabary itself, written down in katakana (e.g., \") or a roman equivalent (e.g., hi). With this approach, the English sound K corresponds to one of 2 (ka), -' Y (ki), ~' (ku), ~ (ke), or = (ko), depending on its context. Unfortunately, because katakana is a syllabary, we would be unable to express an obvious and useful generalization, namely that English g usually corresponds to Japanese k, independent of context. Moreover, the correspondence of Japanese katakana writing to Japanese sound sequences is not perfectly one-to-one (see next section), so an independent sound inventory is well-motivated in any case. Our Japanese sound inventory includes 39 symbols: 5 vowel sounds, 33 consonant sounds (including doubled consonants like kk), and one special symbol (pause). An English sound sequence like (P R OW PAUSE S AA K ER) might map onto a Japanese sound sequence like (p u r o pause s a kk a a). Note that long Japanese vowel sounds are written with two symbols (a a) instead of just one (an). This scheme is attractive because Japanese sequences are almost always longer than English sequences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "Our WFST is learned automatically from 8,000 pairs of English/Japanese sound sequences, e.g., ( (s AA K ER) --* (s a kk a a)). We were able to produce these pairs by manipulating a small Englishkatakana glossary. For each glossary entry, we converted English words into English sounds using the previous section's model, and we converted katakana words into Japanese sounds using the next section's model. We then applied the estimationmaximization (EM) algorithm (Baum, 1972) to generate symbol-mapping probabilities, shown in Figure ",
                "cite_spans": [
                    {
                        "start": 464,
                        "end": 476,
                        "text": "(Baum, 1972)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 528,
                        "end": 534,
                        "text": "Figure",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "L OW L OW l /\\ /\\ I r o o r o o",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "2. For each pair, assign an equal weight to each of its alignments, such that those weights sum to 1. In the case above, each alignment gets a weight of 0.5. 3. For each of the 40 English sounds, count up instances of its different mappings, as observed in all alignments of all pairs. Each alignment contributes counts in proportion to its own weight.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "4. For each of the 40 English sounds, normalize the scores of the Japanese sequences it maps to, so that the scores sum to 1. These are the symbolmapping probabilities shown in Figure 1 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 177,
                        "end": 185,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "5. Recompute the alignment scores. Each alignment is scored with the product of the scores of the symbol mappings it contains.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "6. Normalize the alignment scores. Scores for each pair's alignments should sum to 1.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "7. Repeat 3-6 until the symbol-mapping probabilities converge.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "We then build a WFST directly from the symbolmapping probabilities:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "PAUSE:pause AA:a / 0 024 ~ AA:o / 0,018 o < --o",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "Our WFST has 99 states and 283 arcs. We have also built models that allow individual English sounds to be \"swallowed\" (i.e., produce zero Japanese sounds). However, these models are expensive to compute (many more alignments) and lead to a vast number of hypotheses during WFST composition. Furthermore, in disallowing \"swallowing,\" we were able to automatically remove hundreds of potentially harmful pairs from our training set, e.g., ((B AA R B ER SH AA P) --(b a a b a a)). Because no alignments are possible, such pairs are skipped by the learning algorithm; cases like these must be solved by dictionary lookup anyway. Only two pairs failed to align when we wished they had--both involved turning English Y UW into Japanese u, as in ((Y UW K AH L EY L IY) ~ (u k u r e r e)).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "Note also that our model translates each English sound without regard to context. We have built also context-based models, using decision trees recoded as WFSTs. For example, at the end of a word, English T is likely to come out as (t o) rather than (t). However, context-based models proved unnecessary : English sounds (in capitals) with probabilistic mappings to Japanese sound sequences (in lower case), as learned by estimation-maximization. Only mappings with conditional probabilities greater than 1% are shown, so the figures may not sum to 1.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "for back-transliteration. 4 They are more useful for English-to-Japanese forward transliteration.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "English Sounds to Japanese Sounds",
                "sec_num": "3.3"
            },
            {
                "text": "To map Japanese sound sequences like (m o o t a a) onto katakana sequences like (モーター), we manually constructed two WFSTs. Composed together, they yield an integrated WFST with 53 states and 303 arcs. The first WFST simply merges long Japanese vowel sounds into new symbols aa, ii, uu, ee, and oo. The second WFST maps Japanese sounds onto katakana symbols. The basic idea is to consume a whole syllable worth of sounds before producing any katakana, e.g.:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Japanese sounds to Katakana",
                "sec_num": "3.4"
            },
            {
                "text": "This fragment shows one kind of spelling variation in Japanese: long vowel sounds (oo) are usually written with a long vowel mark (~-) but are sometimes written with repeated katakana (~). We combined corpus analysis with guidelines from a Japanese textbook (Jorden and Chaplin, 1976) to turn up many spelling variations and unusual katakana symbols:",
                "cite_spans": [
                    {
                        "start": 258,
                        "end": 284,
                        "text": "(Jorden and Chaplin, 1976)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":-:,0951",
                "sec_num": null
            },
            {
                "text": "\u2022 the sound sequence (j \u00b1) is usually written ~, but occasionally \u00a2:. \u2022 (g u a) is usually ~'T, but occasionally YT.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":-:,0951",
                "sec_num": null
            },
            {
                "text": "\u2022 (w o o) is variously ~z'---, ~r-, or with a special, old-style katakana for wo.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":-:,0951",
                "sec_num": null
            },
            {
                "text": "\u2022 (y e) may be =I=, d ~, or d ~.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":-:,0951",
                "sec_num": null
            },
            {
                "text": "\u2022 (w i)is either #~\" or ~ 4.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":-:,0951",
                "sec_num": null
            },
            {
                "text": "\u2022 (n y e) is a rare sound sequence, but is written -~* when it occurs. \u2022 (1: y u) is rarer than (ch y u), but is written ~-~-when it occurs. and so on.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":-:,0951",
                "sec_num": null
            },
            {
                "text": "Spelling variation is clearest in cases where an English word like switch shows up transliterated variously (:~ ~\" :, \u00a2-, :~4 ~, \u00a2-, x ~, 4 ~, 4-) in different dictionaries. Treating these variations as an equivalence class enables us to learn general sound mappings even if our bilingual glossary adheres to a single narrow spelling convention. We do not, however, 4And harmfully restrictive in their unsmoothed incarnations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":-:,0951",
                "sec_num": null
            },
            {
                "text": "generate all katakana sequences with this model; for example, we do not output strings that begin with a subscripted vowel katakana. So this model also serves to filter out some ill-formed katakana sequences, possibly proposed by optical character recognition.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":-:,0951",
                "sec_num": null
            },
            {
                "text": "Perhaps uncharitably, we can view optical character recognition (OCR) as a device that garbles perfectly good katakana sequences. Typical confusions made by our commercial OCR system include ~ for ~-', \u00a2-for -)', T for 7, and 7 for 7\". To generate pre-OCR text, we collected 19,500 characters worth of katakana words, stored them in a file, and printed them out. To generate post-OCR text, we OCR'd the printouts. We then ran the EM algorithm to determine symbol-mapping (\"garbling\") probabilities. Here is part of that table: This model outputs a superset of the 81 katakana symbols, including spurious quote marks, alphabetic symbols, and the numeral 7.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Katakana to OCR",
                "sec_num": "3.5"
            },
            {
                "text": "We can now use the models to do a sample backtransliteration. We start with a katakana phrase as observed by OCR. We then serially compose it with the models, in reverse order. Each intermediate stage is a WFSA that encodes many possibilities. The final stage contains all back-transliterations suggested by the models, and we finally extract the best one.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example",
                "sec_num": "4"
            },
            {
                "text": "We start with the masutaazutoonamento problem from Section 1. Our OCR observes:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example",
                "sec_num": "4"
            },
            {
                "text": "~ x ~,--;~\u00b0 1. ---/-j :/ 1.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example",
                "sec_num": "4"
            },
            {
                "text": "This string has two recognition errors: ク (ku) for タ (ta), and チ (chi) for ナ (na). We turn the string into a chained 12-state/11-arc WFSA and compose it with the P(k|o) model. This yields a fatter 12-state/15-arc WFSA, which accepts the correct spelling at a lower probability. Next comes the P(j|k) model, which produces a 28-state/31-arc WFSA whose highest-scoring sequence is: mas ut aazut o o ch im ent o Next comes P(e|j), yielding a 62-state/241-arc WFSA whose best sequence is:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example",
                "sec_num": "4"
            },
            {
                "text": "Next to last comes P(w|e), which results in a 2982-state/4601-arc WFSA whose best sequence (out of myriads) is:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "M AE S T AE AE DH UH T AO AO CH IH M EH N T AO",
                "sec_num": null
            },
            {
                "text": "This English string is closest phonetically to the Japanese, but we are willing to trade phonetic proximity for more sensical English; we rescore this WFSA by composing it with P(w) and extract the best translation: masters tournament (Other Section 1 examples are translated correctly as earth day and robert sean leonard.)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "masters tone am ent awe",
                "sec_num": null
            },
            {
                "text": "We have performed two large-scale experiments, one using a full-language P(w) model, and one using a personal name language model. In the first experiment, we extracted 1449 unique katakana phrases from a corpus of 100 short news articles. Of these, 222 were missing from an online 100,000-entry bilingual dictionary. We backtransliterated these 222 phrases. Many of the translations are perfect: technical program, sex scandal, omaha beach, new york times, ramon diaz. Others are close: tanya harding, nickel simpson, danger washington, world cap. Some miss the mark: nancy care again, plus occur, patriot miss real. While it is difficult to judge overall accuracy--some of the phrases are onomatopoetic, and others are simply too hard even for good human translators--it is easier to identify system weaknesses, and most of these lie in the P(w) model. For example, nancy kerrigan should be preferred over nancy care again.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "5"
            },
            {
                "text": "In a second experiment, we took katakana versions of the names of 100 U.S. politicians, e.g.: -Jm :/. 7' =--(jyon.buroo), T~/~ . ~'0' I\" (a.rhonsu.dama~;'\u00a2o), and \"~'4 3' \u2022 ~7,f :/ (maiku.de~ain). We back-transliterated these by machine and asked four human subjects to do the same. These subjects were native English speakers and news-aware: we gave them brief instructions, examples, and hints. The results were as follows: There is room for improvement on both sides. Being English speakers, the human subjects were good at English name spelling and U.S. politics, but not at Japanese phonetics. A native Japanese speaker might be expert at the latter but not the former. People who are expert in all of these areas, however, are rare.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "5"
            },
            {
                "text": "On the automatic side, many errors can be corrected. A first-name/last-name model would rank richard bryan more highly than richard brian. A bigram model would prefer orrin hatch over olin hatch. Other errors are due to unigram training problems, or more rarely, incorrect or brittle phonetic models. For example, \"Long\" occurs much more often than \"Ron\" in newspaper text, and our word selection does not exclude phrases like \"Long Island.\" So we get long wyden instead of ron wyden. Rare errors are due to incorrect or brittle phonetic models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "5"
            },
            {
                "text": "Still the machine's performance is impressive. When word separators (・) are removed from the katakana phrases, rendering the task exceedingly difficult for people, the machine's performance is unchanged. When we use OCR, 7% of katakana tokens are mis-recognized, affecting 50% of test strings, but accuracy only drops from 64% to 52%.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "5"
            },
            {
                "text": "We have presented a method for automatic backtransliteration which, while far from perfect, is highly competitive. It also achieves the objectives outlined in Section 1. It ports easily to new language pairs; the P(w) and P(e|w) models are entirely reusable, while other models are learned automatically. It is robust against OCR noise, in a rare example of high-level language processing being useful (necessary, even) in improving low-level OCR. We plan to replace our shortest-path extraction algorithm with one of the recently developed k-shortest path algorithms (Eppstein, 1994) . We will then return a ranked list of the k best translations for subsequent contextual disambiguation, either by machine or as part of an interactive man-machine system. We also plan to explore probabilistic models for Arabic/English transliteration. Simply identifying which Arabic words to transliterate is a difficult task in itself; and while Japanese tends to insert extra vowel sounds, Arabic is usually written without any (short) vowels. Finally, it should also be possible to embed our phonetic shift model P(j|e) inside a speech recognizer, to help adjust for a heavy Japanese accent, although we have not experimented in this area.",
                "cite_spans": [
                    {
                        "start": 568,
                        "end": 584,
                        "text": "(Eppstein, 1994)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "6"
            }
        ],
        "back_matter": [
            {
                "text": "We would like to thank Alton Earl Ingram, Yolanda Gil, Bonnie Glover-Stalls, Richard Whitney, and Kenji Yamada for their helpful comments. We would",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            },
            {
                "text": "also like to thank our sponsors at the Department of Defense.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "annex",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Algorithms for Arabic name transliteration",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Arbabi",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [
                            "M"
                        ],
                        "last": "Fischthal",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [
                            "C"
                        ],
                        "last": "Cheng",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Bart",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "IBM J. Res. Develop",
                "volume": "38",
                "issue": "2",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Arbabi, S. M. Fischthal, and V. C. Cheng andd E. Bart. 1994. Algorithms for Arabic name transliteration. IBM J. Res. Develop., 38(2).",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "An inequality and associated maximization technique in statistical estimation of probabilistic functions of a Markov process",
                "authors": [
                    {
                        "first": "L",
                        "middle": [
                            "E"
                        ],
                        "last": "Baum",
                        "suffix": ""
                    }
                ],
                "year": 1972,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "L. E. Baum. 1972. An inequality and associated maximization technique in statistical estimation ofprobabilistic functions of a Markov process. In- equalities, 3.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "A note on two problems in connexion with graphs",
                "authors": [
                    {
                        "first": "E",
                        "middle": [
                            "W"
                        ],
                        "last": "Dijkstra",
                        "suffix": ""
                    }
                ],
                "year": 1959,
                "venue": "Numerische Mathematik",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "E. W. Dijkstra. 1959. A note on two problems in connexion with graphs. Numerische Malhematik, 1.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Finding the k shortest paths",
                "authors": [
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Eppstein",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proc. 35th Symp. Foundations of Computer Science",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "David Eppstein. 1994. Finding the k shortest paths. In Proc. 35th Syrup. Foundations of Computer Science. IEEE.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Reading Japanese",
                "authors": [
                    {
                        "first": "E",
                        "middle": [
                            "H"
                        ],
                        "last": "Jorden",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [
                            "I"
                        ],
                        "last": "Chaplin",
                        "suffix": ""
                    }
                ],
                "year": 1976,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "E. H. Jorden and H. I. Chaplin. 1976. Reading Japanese. Yale University Press, New Haven.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Speech recognition by composition of weighted finite automata. In preprint",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Pereira",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Riley",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Pereira and M. Riley. 1996. Speech recognition by composition of weighted finite automata. In preprint, cmp-lg/9603001.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Weighted rational transductions and their application to human language processing",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Pereira",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Riley",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Sproat",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proc. ARPA Human Language Technology Workshop",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Pereira, M. Riley, and R. Sproat. 1994. Weighted rational transductions and their application to hu- man language processing. In Proe. ARPA Human Language Technology Workshop.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "The automatic component of the LINGSTAT machine-aided translation system",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Yamron",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Cant",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Demedts",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Dietzel",
                        "suffix": ""
                    },
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Ito",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proc. ARPA Workshop on Human Language Technology",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Yamron, J. Cant, A. Demedts, T. Dietzel, and Y. Ito. 1994. The automatic component of the LINGSTAT machine-aided translation sys- tem. In Proc. ARPA Workshop on Human Lan- guage Technology.",
                "links": null
            }
        },
        "ref_entries": {}
    }
}