File size: 53,912 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
{
    "paper_id": "P95-1002",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T08:33:45.875800Z"
    },
    "title": "Automatic Induction of Finite State Transducers for Simple Phonological Rules",
    "authors": [
        {
            "first": "Daniel",
            "middle": [],
            "last": "Gildea",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of California at Berkeley",
                "location": {}
            },
            "email": "gildea@icsi.berkeley.edu"
        },
        {
            "first": "Daniel",
            "middle": [],
            "last": "Jurafsky",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of California at Berkeley",
                "location": {}
            },
            "email": "jurafsky@icsi.berkeley.edu"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper presents a method for learning phonological rules from sample pairs of underlying and surface forms, without negative evidence. The learned rules are represented as finite state transducers that accept underlying forms as input and generate surface forms as output. The algorithm for learning them is an extension of the OSTIA algorithm for learning general subsequential finite state transducers. Although OSTIA is capable of learning arbitrary s.-f.s.t's in the limit, large dictionaries of actual English pronunciations did not give enough samples to correctly induce phonological rules. We then augmented OSTIA with two kinds of knowledge specific to natural language phonology, biases from \"universal grammar\". One bias is that underlying phones are often realized as phonetically similar or identical surface phones. The other biases phonological rules to apply across natural phonological classes. The additions helped in learning more compact, accurate, and general transducers than the unmodified OSTIA algorithm. An implementation of the algorithm successfully learns a number of English postlexical rules.",
    "pdf_parse": {
        "paper_id": "P95-1002",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper presents a method for learning phonological rules from sample pairs of underlying and surface forms, without negative evidence. The learned rules are represented as finite state transducers that accept underlying forms as input and generate surface forms as output. The algorithm for learning them is an extension of the OSTIA algorithm for learning general subsequential finite state transducers. Although OSTIA is capable of learning arbitrary s.-f.s.t's in the limit, large dictionaries of actual English pronunciations did not give enough samples to correctly induce phonological rules. We then augmented OSTIA with two kinds of knowledge specific to natural language phonology, biases from \"universal grammar\". One bias is that underlying phones are often realized as phonetically similar or identical surface phones. The other biases phonological rules to apply across natural phonological classes. The additions helped in learning more compact, accurate, and general transducers than the unmodified OSTIA algorithm. An implementation of the algorithm successfully learns a number of English postlexical rules.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": ". first observed that traditional phonological rewrite rules can be expressed as regular relations if one accepts the constraint that no rule may reapply directly to its own output. This means that finite state transducers can be used to represent phonological rules, greatly simplifying the problem of parsing the output of phonological rules in order to obtain the underlying, lexical forms (Karttunen 1993) . In this paper we explore another consequence of FST models of phonological rules: their weaker generative capacity also makes them easier to learn. We describe our preliminary algorithm for learning rules from sample pairs of input and output strings, and the results we obtained.",
                "cite_spans": [
                    {
                        "start": 393,
                        "end": 409,
                        "text": "(Karttunen 1993)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In order to take advantage of recent work in transducer induction, we have chosen to represent rules as subsequential finite state transducers. Subsequential finite state transducers are a subtype of finite state transducers with the following properties:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "1. The transducer is deterministic, that is, there is only one arc leaving a given state for each input symbol.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "2. Each time a transition is made, exactly one symbol of the input string is consumed.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "3. A unique end of string symbol is introduced. At the end of each input string, the transducer makes an additional transition on the end of string symbol.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "4. All states are accepting. The length of the output strings associated with a subsequential transducer's transitions is not constrained.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The subsequential transducer for the English flapping rule in 1 is shown in Figure 1 ; an underlying t is realized as a flap after a stressed vowel and any number of r's, and before an unstressed vowel.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 76,
                        "end": 84,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "(1) t ~ dx / (r r* V 2 The OSTIA Algorithm",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Our phonological-rule induction algorithm is based on augmenting the Onward Subsequential Transducer Inference Algorithm (OSTIA) of Oncina et al. (1993) . This section outlines the OSTIA algorithm to provide background for the modifications that follow. OSTIA takes as input a training set of input-output pairs. The algorithm begins by constructing a tree transducer which covers all the training samples. The root of the tree is the transducer's initial state, and each leaf of the tree corresponds to the end of an input sample.",
                "cite_spans": [
                    {
                        "start": 132,
                        "end": 152,
                        "text": "Oncina et al. (1993)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The output symbols are placed as near the root of the tree as possible while avoiding conflicts in the output of a given arc. An example of the result of this initial tree construction is shown in Figure 2 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 197,
                        "end": 205,
                        "text": "Figure 2",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "At this point, the transducer covers all and only the strings of the training set. OSTIA now attempts to generalize the transducer, by merging some of its states together. For each pair of states (s, t) in the transducer, the algorithm will attempt to merge s with t, building a new #:t : Subsequential Transducer for English Flapping: Labels on arcs are of the form (input symbol):(output symbol). Labels with no colon indicate the same input and output symbols. 'V' indicates any unstressed vowel, \"v\" any stressed vowel, 'dx' a flap, and 'C' any consonant other than 't', 'r' or 'dx'. '#' is the end of string symbol. far. However, when trying to learn phonological rules from linguistic data, the necessary training set may not be available. In particular, systematic phonological constraints such as syllable structure may rule out the necessary strings. The algorithm does not have the language bias which would allow it to avoid linguistically unnatural transducers. b:bae ae:0 n:nd t:0 mml er .\"dxer #:t state with all of the incoming and outgoing transitions of s and f. The result of the first merging operation on the transducer of Figure 2 is shown in Figure 3 , and the end result of the OSTIA alogrithm in shown in Figure 4 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 1143,
                        "end": 1151,
                        "text": "Figure 2",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 1164,
                        "end": 1172,
                        "text": "Figure 3",
                        "ref_id": "FIGREF1"
                    },
                    {
                        "start": 1229,
                        "end": 1237,
                        "text": "Figure 4",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The OSTIA algorithm can be proven to learn any subsequential relation in the limit. That is, given an infinite sequence of valid input/output pairs, it will at some point derive the target transducer from the samples seen so For example, OSTIA's tendency to produce overly \"clumped\" transducers is illustrated by the arcs with out \"b ae\" and \"n d\" in the transducer in Figure 4 , or even Figure 2. OSTIA's default behavior is to emit the remainder of the output string for a transduction as soon as enough input symbols have been seen to uniquely identify the input string in the training set. This results in machines which may, seemingly at random, insert or delete sequences of four or five phonemes, something which is linguistically implausible. In addition, the incorrect distribution of output symbols prevents the optimal merging of states during the learning process, resulting in large and inaccurate transducers.",
                "cite_spans": [
                    {
                        "start": 388,
                        "end": 394,
                        "text": "Figure",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 369,
                        "end": 377,
                        "text": "Figure 4",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Problems Using OSTIA to learn Phonological Rules",
                "sec_num": "3"
            },
            {
                "text": "Another example of an unnatural generalization is shown in 4, the final transducer induced by OSTIA on the three word training set of Figure 2 . For example, the transducer of Figure 4 will insert an 'ae' after any 'b', and delete any 'ae' from the input. Perhaps worse, it will fail completely upon seeing any symbol other than 'er' or end-of-string after a 't'. While it might be unreasonable to expect any transducer trained on three samples to be perfect, the transducer of Figure 4 illustrates on a small scale how the OSTIA algorithm might be improved.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 134,
                        "end": 142,
                        "text": "Figure 2",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 176,
                        "end": 184,
                        "text": "Figure 4",
                        "ref_id": "FIGREF2"
                    },
                    {
                        "start": 478,
                        "end": 486,
                        "text": "Figure 4",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Problems Using OSTIA to learn Phonological Rules",
                "sec_num": "3"
            },
            {
                "text": "Similarly, if the OSTIA algorithm is training on cases of flapping in which the preceding environment is every stressed vowel but one, the algorithm has no way of knowing that it can generalize the environment to all stressed vowels. The algorithm needs knowledge about classes of phonemes to fill in accidental gaps in training data coverage.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Problems Using OSTIA to learn Phonological Rules",
                "sec_num": "3"
            },
            {
                "text": "Our first modification of OSTIA was to add the bias that, as a default, a phoneme is realized as itself, or as a similar phone. Our algorithm guesses the most probable phoneme to phoneme alignment between the input and output strings, and uses this information to distribute the output symbols among the arcs of the initial tree transducer. This is demonstrated for the word \"importance\" in The modification proceeds in two stages. First, a dynamic programming method is used to compute a correspondence between input and output phonemes. The alignment uses the algorithm of Wagner & Fischer (1974) , which calculates the insertions, deletions, and substitutions which make up the minimum edit distance between the underlying and surface strings. The costs of edit operations are based on phonetic features; we used 26 binary articulatory features. The cost function for substitutions was equal to the number of features changed between the two phonemes. The cost of insertions and deletions was 6 (roughly one quarter the maximum possible substitution cost). From the sequence of edit operations, a mapping of output phonemes to input phonemes is generated according to the following rules:",
                "cite_spans": [
                    {
                        "start": 575,
                        "end": 598,
                        "text": "Wagner & Fischer (1974)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using Alignment Information",
                "sec_num": "4"
            },
            {
                "text": "\u2022 Any phoneme maps to an input phoneme for which it substitutes",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using Alignment Information",
                "sec_num": "4"
            },
            {
                "text": "\u2022 Inserted phonemes map to the input phoneme immediately following the first substitution to the left of the inserted phoneme Second, when adding a new arc to the tree, all the unused output phonemes up to and including those which map to the arc's input phoneme become the new arc's output, and are now marked as having been used. When walking down branches of the tree to add a new input/output sample, the longest common prefix, n, of the sample's unused output and the output of each arc is calculated. The next n symbols of the transduction's output are now marked as having been used. If the length, l, of the arc's output string is greater than n, it is necessary to push back the last l -n symbols onto arcs further down the tree. A tree transducer constructed by this process is shown in Figure 7 , for comparison with the unaligned version in Figure 2 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 797,
                        "end": 805,
                        "text": "Figure 7",
                        "ref_id": null
                    },
                    {
                        "start": 853,
                        "end": 861,
                        "text": "Figure 2",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Using Alignment Information",
                "sec_num": "4"
            },
            {
                "text": "Results of our alignment algorithm are summarized in \u00a76. The denser distribution of output symbols resulting from the alignment constrains the merging of states early in the merging loop of the algorithm. Interestingly, preventing the wrong states from merging early on allows more merging later, and results in more compact transducers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using Alignment Information",
                "sec_num": "4"
            },
            {
                "text": "In order to allow OSTIA to make natural generalizations in its rules, we added a decision tree to each state of the machine, describing the behavior of that state. For example, the decision tree for state 2 of the machine in Figure  1 is shown in Figure 8 . Note that if the underlying phone is an unstressed vowel ([-cons,-stress]), the machine outputs a flap, followed by the underlying vowel, otherwise it outputs a 't' followed by the underlying phone. The decision trees describe the behavior of the machine at a given state in terms of the next input symbol by generalizing from the arcs leaving the state. The decision trees classify the arcs leaving each state based on the arc's input symbol into groups with the same behavior. The same 26 binary phonetic features used in calculating edit distance were used to classify phonemes in the decision trees. Thus the branches of the decision tree are labeled with phonetic feature values of the arc's input symbol, and the leaves of the tree correspond to the different behaviors. By an arc's behavior, we mean its output string considered as a function of its input phoneme, and its destination state. Two arcs are considered to have the same behavior if they agree each of the following:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 225,
                        "end": 234,
                        "text": "Figure  1",
                        "ref_id": null
                    },
                    {
                        "start": 247,
                        "end": 255,
                        "text": "Figure 8",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Generalizing Behavior With Decision Trees",
                "sec_num": "5"
            },
            {
                "text": "\u2022 the index i of the output symbol corresponding to the input symbol (determined from the alignment procedure)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generalizing Behavior With Decision Trees",
                "sec_num": "5"
            },
            {
                "text": "\u2022 the difference of the phonetic feature vectors of the input symbol and symbol i of the output string \u2022 the suffix of the output string beginning at position i+1",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generalizing Behavior With Decision Trees",
                "sec_num": "5"
            },
            {
                "text": "\u2022 the destination state",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generalizing Behavior With Decision Trees",
                "sec_num": "5"
            },
            {
                "text": "After the process of merging states terminates, a decision tree is induced at each state to classify the outgoing arcs. Figure 9 shows a tree induced at the initial state of the transducer for flapping.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 120,
                        "end": 128,
                        "text": "Figure 9",
                        "ref_id": "FIGREF5"
                    }
                ],
                "eq_spans": [],
                "section": "Generalizing Behavior With Decision Trees",
                "sec_num": "5"
            },
            {
                "text": "Using phonetic features to build a decision tree guarantees that each leaf of the tree represents a natural class of phonemes, that is, a set of phonemes that can be described by specifying values for some subset of the phonetic features. Thus if we think of the transducer as a set of rewrite rules, we can now express the context of each rule as a regular expression of natural classes of preceding phonemes. stress j\"",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generalizing Behavior With Decision Trees",
                "sec_num": "5"
            },
            {
                "text": ".. Some induced transducers may need to be generalized even further, since the input transducer to the decision tree learning may have arcs which are incorrect merely because of accidental prior structure. Consider again the English flapping rule, which applies in the context of a preceding stressed vowel. Our algorithm first learned a transducer whose decision tree is shown in Figure 9 . In this transducer all arcs leaving state 0 correctly lead to the flapping state on stressed vowels, except for those stressed vowels which happen not to have occurred in the training set. For these unseen vowels (which consisted of the rounded diphthongs 'oy' and 'ow' with secondary stress), the transducers incorrectly returns to state 0. In this case, we wish the algorithm to make the generalization that the rule applies after all stressed vowels. This type of generalization can be accomplished by pruning the decision trees at each state of the machine. Pruning is done by stepping through each state of the machine and pruning as many decision nodes as possible at each state. The entire training set of transductions is tested after each branch is pruned. If any errors are found, the outcome of the pruned node's other child is tested. If errors are still found, the pruning operation is reversed. This process continues at the fringe of the decision tree until no more pruning is possible. Figure 10 shows the correct decision tree for flapping, obtained by pruning the tree in Figure 9 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 381,
                        "end": 389,
                        "text": "Figure 9",
                        "ref_id": "FIGREF5"
                    },
                    {
                        "start": 1394,
                        "end": 1403,
                        "text": "Figure 10",
                        "ref_id": null
                    },
                    {
                        "start": 1482,
                        "end": 1490,
                        "text": "Figure 9",
                        "ref_id": "FIGREF5"
                    }
                ],
                "eq_spans": [],
                "section": "Generalizing Behavior With Decision Trees",
                "sec_num": "5"
            },
            {
                "text": "The process of pruning the decision trees is complicated by the fact that the pruning operations allowed at one state depend on the status of the trees at each other state. Thus it is necessary to make several passes through the states, attempting additional pruning at each pass, until no more improvement is possible. In addition, testing each pruning operation against the entire training set is expensive, but in the case of synthetic data it gives the best results. For other applications it may be desirable to keep a cross validation set for this purpose.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generalizing Behavior With Decision Trees",
                "sec_num": "5"
            },
            {
                "text": "We tested our induction algorithm using a synthetic corpus of 99,279 input/output pairs. Each pair consisted of an underlying and a surface pronunciation of an individual word of English. The underlying string of each pair was taken from the phoneme-based CMU pronunciation dictionary. The surface string was generated from each underlying form by mechanically applying the one or more rules we were attempting to induce in each experiment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "6"
            },
            {
                "text": "In our first experiment, we applied the flapping rule in (2) to training corpora of between 6250 and 50,000 words. Figure 11 shows the transducer induced from As can be seen from Figure 12 , the use of alignment information in creating the initial tree transducer dramatically decreases the number of states in the learned transducer as well as the error performance on test data. The improved algorithm induced a flapping transducer with the minimum number of states with as few as 6250 samples. The use of alignment information also reduced the learning time; the additional cost of calculating alignments is more than compensated for by quicker merging of states.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 115,
                        "end": 124,
                        "text": "Figure 11",
                        "ref_id": "FIGREF6"
                    },
                    {
                        "start": 179,
                        "end": 188,
                        "text": "Figure 12",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "6"
            },
            {
                "text": "The algorithm also successfully induced transducers with the minimum number of states for the t-insertion and t-deletion rules below, given only 6250 samples.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "6"
            },
            {
                "text": "In our second experiment, we applied our learning algorithm to a more difficult problem: inducing multiple rules at once. A data set was constructed by applying the t-insertion rule in (3), the t-deletion rule in (4) and the flapping rule already seen in (2) one after another. As is seen in Figure 13 , a transducer of minimum size (five states) was obtained with 12500 or more sample transductions.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 292,
                        "end": 301,
                        "text": "Figure 13",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "6"
            },
            {
                "text": "(",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "6"
            },
            {
                "text": "3) \u2205 \u2192 t / n __ s (4) t \u2192 \u2205 / n __ [+vocalic, -stress]",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "6"
            },
            {
                "text": "The effects of adding decision trees at each state of the machine for the composition of t-insertion, t-deletion and flapping are shown in Figure 14 . An examination of the few errors (three samples) in the induced flapping and three-rule transducers points out a flaw in our model. While the learned transducer correctly makes the generalization that flapping occurs after any stressed vowel, it does not flap after two stressed vowels in a row. This is possible because no samples containing two stressed vowels in a row (or separated by an 'r') immediately followed by a flap were in the training data. This transducer will flap a 't' after any odd number of stressed vowels, rather than simply after any stressed vowel. Such a rule seems quite unnatural phonologically, and makes for an odd context-sensitive rewrite rule. Any sort of simplest hypothesis criterion applied to a system of rewrite rules would prefer a rule such as",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 139,
                        "end": 148,
                        "text": "Figure 14",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "6"
            },
            {
                "text": "--+ V -+ v",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "6"
            },
            {
                "text": "which is the equivalent of the transducer learned from the training data. This suggests that, the traditional formalism of context-sensitive rewrite rules contains implicit generalizations about how phonological rules usually work that are not present in the transducer system. We hope that further experimentation will lead to a way of expressing this language bias in our induction system. Johnson (1984) gives one of the first computational algorithms for phonological rule induction. His algorithm works for rules of the form",
                "cite_spans": [
                    {
                        "start": 392,
                        "end": 406,
                        "text": "Johnson (1984)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "6"
            },
            {
                "text": "(5) a ---+ b/C",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "7"
            },
            {
                "text": "where C is the feature matrix of the segments around a. Johnson's algorithm sets up a system of constraint equations which C must satisfy, by considering both the positive contexts, i.e., all the contexts Ci in which a b occurs on the surface, as well as all the negative contexts Cj in which an a occurs on the surface. The set of all positive and negative contexts will not generally determine a unique rule, but will determine a set of possible rules. Touretzky et al. (1990) extended Johnson's insight by using the version spaces algorithm of Mitchell (1981) to induce phonological rules in their Many Maps architecture. Like Johnson's, their system looks at the underlying and surface realizations of single segments. For each segment, the system uses the version space algorithm to search for the proper statement of the context. Riley (1991) and Withgott & Chen (1993) first proposed a decision-tree approach to segmental mapping. A decision tree is induced for each phoneme, classifying possible realizations of the phoneme in terms of contextual factors such as stress and the surrounding phonemes. However, since the decision tree for each phoneme is learned separately, the technique misses generalizations about the behavior of similar phonemes. In addition, no generalizations are made about similar context phonemes. In a transducer based formalism, generalizations about similar context phonemes naturally follow from generalizations about individual phonemes' behavior, as the context is represented by the current state of the machine, which in turn depends on the behavior of the machine on the previous phonemes.",
                "cite_spans": [
                    {
                        "start": 455,
                        "end": 478,
                        "text": "Touretzky et al. (1990)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 547,
                        "end": 562,
                        "text": "Mitchell (1981)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 836,
                        "end": 848,
                        "text": "Riley (1991)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 853,
                        "end": 875,
                        "text": "Withgott & Chen (1993)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "7"
            },
            {
                "text": "We hope that our hybrid model will be more successful at learning long distance dependencies than the simple decision tree approach. To model long distance rules such as vowel harmony in a simple decision tree approach, one must add more distant phonemes to the features used to learn the decision tree. In a transducer, this information is represented in the current state of the transducer.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "7"
            },
            {
                "text": "Inferring finite state transducers seems to hold promise as a method for learning phonological rules. Both of our initial augmentations of OSTIA to bias it toward phonological naturalness improve performance. Using information on the alignment between input and output strings allows the algorithm to learn more compact, more accurate transducers. The addition of decision trees at each state of the resulting transducer further improves accuracy and results in phonologically more natural transducers. We believe that further and more integrated uses of phonological naturalness, such as generalizing across similar phenomena at different states of the transducer, interleaving the merging of states and generalization of transitions, and adding memory to the model of transduction, could help even more.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "8"
            },
            {
                "text": "Our current algorithm and most previous algorithms are designed for obligatory rules. These algorithms fail completely when faced with optional, probabilistic rules, such as flapping. This is the advantage of probabilistic approaches such as the Riley/Withgott approach. One area we hope to investigate is the generalization of our algorithm to probabilistic rules with probabilistic finitestate transducers, perhaps by augmenting PFST induction techniques such as Stolcke & Omohundro (1994) with insights from phonological naturalness.",
                "cite_spans": [
                    {
                        "start": 465,
                        "end": 491,
                        "text": "Stolcke & Omohundro (1994)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "8"
            },
            {
                "text": "Besides aiding in the development of a practical tool for learning phonological rules, our results point to the use of constraints from universal grammar as a strong factor in the machine and possibly human learning of natural language phonology.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "8"
            }
        ],
        "back_matter": [
            {
                "text": "Thanks to Jerry Feldman, Eric Fosler, Isabel Galiano-Ronda, Lauri Karttunen, Jose Oncina,Andreas Stolcke, and Gary Tajchman. This work was partially funded by ICSI.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "A statistical model for generating pronunciation networks",
                "authors": [
                    {
                        "first": "Michael",
                        "middle": [
                            "D"
                        ],
                        "last": "Riley",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "IEEE ICASSP-91",
                "volume": "",
                "issue": "",
                "pages": "737--740",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "RILEY, MICHAEL D. 1991. A statistical model for gener- ating pronunciation networks. In IEEE ICASSP-91, 737-740.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Best-first model merging for hidden Markov model induction",
                "authors": [
                    {
                        "first": "Andreas",
                        "middle": [],
                        "last": "Stolcke",
                        "suffix": ""
                    },
                    {
                        "first": "Stephen",
                        "middle": [],
                        "last": "Omohundro",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "STOLCKE, ANDREAS, 8\u00a2 STEPHEN OMOHUNDRO. 1994. Best-first model merging for hidden Markov model induction. Technical Report TR-94-003, Interna- tional Computer Science Institute, Berkeley, CA.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Phonological rule induction: An architectural solution",
                "authors": [
                    {
                        "first": "David",
                        "middle": [
                            "S"
                        ],
                        "last": "Touretzky",
                        "suffix": ""
                    },
                    {
                        "first": "Gillette",
                        "middle": [],
                        "last": "Elvgren",
                        "suffix": "III"
                    },
                    {
                        "first": "Deirdre",
                        "middle": [
                            "W"
                        ],
                        "last": "Wheeler",
                        "suffix": ""
                    }
                ],
                "year": 1990,
                "venue": "Proceedings of the 12th Annual Conference of the Cognitive Science Society (COGSCI-90)",
                "volume": "",
                "issue": "",
                "pages": "348--355",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "TOURETZKY, DAVID S., GILLETTE ELVGREN III, & DEIRDRE W. WHEELER. 1990. Phonological rule induction: An architectural solution. In Proceed- ings of the 12th Annual Conference of the Cognitive Science Society (COGSCI-90), 348-355.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "The string-to-string correction problem",
                "authors": [
                    {
                        "first": "R",
                        "middle": [
                            "A"
                        ],
                        "last": "Wagner",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [
                            "J"
                        ],
                        "last": "Fischer",
                        "suffix": ""
                    }
                ],
                "year": 1974,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "168--173",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "WAGNER, R. A., & M. J. FISCHER. 1974. The string-to- string correction problem. Journal of the Associa- tion for Computation Machinery 21.168-173.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Computational Models of American Speech",
                "authors": [
                    {
                        "first": "M",
                        "middle": [
                            "M"
                        ],
                        "last": "Withgott",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [
                            "R"
                        ],
                        "last": "Chen",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Center for the Study of Language and Information",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "WITHGOTT, M. M., & E R. CHEN. 1993. Computation Models of American Speech. Center for the Study of Language and Information.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Formal Aspects of Phonological Description",
                "authors": [
                    {
                        "first": "C",
                        "middle": [
                            "Douglas"
                        ],
                        "last": "Johnson",
                        "suffix": ""
                    }
                ],
                "year": 1972,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "JOHNSON, C. DOUGLAS. 1972. FormalAspects of Phono- logical Description. The Hague: Mouton.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "A discovery procedure for certain phonological rules",
                "authors": [
                    {
                        "first": "Mark",
                        "middle": [],
                        "last": "Johnson",
                        "suffix": ""
                    }
                ],
                "year": 1984,
                "venue": "Proceedings of the Tenth International Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "344--347",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "JOHNSON, MARK. 1984. A discovery procedure for certain phonological rules. In Proceedings of the Tenth International Conference on Computational Linguistics, 344-347, Stanford.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Finite-state constraints",
                "authors": [
                    {
                        "first": "Lauri",
                        "middle": [],
                        "last": "Karttunen",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "The Last Phonological Rule",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "KARTI'UNEN, LAURI. 1993. Finite-state constraints. In The Last Phonological Rule, ed. by John Goldsmith. University of Chicago Press.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Generalization as search",
                "authors": [
                    {
                        "first": "Tom",
                        "middle": [
                            "M"
                        ],
                        "last": "Mitchell",
                        "suffix": ""
                    }
                ],
                "year": 1981,
                "venue": "Readings in Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "517--542",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "MITCHELL, TOM M. 1981. Generalization as search. In Readings in Artificial Intelligence, ed. by Bon- nie Lynn Webber & Nils J. Nilsson, 517-542. Los Altos: Moi'gan Kaufmann.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Learning subsequential transducers for pattern recognition tasks",
                "authors": [
                    {
                        "first": "Jos\u00e9",
                        "middle": [],
                        "last": "Oncina",
                        "suffix": ""
                    },
                    {
                        "first": "Pedro",
                        "middle": [],
                        "last": "Garc\u00eda",
                        "suffix": ""
                    },
                    {
                        "first": "Enrique",
                        "middle": [],
                        "last": "Vidal",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence",
                "volume": "15",
                "issue": "",
                "pages": "448--458",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "ONCINA, JO$1~, PEDRO GARC[A, & ENRIQUE VIDAL. 1993. Learning subsequential transducers for pat- tern recognition tasks. IEEE Transactions on Pattern Analysis and Machine Intelligence 15.448-458.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "text": "Onward Tree Transducer for \"bat\", \"batter\", and \"band\" with Flapping Applied   c",
                "num": null,
                "uris": null
            },
            "FIGREF1": {
                "type_str": "figure",
                "text": "Result of Merging States 0 and 1 ofFigure 2",
                "num": null,
                "uris": null
            },
            "FIGREF2": {
                "type_str": "figure",
                "text": "Final Result of Merging Process on Transducer fromFigure 2",
                "num": null,
                "uris": null
            },
            "FIGREF3": {
                "type_str": "figure",
                "text": "Figures 5 and 6. ih m p oal r t ah n s IIII /111 ih m p oal dx ah n t s Figure 5: Alignment of \"importance\" with flapping, rdeletion and t-insertion",
                "num": null,
                "uris": null
            },
            "FIGREF4": {
                "type_str": "figure",
                "text": "the prefix of length i -1 of the output string ~tInitial Tree Transducer Constructed with Alignment Information: Note that output symbols have been pushed back across state 3 during the : dx [ ], Destination State: 0 2: Output: t [ ], Destination State: 0 3: On end of string: Output: t, Destination State: 0 Example Decision Tree: This tree describes the behavior of State 2 of the transducer in Figure 1. [ ] in the output string indicates the arc's input symbol (with no features changed).",
                "num": null,
                "uris": null
            },
            "FIGREF5": {
                "type_str": "figure",
                "text": "Decision Tree Before Pruning: The initial state of the flapping transducer",
                "num": null,
                "uris": null
            },
            "FIGREF6": {
                "type_str": "figure",
                "text": "Flapping Transducer Induced from 50,000 Samples",
                "num": null,
                "uris": null
            },
            "FIGREF7": {
                "type_str": "figure",
                "text": "The Same Decision Tree After Pruning",
                "num": null,
                "uris": null
            },
            "FIGREF8": {
                "type_str": "figure",
                "text": "Results on Three Rules Composed 12,500 Training, 49,280 Test Figure 15 shows the final transducer induced from this corpus of 12,500 words with pruned decision trees. Three Rule Transducer Induced from 12,500 Samples",
                "num": null,
                "uris": null
            }
        }
    }
}