{
    "paper_id": "P94-1024",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:18:58.493387Z"
    },
    "title": "A MARKOV LANGUAGE LEARNING MODEL FOR FINITE PARAMETER SPACES",
    "authors": [
        {
            "first": "Partha",
            "middle": [],
            "last": "Niyogi",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Massachusetts Institute of Technology",
                "location": {
                    "postCode": "E25-201, 02139",
                    "settlement": "Cambridge",
                    "region": "MA",
                    "country": "USA"
                }
            },
            "email": ""
        },
        {
            "first": "Robert",
            "middle": [
                "C"
            ],
            "last": "Berwick",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Massachusetts Institute of Technology",
                "location": {
                    "postCode": "E25-201, 02139",
                    "settlement": "Cambridge",
                    "region": "MA",
                    "country": "USA"
                }
            },
            "email": "berwick@ai.nfit.edu"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper shows how to formally characterize language learning in a finite parameter space as a Markov structure, hnportant new language learning results follow directly: explicitly calculated sample complexity learning times under different input distribution assumptions (including CHILDES database language input) and learning regimes. We also briefly describe a new way to formally model (rapid) diachronic syntax change.",
    "pdf_parse": {
        "paper_id": "P94-1024",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper shows how to formally characterize language learning in a finite parameter space as a Markov structure, hnportant new language learning results follow directly: explicitly calculated sample complexity learning times under different input distribution assumptions (including CHILDES database language input) and learning regimes. We also briefly describe a new way to formally model (rapid) diachronic syntax change.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Recently, several researchers, including Gibson and Wexler (1994) , henceforth GW, Dresher and Kaye (1990) ; and Clark and Roberts (1993) have modeled language learning in a (finite) space whose grammars are characterized by a finite number of parameters or nlength Boolean-valued vectors. Many current linguistic theories now employ such parametric models explicitly or in spirit, including Lexical-Functional Grammar and versions of HPSG, besides GB variants. With all such models, key questions about sample complexity, convergence time, and alternative modeling assumptions are difficult to assess without a precise mathematical formalization. Previous research has usually addressed only the question of convergence in the limit without probing the equally important question of sample complexity: it is of not much use that a learner can acquire a language if sample complexity is extraordinarily high, hence psychologically implausible. This remains a relatively undeveloped area of language learning theory. The current paper aims to fill that gap. We choose as a starting point the GW Triggering Learning Algorithm (TLA). Our central result is that the performance of this algorithm and others like it is completely modeled by a Markov chain. We explore the basic computational consequences of this, including some surprising results about sample complexity and convergence time, the dominance of random walk over gradient ascent, and the applicability of these results to actual child language acquisition and possibly language change.",
                "cite_spans": [
                    {
                        "start": 41,
                        "end": 65,
                        "text": "Gibson and Wexler (1994)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 83,
                        "end": 106,
                        "text": "Dresher and Kaye (1990)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 113,
                        "end": 137,
                        "text": "Clark and Roberts (1993)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "BACKGROUND MOTIVATION: TRIGGERS AND LANGUAGE ACQUISITION",
                "sec_num": null
            },
            {
                "text": "Background. Following Gold (1967) the basic framework is that of identification in the limit. We assume some familiarity with Gold's assumptions. The learner receives an (infinite) sequence of (positive) example sentences from some target language. After each, the learner either (i) stays in the same state; or (ii) moves to a new state (change its parameter settings). If after some finite number of examples the learner converges to the correct target language and never changes its guess, then it has correctly identified the target language in the limit; otherwise, it fails.",
                "cite_spans": [
                    {
                        "start": 22,
                        "end": 33,
                        "text": "Gold (1967)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "BACKGROUND MOTIVATION: TRIGGERS AND LANGUAGE ACQUISITION",
                "sec_num": null
            },
            {
                "text": "In the GW model (and others) the learner obeys two additional fundamental constraints: (1) the single.value constraint--the learner can change only 1 parameter value each step; and (2) the greediness constraint--if the learner is given a positive example it cannot recognize and changes one parameter value, finding that it can accept the example, then the learner retains that new value. The TLA essentially simulates this; see Gibson and Wexler (1994) for details.",
                "cite_spans": [
                    {
                        "start": 429,
                        "end": 453,
                        "text": "Gibson and Wexler (1994)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "BACKGROUND MOTIVATION: TRIGGERS AND LANGUAGE ACQUISITION",
                "sec_num": null
            },
            {
                "text": "Previous parameter models leave open key questions addressable by a more precise formalization as a Markov chain. The correspondence is direct. Each point i in the Markov space is a possible parameter setting. Transitions between states stand for probabilities b that the learner will move from hypothesis state i to state j. As we show below, given a distribution over L(G), we can calculate the actual b's themselves. Thus, we can picture the TLA learning space as a directed, labeled graph V with 2 n vertices. See figure 1 for an example in a 3-parameter system. 1 We can now use Markov theory to describe TLA parameter spaces, as in lsaacson and 1GW construct an identical transition diagram in the description of their computer program for calculating local maxima. However, this diagram is not explicitly presented as a Markov structure and does not include transition probabilities.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "THE MARKOV FORMULATION",
                "sec_num": null
            },
            {
                "text": "Madsen (1976) . By the single value hypothesis, the system can only move 1 Hamming bit at a time, either toward the target language or 1 bit away. Surface strings can force the learner from one hypothesis state to another. For instance, if state i corresponds to a grammar that generates a language that is a proper subset of another grammar hypothesis j, there can never be a transition from j to i, and there must be one from i to j. Once we reach the target grammar there is nothing that can move the learner from this state, since all remaining positive evidence will not cause the learner to change its hypothesis: an Absorbing State (AS) in the Markov literature. Clearly, one can conclude at once the following important learnability result:",
                "cite_spans": [
                    {
                        "start": 7,
                        "end": 13,
                        "text": "(1976)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "THE MARKOV FORMULATION",
                "sec_num": null
            },
            {
                "text": "Theorem 1 Given a Markov chain C corresponding to a GW TLA learner, 3 exactly 1 AS (corresponding to the target grammar/language) iff C is learnable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "THE MARKOV FORMULATION",
                "sec_num": null
            },
            {
                "text": "Proof. \u00a2::. By assumption, C is learnable. Now assume for sake of contradiction that there is not exactly one AS. Then there must be either 0 AS or > 1 AS. In the first case, by the definition of an absorbing state, there is no hypothesis in which the learner will remain forever. Therefore C is not learnable, a contradiction. In the second case, without loss of generality, assume there are exactly two absorbing states, the first S corresponding to the target parameter setting, and the second S ~ corresponding to some other setting. By the definition of an absorbing state, in the limit C will with some nonzero probability enter S I, and never exit S I. Then C is not learnable, a contradiction. Hence our assumption that there is not exactly 1 AS must be false.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "THE MARKOV FORMULATION",
                "sec_num": null
            },
            {
                "text": "=\u00a2.. Assume that there exists exactly 1 AS i in the Markov chain M. Then, by the definition of an absorbing state, after some number of steps n, no matter what the starting state, M will end up in state i, corresponding to the target grammar. | Corollary 0.1 Given a Markov chain corresponding to a (finite) family of grammars in a G W learning system, if there exist 2 or more AS, then that family is not learnable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "THE MARKOV FORMULATION",
                "sec_num": null
            },
            {
                "text": "We now derive the transition probabilities for the Markov TLA structure, the key to establishing sample complexity results. Let the target language L~ be L~ = {sl, s2, s3, ...} and P a probability distribution on these strings. Suppose the learner is in a state corresponding to language Ls. With probability P(sj), it receives a string sj. There are two cases given current parameter settings.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": "Case I. The learner can syntactically analyze the received string sj. Then parameter values are unchanged. This is so only when sj \u2022 L~. The probability of remaining in the state s is P(sj).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": "Case II. The learner cannot syntactically analyze the string. Then sj ~ Ls; the learner is in state s, and has n neighboring states (Hamming distance of 1). The learner picks one of these uniformly at random. If nj of these neighboring states correspond to languages which contain sj and the learner picks any one of them (with probability nj/n), it stays in that state. If the learner picks any of the other states (with probability ( n -nj)/n) then it remains in state s. Note that nj could take values between 0 and n. Thus the probability that the learner remains in state s is P(sj)(( n -nj )/n).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": "The probability of moving to each of the other nj states is P(sj)(nj/n).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": "The probability that the learner will remain in its original state s is the sum of the probabilities of these two cases: ~,jEL, P(sj) + E,jCL,(1 -nj/n)P(sj).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": "To compute the transition probability from s to k, note that this transition will occur with probability 1/n for all the strings sj E Lk but not in L~. These strings occur with probability P(sj) each and so the transition probability is: , jeL, , , j\u00a2L, , , jeLk (1/n) ",
                "cite_spans": [
                    {
                        "start": 238,
                        "end": 239,
                        "text": ",",
                        "ref_id": null
                    },
                    {
                        "start": 240,
                        "end": 244,
                        "text": "jeL,",
                        "ref_id": null
                    },
                    {
                        "start": 245,
                        "end": 246,
                        "text": ",",
                        "ref_id": null
                    },
                    {
                        "start": 247,
                        "end": 248,
                        "text": ",",
                        "ref_id": null
                    },
                    {
                        "start": 249,
                        "end": 253,
                        "text": "j\u00a2L,",
                        "ref_id": null
                    },
                    {
                        "start": 254,
                        "end": 255,
                        "text": ",",
                        "ref_id": null
                    },
                    {
                        "start": 256,
                        "end": 257,
                        "text": ",",
                        "ref_id": null
                    },
                    {
                        "start": 258,
                        "end": 268,
                        "text": "jeLk (1/n)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": "P[s ~ k] = ~",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": "P(si) \u2022 Summing over all strings sj E ( Lt N Lk ) \\ L, (set dif- ference) it is easy to see that sj \u2022 ( Lt N Lk ) \\ Ls \u00a2~ sj \u2022 (L, N nk) \\ (L, n Ls). Rewriting, we have P[s ---* k] = ~,je(L,nLk)\\(L,nL.)(1/n)P(sj)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": ". Now we can compute the transition probabilities between any two states. Thus the self-transition probability can be given as,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": "P[s --, s] = 1-~-'~ k is a neighboring state of, P[s ---, k].",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DERIVATION OF TRANSITION PROBABILITIES FOR THE MARKOV TLA STRUCTURE",
                "sec_num": null
            },
            {
                "text": "Consider the 3-parameter natural language system described by Gibson and Wexler (1994) , designed to cover basic word orders (X-bar structures) plus the verbsecond phenomena of Germanic languages, lts binary parameters are: (1) Spec(ifier) initial (0) or final (1);",
                "cite_spans": [
                    {
                        "start": 62,
                        "end": 86,
                        "text": "Gibson and Wexler (1994)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example.",
                "sec_num": null
            },
            {
                "text": "(2) Compl(ement) initial (0) or final (1); and Verb Second (V2) does not exist (0) or does exist (l). Possible \"words\" in this language include S(ubject), V(erb), O(bject), D(irect) O(bject), Adv(erb) phrase, and so forth. Given these alternatives, Gibson and Wexler (1994) show that there are 12 possible surface strings for each (-V2) grammar and 18 possible surface strings for each (+V2) grammar, restricted to unembedded or \"degree-0\" examples for reasons of psychological plausibility (see Gibson and Wexler for discussion). For instance, the parameter setting [0 1 0]= Specifier initial, Complement final, and -V2, works out to the possible basic English surface phrase order of Subject-Verb-Object (SVO).",
                "cite_spans": [
                    {
                        "start": 249,
                        "end": 273,
                        "text": "Gibson and Wexler (1994)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example.",
                "sec_num": null
            },
            {
                "text": "As in figure 1 below, suppose the SVO (\"English\", setting #5=[0 1 0]) is the target grammar. The figure's shaded rings represent increasing Hamming distances from the target. Each labeled circle is a Markov state. Surrounding the bulls-eye target are the 3 other parameter arrays that differ from [0 1 0] by one binary digit: e.g., [0, 0, 0], or Spec-first, Comp-first, -V2, basic order SOV or \"Japanese\". Around it are the three settings that differ from the target by exactly one binary digit; surrounding those are the 3 hypotheses two binary digits away from the target; the third ring out contains the single hypothesis that differs from the target by 3 binary digits.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example.",
                "sec_num": null
            },
            {
                "text": "Plainly there are exactly 2 absorbing states in this Markov chain. One is the target grammar (by definition); the other is state 2. State 4 is also a sink that leads only to state 4 or state 2. GW call these two nontarget states local maxima because local gradient ascent will converge to these without reaching the desired target. Hence this system is not learnable. More importantly though, in addition to these local maxima, we show (see below) that there are other states (not detected in GW or described by Clark) from which the learner will never reach the target with (high) positive probability. Example: we show that if the learner starts at hypothesis VOS-V2, then with probability 0.33 in the limit, the learner will never converge to the SVO target. Crucially, we must use set differences to build the Markov figure straightforwardly, as indicated in the next section. In short, while it is possible to reach \"English\"from some source languages like \"Japanese,\" this is not possible for other starting points (exactly 4 other initial states).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example.",
                "sec_num": null
            },
            {
                "text": "It is easy to imagine alternatives to the TLA that avoid the local maxima problem. As it stands the learner only changes a parameter setting if that change allows the learner to analyze the sentence it could not analyze before. If we relax this condition so that under unanalyzability the learner picks a random parameter to change, then the problem with local maxima disappears, because there can be only 1 Absorbing State, the target grammar. All other states have exit arcs. Thus, by our main theorem, such a system is learnable. We discuss other alternatives below.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example.",
                "sec_num": null
            },
            {
                "text": "Perhaps the most significant advantage of the Markov chain formulation is that one can calculate the number of examples needed to acquire a language. Recall it is not enough to demonstrate convergence in the limit; learning must also be feasible. This is particularly true in the case of finite parameter spaces where convergence might not be as much of a problem as feasibility. Fortunately, given the transition matrix of a Markov chain, the problem of how long it takes to converge has been well studied.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CONVERGENCE TIMES FOR THE MARKOV CHAIN MODEL",
                "sec_num": null
            },
            {
                "text": "Consider the example in the previous section. The target grammar is SVO-V2 (grammar ~5 in GW). For simplicity, assume a uniform distribution on L5. Then the probability of a particular string sj in L5 is 1/12 because there are 12 (degree-0) strings in L~. We directly compute the transition matrix (0 entries elsewhere): States 2 and 5 are absorbing; thus this chain contains local maxima. Also, state 4 exits only to either itself or to state 2, hence is also a local maximum. If T is the transition probability matrix of a chain, then the corresponding i, j element of T m is the probability that the learner moves from state i to state j in m steps. For learnability to hold irrespective starting state, the probability of reaching state 5 should approach 1 as m goes to infinity, i.e., column 5 of T m should contain all l's, and O's elsewhere. Direct computation shows this to be false:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SOME TRANSITION MATRICES AND THEIR CONVERGENCE CURVES",
                "sec_num": null
            },
            {
                "text": "Transition matrix T for the 8-state chain, with rows and columns indexed by the languages L1 through L8 (numerical entries not recoverable from the extraction).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SOME TRANSITION MATRICES AND THEIR CONVERGENCE CURVES",
                "sec_num": null
            },
            {
                "text": "We see that if the learner starts out in states 2 or 4, it will certainly end up in state 2 in the limit. These two states correspond to local maxima grammars in the GW framework. We also see that if the learner starts in states 5 through 8, it will certainly converge in the limit to the target grammar.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SOME TRANSITION MATRICES AND THEIR CONVERGENCE CURVES",
                "sec_num": null
            },
            {
                "text": "States 1 and 3 are much more interesting, and constitute new results about this parameterization. If the learner starts in either of these states, it reaches the target grammar with probability 2/3 and state 2 with probability 1/3. Thus, local maxima are not the only problem for parameter space learnability. To our knowledge, GW and other researchers have focused exclusively on local maxima. However, while it is true that states 2 and 4 will, with probability l, not converge to the target grammar, it is also true that states l and 3 will not converge to the target, with probability 1/3. Thus, the number of \"bad\" initial hypotheses is significantly larger than realized generally (in fact, 12 out of 56 of the possible source-target grammar pairs in the 3parameter system). This difference is again due to the new probabilistic framework introduced in the current paper. The quantity p(m) is easy to interpret. Thus p(m) = 0.95 rneans that for every initial state of the learner the probability that it is in the target state after m examples is at least 0.95. Further there is one initial state (the worst initial state with respect to the target, which in our example is Ls) for which this probability is exactly 0.95. We find on looking at the curve that the learner converges with high probability within 100 to 200 (degree-0) example sentences, a psychologically plausible number.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SOME TRANSITION MATRICES AND THEIR CONVERGENCE CURVES",
                "sec_num": null
            },
            {
                "text": "We can now compare the convergence time of TLA to other algorithms. Perhaps the simplest is random walk: start the learner at a random point in the 3-parameter space, and then, if an input sentence cannot be analyzed, move 1-bit randomly from state to state. Note that this regime cannot suffer from the local maxima problem, since there is always some finite probability of exiting a non-target state.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SOME TRANSITION MATRICES AND THEIR CONVERGENCE CURVES",
                "sec_num": null
            },
            {
                "text": "Computing the convergence curves for a random walk algorithm (RWA) on the 8 state space, we find that the convergence times are actually faster than for the TLA; see figure 2. Since the RWA is also superior in that it does not suffer from the same local maxima problem as TLA, the conceptual support for the TLA is by no means clear. Of course, it may be that the TLA has empirical support, in the sense of independent evidence that children do use this procedure (given by the pattern of their errors, etc.), but this evidence is lacking, as far as we know.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SOME TRANSITION MATRICES AND THEIR CONVERGENCE CURVES",
                "sec_num": null
            },
            {
                "text": "In the earlier section we assumed that the data was uniformly distributed. We computed the transition matrix for a particular target language and showed that convergence times were of the order of 100-200 samples. In this section we show that the convergence times depend crucially upon the distribution. In particular we can choose a distribution which will make the convergence time as large as we want. Thus the distribution-free convergence time for the 3-parameter system is infinite.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "As before, we consider the situation where the target language is L1. There are no local maxima problems for this choice. We begin by letting the distribution be parametrized by the variables a, b, c, d where",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "a = P(A = {Adv(erb)Phrase V S}) b = P(B = {Adv V O S, Adv Aux V S}) c = P(C={AdvV O1 O2S, AdvAuxVOS, Adv Aux V O1 02 S}) d = P(D={VS})",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "Thus each of the sets A, B, C and D contain different degree-O sentences of L1. Clearly the probability of the set L, \\{AUBUCUD} is 1-(a+b+c+d). The elements of each defined subset of La are equally likely with respect to each other. Setting positive values for a, b, c, d such that a + b + c + d < 1 now defines a unique probability for each degree(O) sentence in L1. For example, the probability of AdvVOS is b/2, the probability of AdvAuxVOS is c/3, that of VOS is (1-(a+b+c+d))/6 and so on; see figure 3. We can now obtain the transition matrix corresponding to this distribution. If we compare this matrix with that obtained with a uniform distribution on the sentences of La in the earlier section. This matrix has non-zero elements (transition probabilities) exactly where the earlier matrix had non-zero elements. However, the value of each transition probability now depends upon a,b, c, and d. In particular if we choose a = 1/12, b = 2/12, c = 3/12, d = 1/12 (this is equivalent to assuming a uniform distribution) we obtain the appropriate transition matrix as before. Looking more closely at the general transition matrix, we see that the transition probability from state 2 to state 1 is (1-(a+b+c))/3.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "Clearly if we make a arbitrarily close to 1, then this transition probability is arbitrarily close to 0 so that the number of samples needed to converge can be made arbitrarily large. Thus choosing large values for a and small values for b will result in large convergence times.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "This means that the sample complexity cannot be bounded in a distribution-free sense, because by choosing a highly unfavorable distribution the sample complexity can be made as high as possible. For example, we now give the convergence curves calculated for different choices of a, b,c, d. We see that for a uniform distribution the convergence occurs within 200 samples. By choosing a distribution with a = 0.9999 and b = c = d = 0.000001, the convergence time can be pushed up to as much as 50 million samples. (Of course, this distribution is presumably not psychologically realistic.) For a = 0.99, b = c = d = 0.0001, the sample complexity is on the order of 100,000 positive examples. Remark. The preceding calculation provides a worstcase convergence time. We can also calculate average convergence times using standard results from Markov chain theory (see Isaacson and Madsen, 1976) , as in table 2. These support our previous results.",
                "cite_spans": [
                    {
                        "start": 865,
                        "end": 891,
                        "text": "Isaacson and Madsen, 1976)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "There are also well-known convergence theorems derived from a consideration of the eigenvalues of the transition matrix. We state without proof a convergence result for transition matrices stated in terms of its eigenvalues. Table 1 : Complete list of problem states, i.e., all combinations of starting grammar and target grammar which result in non-learnability of the target. The items marked with an asterisk are those listed in the original paper by Gibson and Wexler (1994) . ~l , . . . , .~n. Let x0 (an ndimensional vector) represent the starting probability of being in each state of the chain and r be the limiting probability of being in each state. Then after k transitions, the probability of being in each state x0T k can be described by",
                "cite_spans": [
                    {
                        "start": 454,
                        "end": 478,
                        "text": "Gibson and Wexler (1994)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 481,
                        "end": 485,
                        "text": "~l ,",
                        "ref_id": null
                    },
                    {
                        "start": 486,
                        "end": 493,
                        "text": ". . . ,",
                        "ref_id": null
                    },
                    {
                        "start": 494,
                        "end": 530,
                        "text": ".~n. Let x0 (an ndimensional vector)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 225,
                        "end": 232,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "Initial Grammar Target Grammar (svo-v2) (svo+v2)* (soy-v2) (SOV+V2)* (VOS-V2) (VOS+V2)* (OVS-V2) (ovs+v2)* (vos-v2) (VOS+V2)* (OVS-V2) (OVS+V2)* (OVS-V2) (ovs-v2) (ovs-v2) (ovs-v2) (svo-v2) (svo-v2) (svo-v2) (svo-v2) (sov-v2) (soy-v2) (soy-v2) (sov-v2)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "n I1 x0T k-~ I1=11 ~ mfx0y~x, I1~< max I~,lk ~ II x0y,x, II 2<i<n i=1 - - i=2",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "where the Yi's are the right eigenvectors ofT.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "This theorem bounds the convergence rate to the limiting distribution 7r (in cases where there is only one absorption state, 7r will have a 1 corresponding to that state and 0 everywhere else). Using this result we bound the rates of convergence (in terms of number k of samples). It should be plain that these results could be used to establish standard errors and confidence bounds on convergence times in the usual way, another advantage of our new approach; see table 3.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS: PART I",
                "sec_num": null
            },
            {
                "text": "The Markov model also allows us to easily determine the effect of distributional changes in the input. This is important for either computer or child acquisition studies, since we can use corpus distributions to compute convergence times in advance. For instance, it can be easily shown that convergence times depend crucially upon the distribution chosen (so in particular the TLA learning model does not follow any distributionfree PAC results). Specifically, we can choose a distribution that will make the convergence time as large as we want. For example, in the situation where the target language is L1, we can increase the convergence time arbitrarily by increasing the probability of the string {Adv(verb) V S}. By choosing a more unfavorable distribution the convergence time can be pushed up to as much as 50 million samples. While not surprising in itself, the specificity of the model allows us to be precise about the required sample size.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DISTRIBUTIONAL ASSUMPTIONS, PART II",
                "sec_num": null
            },
            {
                "text": "It is of interest to examine the fidelity of the model using real language distributions, namely, the CHILDES database. We have carried out preliminary direct experiments using the CHILDES caretaker English input to \"Nina\" and German input to \"Katrin\"; these consist of 43,612 and 632 sentences each, respectively. We note, following well-known results by psycholinguists, that both corpuses contain a much higher percentage of auxinversion and wh-questions than \"ordinary\" text (e.g., the LOB): 25,890 questions, and 11,775 wh-questions; 201 and 99 in the German corpus; but only 2,506 questions or 3.7% out of 53,495 LOB sentences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CHILDES DISTRIBUTIONS",
                "sec_num": null
            },
            {
                "text": "To test convergence, an implemented system using a newer version of deMarcken's partial parser (see deMarcken, 1990) analyzed each degree-0 or degree-1 sentence as falling into one of the input patterns SVO, S Aux V, etc., as appropriate for the target language. Sentences not parsable into these patterns were discarded (presumably \"too complex\" in some sense following a tradition established by many other researchers; see Wexler and Culicover (1980) When run through the TLA, we discover that convergence falls roughly along the TLA convergence time displayed in figure 1-roughly 100 examples to asymptote. Thus, the feasibility of the basic model is confirmed by actual caretaker input, at least in this simple case, for both English and German. We are continuing to explore this model with other languages and distributional assumptions. However, there is one very important new complication that must be taken into account: we have found that one must (obviously) add patterns to cover the predominance of auxiliary inversions and wh-questions. However, that largely begs the question of whether the language is verb-second or not. Thus, as far as we can tell, we have not yet arrived at a satisfactory parameter-setting account for V2 acquisition.",
                "cite_spans": [
                    {
                        "start": 426,
                        "end": 453,
                        "text": "Wexler and Culicover (1980)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CHILDES DISTRIBUTIONS",
                "sec_num": null
            },
            {
                "text": "OF THE LEARNING MODEL AND EXTENSIONS The Markov formulation allows one to more easily explore algorithm variants. Besides the TLA, we consider the possible three simple learning algorithm regimes by dropping either or both of the Single Value and Greediness constraints. The key result is that ahnost any other regime works faster than local gradient ascent and avoids problems with local maxima. See figure 4 for a representative result. Thus, most interestingly, parameterized language learning appears particularly robust under algorithmic changes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "VARIANTS",
                "sec_num": null
            },
            {
                "text": "We remark here that the \"batch\" phonological parameter learning system of Dresher and Kaye (1990) is susceptible to a more direct PAC-type analysis, since their system sets parameters in an \"off-line\" mode. We state without proof some results that can be given in such cases.",
                "cite_spans": [
                    {
                        "start": 74,
                        "end": 97,
                        "text": "Dresher and Kaye (1990)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "EXTENSIONS, DIACHRONIC CHANGE AND CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "Learning scenario TLA (uniform) TLA(a = 0.99) TLA(a = 0.9999) RW ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "EXTENSIONS, DIACHRONIC CHANGE AND CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "o((1-lo-~) ~) o((1 -10-6) k) o(0.89 k) q ~, d d ,/ / i// /' //// L.~, 2' 0 4' o 6' 0 s'o 6o",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "EXTENSIONS, DIACHRONIC CHANGE AND CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "Number of samples Figure 4 : Convergence rates for different learning algorithms when L1 is the target language. The curve with the slowest rate (large dashes) represents the TLA, the one with the fastest rate (small dashes) is the Random Walk (RWA) with no greediness or single value constraints. Random walks with exactly one of the greediness and single value constraints have performances in between.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 18,
                        "end": 26,
                        "text": "Figure 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "EXTENSIONS, DIACHRONIC CHANGE AND CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "Theorem 3 If the learner draws more than M = 1 In(l/b) samples, then it will identify the tarln (l/(1-bt)) get with confidence greater than 1 -6. ( Here bt = P(Lt \\ Uj~tLj)).",
                "cite_spans": [
                    {
                        "start": 96,
                        "end": 106,
                        "text": "(l/(1-bt))",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "EXTENSIONS, DIACHRONIC CHANGE AND CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "Finally, the Markov model also points to an intriguing new model for syntactic change. One simply has to introduce two or more target languages that emit positive example strings with (probably different) frequencies: each corresponding to difference language sources. If the model is run as before, then there can be a large probability for a learner to converge to a state different from the highest frequency emitting target state: that is, the learner can acquire a different parameter setting, for example, a -V2 setting, even in a predominantly +V2 environment. This is of course one of the historical changes that occurred in the development of English. Space does not permit us to explore all the consequences of this new Markov model; we remark here that once again we can compute convergence times and stability under different distributions of target frequencies, combining it with the usual dynamical models of genotype fixation. In this case, the interesting result is that the TLA actually boosts diachronic change by orders of magnitude, since as observed earlier, it can permit the learner to arrive at a different convergent state even when there is just one target language emitter. In contrast, the local maxima targets are stable, and never undergo change. Whether this powerful \"boost\" effect plays a role in diachronic change remains a topic for future investigation. As far as we know, the possibility for formally modeling the kind of saltation indicated by the Markov model has not been noted previously and has only been vaguely stated by authors such as Lightfoot (1990) .",
                "cite_spans": [
                    {
                        "start": 1581,
                        "end": 1597,
                        "text": "Lightfoot (1990)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "EXTENSIONS, DIACHRONIC CHANGE AND CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "In conclusion, by introducing a formal mathematical model for language acquisition, we can provide rigorous results on parameter learning, algorithmic variation, sample complexity, and diachronic syntax change. These results are of interest for corpus-based acquisition and investigations of child acquisition, as well as pointing the way to a more rigorous bridge between modern computational learning theory and computational linguistics.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "EXTENSIONS, DIACHRONIC CHANGE AND CONCLUSIONS",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "We would like to thank Ken Wexler, Ted Gibson, and an anonymous ACL reviewer for valuable discussions and comments on this work. Dr. Leonardo Topa provided invaluable programming assistance. All residual errors are ours. This research is supported by NSF grant 9217041-ASC and ARPA under the HPCC program.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "ACKNOWLEDGMENTS",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "A Computational Model of Language Learnability and Language Change",
                "authors": [
                    {
                        "first": "Robin",
                        "middle": [],
                        "last": "Clark",
                        "suffix": ""
                    },
                    {
                        "first": "Ian",
                        "middle": [],
                        "last": "Roberts",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Linguistic Inquiry",
                "volume": "24",
                "issue": "2",
                "pages": "299--345",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Clark, Robin and Roberts, Ian (1993). \"A Compu- tational Model of Language Learnability and Lan- guage Change.\" Linguistic Inquiry, 24(2):299-345.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Parsing the LOB Corpus",
                "authors": [
                    {
                        "first": "Carl",
                        "middle": [],
                        "last": "Demarcken",
                        "suffix": ""
                    }
                ],
                "year": 1990,
                "venue": "Proceedings of the 25th Annual Meeting of the Association for Computational Linguistics. Pittsburgh, PA: Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "243--251",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "deMarcken, Carl (1990). \"Parsing the LOB Corpus.\" Proceedings of the 25th Annual Meeting of the As- sociation for Computational Linguistics. Pitts- burgh, PA: Association for Computational Linguis- tics, 243-251.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "A Computational Learning Model For Metrical Phonology",
                "authors": [
                    {
                        "first": "Elan",
                        "middle": [],
                        "last": "Dresher",
                        "suffix": ""
                    },
                    {
                        "first": "Jonathan",
                        "middle": [],
                        "last": "Kaye",
                        "suffix": ""
                    }
                ],
                "year": 1990,
                "venue": "Cognition",
                "volume": "34",
                "issue": "1",
                "pages": "137--195",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dresher, Elan and Kaye, Jonathan (1990). \"A Compu- tational Learning Model For Metrical Phonology.\" Cognition, 34(1):137-195.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Triggers",
                "authors": [
                    {
                        "first": "Edward",
                        "middle": [],
                        "last": "Gibson",
                        "suffix": ""
                    },
                    {
                        "first": "Kenneth",
                        "middle": [],
                        "last": "Wexler",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Linguistic Inquiry",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gibson, Edward and Wexler, Kenneth (1994). \"Trig- gers.\" Linguistic Inquiry, to appear.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Language Identification in the Limit",
                "authors": [
                    {
                        "first": "E",
                        "middle": [
                            "M"
                        ],
                        "last": "Gold",
                        "suffix": ""
                    }
                ],
                "year": 1967,
                "venue": "Information and Control",
                "volume": "10",
                "issue": "4",
                "pages": "447--474",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gold, E.M. (1967). \"Language Identification in the Limit.\" Information and Control, 10(4): 447-474.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Markov Chains",
                "authors": [
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Isaacson",
                        "suffix": ""
                    },
                    {
                        "first": "John",
                        "middle": [],
                        "last": "Masden",
                        "suffix": ""
                    }
                ],
                "year": 1976,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Isaacson, David and Masden, John (1976). Markov Chains. New York: John Wiley.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "How to Set Parameters. Cambridge",
                "authors": [
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Lightfoot",
                        "suffix": ""
                    }
                ],
                "year": 1990,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lightfoot, David (1990). How to Set Parameters. Cam- bridge, MA: MIT Press.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Formal Principles of Language Acquisition",
                "authors": [
                    {
                        "first": "Kenneth",
                        "middle": [],
                        "last": "Wexler",
                        "suffix": ""
                    },
                    {
                        "first": "Peter",
                        "middle": [],
                        "last": "Culicover",
                        "suffix": ""
                    }
                ],
                "year": 1980,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Wexler, Kenneth and Culicover, Peter (1980). Formal Principles of Language Acquisition. Cambridge, MA: MIT Press.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "The 8 parameter settings in the GW example, shown as a Markov structure, with transition probabilities omitted. Directed arrows between circles (states) represent possible nonzero (possible learner) transitions. The target grammar (in this case, number 5, setting [0 1 0]), lies at dead center."
            },
            "FIGREF1": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "shows a plot of the quantity p(m) = min{pi(rn)} as a function of m, the number of examples. Here Pi denotes the probability of being in state 1 at the end of m examples in the case where the learner started in state i. Naturally we want lim pi(m)= 1 and for this example this is indeed the case. The next figure shows a plot of the following quantity as a function of m, the number of examples. p(m) = min{pi(m)}"
            },
            "FIGREF2": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "Convergence as a function of number of examples. The probability of converging to the target state after m examples is plotted against m. The data from the target is assumed to be distributed uniformly over degree-0 sentences. The solid line represents TLA convergence times and the dotted line is a random walk learning algorithm (RWA) which actually converges fasler than the TLA in this case."
            },
            "FIGREF3": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "Rates of convergence for TLA with L1 as the target language for different distributions. The probability of converging to the target after m samples is plotted against log(m). The three curves show how unfavorable distributions can increase convergence times. The dashed nine assumes uniform distribution and is the same curve as plotted in figure 2."
            },
            "TABREF1": {
                "type_str": "table",
                "text": "~f f ...............",
                "content": "<table><tr><td/><td/><td colspan=\"2\">State of Initial Grammar</td><td>Probability of Not</td></tr><tr><td/><td/><td colspan=\"2\">(Markov Structure)</td><td>Converging to Target</td></tr><tr><td/><td/><td>Not Sink</td><td/><td>0.5</td></tr><tr><td/><td/><td>Sink</td><td/><td>1.0</td></tr><tr><td/><td/><td>Not Sink</td><td/><td>0.15</td></tr><tr><td/><td/><td>Sink</td><td/><td>1.0</td></tr><tr><td/><td/><td>Not Sink</td><td/><td>0.33</td></tr><tr><td/><td/><td>Sink</td><td/><td>1.0</td></tr><tr><td/><td/><td>Not Sink</td><td/><td>0.33</td></tr><tr><td/><td/><td>Not Sink</td><td/><td>1.0</td></tr><tr><td/><td/><td>Not Sink</td><td/><td>0.33</td></tr><tr><td/><td/><td>Sink</td><td/><td>1.0</td></tr><tr><td/><td/><td>Not Sink</td><td/><td>0.08</td></tr><tr><td/><td/><td>Sink</td><td/><td>1.0</td></tr><tr><td>~m</td><td/><td/><td/><td/></tr><tr><td>~o</td><td/><td/><td/><td/></tr><tr><td>-@ ;\u00b01 6</td><td>16o</td><td>260 Number of examples (m}</td><td>360</td><td>460</td></tr></table>",
                "html": null,
                "num": null
            },
            "TABREF2": {
                "type_str": "table",
                "text": "Mean and standard deviation convergence times to target 5 (English) given different distributions over the target language, and a uniform distribution over initial states. The first distribution is uniform over the target",
                "content": "<table><tr><td colspan=\"2\">language; the other distributions</td><td>alter the value of a as discussed in the main text.</td></tr><tr><td>Learning</td><td>Mean abs.</td><td>Std. Dev.</td></tr><tr><td>scenario</td><td>time</td><td>of abs. time</td></tr><tr><td>TEA (uniform)</td><td>34.8</td><td>22.3</td></tr><tr><td>TLA (a = 0.99)</td><td>45000</td><td>33000</td></tr><tr><td>TLA (a = 0.9999)</td><td>4.5 \u00d7 106</td><td>3.3 \u00d7 l06</td></tr><tr><td>RW</td><td>9.6</td><td>10.1</td></tr></table>",
                "html": null,
                "num": null
            },
            "TABREF4": {
                "type_str": "table",
                "text": "Convergence rates derived from eigenvalue calculations.",
                "content": "<table><tr><td>Rate of Convergence</td></tr><tr><td>0(0.94 ~)</td></tr></table>",
                "html": null,
                "num": null
            }
        }
    }
}