| { |
| "paper_id": "W19-0110", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T06:30:56.742200Z" |
| }, |
| "title": "Learning exceptionality and variation with lexically scaled MaxEnt *", |
| "authors": [ |
| { |
| "first": "Coral", |
| "middle": [], |
| "last": "Hughto", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Massachusetts Amherst", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Lamont", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Massachusetts Amherst", |
| "location": {} |
| }, |
| "email": "alamont@linguist.umass.edu" |
| }, |
| { |
| "first": "Brandon", |
| "middle": [], |
| "last": "Prickett", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Massachusetts Amherst", |
| "location": {} |
| }, |
| "email": "bprickett@linguist.umass.edu" |
| }, |
| { |
| "first": "Gaja", |
| "middle": [], |
| "last": "Jarosz", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Massachusetts Amherst", |
| "location": {} |
| }, |
| "email": "jarosz@linguist.umass.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "A growing body of research in phonology addresses the representation and learning of variable processes and exceptional, lexically conditioned processes. Linzen et al. (2013) present a MaxEnt model with additive lexical scales to account for data exhibiting both variation and exceptionality. In this paper, we implement a learning model for lexically scaled MaxEnt grammars which we show to be successful across a range of data containing patterns of variation and exceptionality. We also explore how the model's parameters and the rate of exceptionality in the data influence its performance and predictions for novel forms.", |
| "pdf_parse": { |
| "paper_id": "W19-0110", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "A growing body of research in phonology addresses the representation and learning of variable processes and exceptional, lexically conditioned processes. Linzen et al. (2013) present a MaxEnt model with additive lexical scales to account for data exhibiting both variation and exceptionality. In this paper, we implement a learning model for lexically scaled MaxEnt grammars which we show to be successful across a range of data containing patterns of variation and exceptionality. We also explore how the model's parameters and the rate of exceptionality in the data influence its performance and predictions for novel forms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "While phonological research often focuses on categorical generalizations, a growing body of research addresses the representation and learning of variable processes and exceptional processes, where application is lexically conditioned (see Coetzee and Pater (2011) and Pater (2010) for overviews). A few recent studies have modeled processes that exhibit both variation and exceptionality (Hayes and Londe, 2006; Pater et al., 2012; Linzen et al., 2013; Nazarov, 2018; Shih, 2018; Zymet, 2018) . Linzen et al. (2013) model co-existing exceptionality and variation in Russian using a Maximum En-\u21e4 We wish to thank the SCiL 2019 anonymous reviewers, the UMass Linguistics Sound Workshop, Brendan O'Connor, and Joe Pater for valuable comments. tropy (MaxEnt) grammar (Goldwater and Johnson, 2003) with additive, lexically specified scales. Russian contains a vowel alternation process that exhibits both variation and idiosyncratic lexical conditioning (exceptionality). Linzen et al. show that speakers apply this process variably and that its variation differs across lexical items. In their lexical scaling framework, each lexical item is associated with a vector of scales that are added to the general weights of the grammar's constraints. These summed weights are used to calculate the probability of the input's surface realization. This allows the likelihood of a phonological process to differ across morphemes, since the scales can modulate how constraints are weighted for different lexemes. While Linzen et al. (2013) show that a lexically scaled MaxEnt grammar can successfully represent Russian speakers' knowledge of a pattern that is both variable and exceptional, they do not show how such a grammar would be learned.", |
| "cite_spans": [ |
| { |
| "start": 240, |
| "end": 264, |
| "text": "Coetzee and Pater (2011)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 269, |
| "end": 281, |
| "text": "Pater (2010)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 389, |
| "end": 412, |
| "text": "(Hayes and Londe, 2006;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 413, |
| "end": 432, |
| "text": "Pater et al., 2012;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 433, |
| "end": 453, |
| "text": "Linzen et al., 2013;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 454, |
| "end": 468, |
| "text": "Nazarov, 2018;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 469, |
| "end": 480, |
| "text": "Shih, 2018;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 481, |
| "end": 493, |
| "text": "Zymet, 2018)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 496, |
| "end": 516, |
| "text": "Linzen et al. (2013)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 764, |
| "end": 793, |
| "text": "(Goldwater and Johnson, 2003)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 968, |
| "end": 981, |
| "text": "Linzen et al.", |
| "ref_id": null |
| }, |
| { |
| "start": 1506, |
| "end": 1526, |
| "text": "Linzen et al. (2013)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we introduce a model for learning lexically scaled MaxEnt grammars from data exhibiting both variation and exceptionality. 1 The primary challenge for formalizing learning in this framework is generalizing appropriately beyond the learning data and limiting the learner's reliance on lexical scales. Since every morpheme can potentially scale the weight of every constraint, there is potential for massively over-fitting the learning data and failing to generalize. We approach this challenge as a problem of feature selection and seek a learner that utilizes scales (i.e., assigning them nonzero weights) only when needed to account for lexical conditioning. We propose an objective function relying on an L1 (linear) prior ( \u00a72), rather than the more commonly used L2 (quadratic) prior ( \u00a74.1), to formalize these criteria.", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 139, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our approach differs in a number of ways from previous models for learning exceptionality and variation. We assume the learner must induce a weighting for general phonological constraints and make lexical conditioning choices without prior knowledge of which lexical items behave exceptionally (Allen and Becker, 2015; Becker and Gouskova, 2016) . Rather than splitting the learning of general phonological patterns and the learning of exceptions/classes into distinct learning phases (Nazarov, 2018; Shih, 2018) , or treating the learning of lexical conditioning as emergent from repeated exposure to the lexicon (Zuraw, 2000; Zuraw, 2010) , we seek to formally characterize the criteria that favor the desired balance of lexical sensitivity and generalization in a model that optimizes general weights and lexical conditioning in parallel. Our approach is most similar to Moore-Cantwell and Pater (2016) ; however, we argue for an L1 prior rather than an L2 prior ( \u00a74.1). We demonstrate the capacity of our model to learn variation and exceptionality using a variety of toy languages based on the Russian process mentioned above ( \u00a73).", |
| "cite_spans": [ |
| { |
| "start": 294, |
| "end": 318, |
| "text": "(Allen and Becker, 2015;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 319, |
| "end": 345, |
| "text": "Becker and Gouskova, 2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 485, |
| "end": 500, |
| "text": "(Nazarov, 2018;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 501, |
| "end": 512, |
| "text": "Shih, 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 614, |
| "end": 627, |
| "text": "(Zuraw, 2000;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 628, |
| "end": 640, |
| "text": "Zuraw, 2010)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 874, |
| "end": 905, |
| "text": "Moore-Cantwell and Pater (2016)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We also explore the model's predictions for novel data, examining how the learner decides which patterns to treat as exceptional and which to generalize ( \u00a74). Previous behavioral investigations of speakers' productive knowledge of lexically conditioned (morpho-)phonological alternations have found that speakers extend statistical tendencies in the lexicon to novel forms (Zuraw, 2000; Ernestus and Baayen, 2003; Hayes and Londe, 2006; Hayes et al., 2009; Linzen et al., 2013; Becker and Gouskova, 2016) . In some cases, the absolute rates of application of a process in nonce forms closely follow rates observed in the lexicon, yielding so-called \"frequencymatching\" behavior (Hayes and Londe, 2006; Hayes et al., 2009; Zymet, 2018) . In other cases, however, rates of application of exceptional processes are systematically skewed lower as compared to the lexi-cal rates (Zuraw, 2000; Albright and Hayes, 2003; Ernestus and Baayen, 2003) . Under a variety of learning assumptions, frequency-matching behavior is not automatic. To better understand some of the factors that may play a role in these divergent findings, we examine the properties of the data distribution and parameters of the model that affect frequency-matching behavior on nonce forms.", |
| "cite_spans": [ |
| { |
| "start": 374, |
| "end": 387, |
| "text": "(Zuraw, 2000;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 388, |
| "end": 414, |
| "text": "Ernestus and Baayen, 2003;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 415, |
| "end": 437, |
| "text": "Hayes and Londe, 2006;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 438, |
| "end": 457, |
| "text": "Hayes et al., 2009;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 458, |
| "end": 478, |
| "text": "Linzen et al., 2013;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 479, |
| "end": 505, |
| "text": "Becker and Gouskova, 2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 679, |
| "end": 702, |
| "text": "(Hayes and Londe, 2006;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 703, |
| "end": 722, |
| "text": "Hayes et al., 2009;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 723, |
| "end": 735, |
| "text": "Zymet, 2018)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 875, |
| "end": 888, |
| "text": "(Zuraw, 2000;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 889, |
| "end": 914, |
| "text": "Albright and Hayes, 2003;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 915, |
| "end": 941, |
| "text": "Ernestus and Baayen, 2003)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2 Lexically scaled MaxEnt Linzen et al.'s (2013) scaled weights framework uses weighted constraints to represent probabilistic phonological patterns, and adds scales on those weights to represent lexicalized behavior for individual morphemes. In MaxEnt (Goldwater and Johnson, 2003) , the probability of some surface representation (SR), given a grammar and an underlying representation (UR), is calculated as in (1):", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 48, |
| "text": "Linzen et al.'s (2013)", |
| "ref_id": null |
| }, |
| { |
| "start": 253, |
| "end": 282, |
| "text": "(Goldwater and Johnson, 2003)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(i) = \\frac{e^{H_i}}{\\sum_{k \\in K_i} e^{H_k}}", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Here H i is the harmony of a given (UR, SR) pair i, and K i is the set of candidates that share the same UR as i (including i itself). Typically, harmony is the weighted sum of a candidate's constraint violations (Goldwater and Johnson, 2003) , however in Linzen et al.'s (2013) framework, harmony is a function of a candidate's violations, the general weights, and the relevant scales, as shown formally in (2):", |
| "cite_spans": [ |
| { |
| "start": 213, |
| "end": 242, |
| "text": "(Goldwater and Johnson, 2003)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 256, |
| "end": 278, |
| "text": "Linzen et al.'s (2013)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H_i = \\sum_{c \\in \\mathcal{C}} \\left(w_c + \\sum_{m \\in \\mu_i} s_{m,c}\\right) v_{i,c}", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Here is the set of constraints, w is the general weight of constraint , \u00b5 i is the set of morphemes in the (UR, SR) pair i, s m is the scale that morpheme m has for constraint , and v i is the number of violations assigned to candidate i by constraint . The parameters of the model are the general constraint weights and the additive lexical scales. For succinctness, we refer to these simply as \"weights\" and \"scales\", respectively. Every morpheme is associated with a scale for every constraint that is added to that constraint's weight. This model is closely related to other approaches relying on additive scales (Boersma and Hayes, 2001 ; Coetzee and Kawahara, 2013; Hsu and Jesney, 2016) and multiplicative scales (Kimper, 2011) . However, in Linzen et al.'s framework, scaling is not restricted to faithfulness constraints or systematic factors (e.g., register, frequency): all constraints are available for lexical scaling by all morphemes. Using both weights and scales enables the model to represent lexicalized exceptions by employing the scales to modulate the effect of some constraints. We leave exploring the relationship between this approach and indexed constraints (Kraska-Szlenk, 1995; Pater, 1996) , a closely related framework, to future work.", |
| "cite_spans": [ |
| { |
| "start": 617, |
| "end": 641, |
| "text": "(Boersma and Hayes, 2001", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 720, |
| "end": 734, |
| "text": "(Kimper, 2011)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1183, |
| "end": 1204, |
| "text": "(Kraska-Szlenk, 1995;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1205, |
| "end": 1217, |
| "text": "Pater, 1996)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We formalized learning as minimizing the objective function in (3), the sum of the negative log likelihood and an L1 prior on weights and scales. X", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "i log p(i)+C X 2 |w |+C X 2 X m2M |s m | (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Here, M is the set of morphemes in the language, and C is a parameter that controls the overall strength of the prior. Our goal was to determine whether learning of phonological generalizations could occur without formally distinguishing between weights and scales. Accordingly, this prior penalizes both weights and scales with a single strength parameter C. In simulations reported here, both weights and scales are restricted to nonnegative values, but this is not a inherent restriction of the model. For optimization, we used a form of gradient descent adapted for L1 priors -the \"L1 (Clipping)\" method described by Tsuruoka et al. (2009) .", |
| "cite_spans": [ |
| { |
| "start": 621, |
| "end": 643, |
| "text": "Tsuruoka et al. (2009)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To explore the capacity of this model to learn a range of variable and exceptional patterns, it was trained on four toy languages based on the Russian vowel alternation described by Linzen et al. (2013) .", |
| "cite_spans": [ |
| { |
| "start": 182, |
| "end": 202, |
| "text": "Linzen et al. (2013)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In Russian, underlyingly CV prepositions surface as C before words beginning with vowels or single consonants; we follow Linzen et al. (2013, \u00a75.1) in treating this alternation as deletion. Before words beginning with consonant clusters, vowel deletion is variable and lexically conditioned. For example, the vowel in /sa/ \"from, with\" variably surfaces with certain cluster-initial words (4a), categorically deletes with certain others (4b), and categorically surfaces with others (4c) (Linzen et al., 2013, 455) . 4a.", |
| "cite_spans": [ |
| { |
| "start": 487, |
| "end": 513, |
| "text": "(Linzen et al., 2013, 455)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "[s \u21e0 sa] mn\u00f3\u00fc@stv@m \"with a large amount, (mathematical) set\" b.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "[s \u21e0 *s@] prik\u00e1z@m \"with the order\" c. [*s \u21e0 s@] st@rik\u00f3m \"with the old man\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The factors influencing vowel deletion in Russian span multiple phonological dimensions such as stress and sonority profile. For the purposes of testing our learning model, we focused on whether words began with one or two consonants. The four toy languages consisted of 3 prefixes /ape-/, /ate-/, and /ake-/ concatenated with 420 stems, giving 1260 forms in total. Stems were all consonant-initial, beginning either with a single consonant (\"C-stems\"), or a biconsonantal cluster (\"CC-stems\"). Six consonants were used {v, r, l, n, s, t}, giving 6 unique Cstem types, and 36 CC-stem types. Each stem type was replicated 10 times, yielding 420 stems in total.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In all four languages, prefix vowels categorically deleted with C-stems, e.g., /ape-naba/ ! [apnaba]. Vowel deletion was conditioned with CC-stems, either categorically failing to apply ( \u00a73.1) or with its application subject to free variation ( \u00a73.2), lexical specification ( \u00a73.3), or both ( \u00a73.4).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We used three categorically evaluated constraints (see Linzen et al. (2013, 489-490) ): ALIGN, MAX, and *CCC. ALIGN prefers vowel deletion, and is violated by candidates containing the final prefix vowel. MAX disprefers vowel deletion, and is violated by candidates lacking the final prefix vowel. *CCC is violated by candidates with triconsonantal clusters, and so disprefers deletion with CC-stems.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 84, |
| "text": "Linzen et al. (2013, 489-490)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "After training, the model was tested by evaluating its performance on the learning data and its predictions on a set of nonce forms comprising 3 novel prefixes concatenated with 42 novel stems. Following previous work, we assume that predictions for novel forms are generated using only the general weights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Learning was evaluated according to quantitative and qualitative criteria. Quantitatively, learning was considered successful if the KL-Divergence (Kullback and Leibler, 1951) between the likelihood assigned by the model and the observed probability in the training data was close to zero, indicating that the model succeeded in accounting for the learning data. 2 Qualitatively, learning was considered successful only if the model appropriately divided weight between the general constraints and *CCC MAX ALIGN /ape-taba/ 11.5 0.0 4.5 O E a. apetaba 0 0 -1 0.00 0.00 b. aptaba 0 -1 0 1.00 1.00 /ape-tnaba/ 11.5 0.0 4.5 O E a. apetnaba 0 0 -1 1.00 1.00 b. aptnaba -1 -1 0 0.00 0.00", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 175, |
| "text": "(Kullback and Leibler, 1951)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Tableau 1: Categorical language the scales, only using scales when presented with lexically conditioned data. Finally, we required that the model generalizes the observed pattern to nonce forms, deleting (nearly) categorically for novel Cstems while predicting variation for CC-stems in languages with variation and/or lexical conditioning. For all experiments in this section, the model was run with weights and scales initialized at 0.0, for 20,000 epochs, with a learning rate of .001, and prior term C set to 1.0, unless otherwise noted. Overall, the model performed well, successfully learning the four toy languages and using the scales appropriately. In all runs, ALIGN received non-zero weight, reflecting (near) categorical vowel deletion with C-stems. In languages with variable or exceptional deletion, *CCC was weighted closer to ALIGN, predicting variation in nonce forms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning variation and exceptionality", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In the Categorical language, prefix vowels always delete with C-stems and never with CC-stems. The solution learned by the model captures this pattern using the general weights only, putting no weight on the scales, as summarized in Table ( 1). The weight on *CCC is much higher than the weight on ALIGN so that tri-consonantal clusters block vowel deletion, and the weight of ALIGN is above that of MAX, so that prefix vowels always delete with C-stems. *CCC MAX ALIGN General Weights 11.5 0.0 4.5 Morpheme Scales 0.0 0.0 0.0 The model's performance on forms in the training data is illustrated in Tableau (1) with a C-stem, /taba/, and a CC-stem, /tnaba/. Candidate probabilities observed in the training data are given in column O. Column E gives the expected candidate probabilities generated by the model, rounded to two decimal places. The model fits the training data extremely well (KL divergence \u21e1 0.002) and, because only the general weights are used, the model predicts that the trained pattern should generalize, yielding the same predicted probabilities for nonce forms.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 233, |
| "end": 240, |
| "text": "Table (", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Categorical language", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In the Variable language, prefix vowels always delete with C-stems and variably delete 33% of the time with CC-stems. As desired, the model captures this pattern using the general weights only, putting no weight on the scales, as shown in Table ( The model's performance on trained forms is illustrated in Tableau (2) below with a C-stem and a CC-stem. The weight of *CCC is above that of ALIGN, but by a smaller margin than in the Categorical language, yielding variable rather than categorical deletion with CC-stems. MAX is weighted below ALIGN, so that deletion occurs (nearly) categorically for C-stems. The probabilities generated by the model (E) fit the training data (O) extremely well (KL divergence \u21e1 0.002) and, because only the general constraints are used, the model predicts that the trained pattern should generalize, yielding the same predicted probabilities for nonce forms. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 239, |
| "end": 246, |
| "text": "Table (", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Variable language", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The Lexical language is identical to the Categorical language, except that one prefix, /ape-/, is exceptional: its vowel always deletes with CC-stems. Averaged across the lexicon, the rate of deletion with CC-stems is therefore 33%, but this pattern cannot be captured using the general weights alone. The scales must be used to distinguish the behavior of the exceptionally deleting prefix from the other two prefixes. As Table ( 3) shows, the model's solution weights the general constraints in the same order as in the Categorical language: *CCC > ALIGN > MAX. The model additionally scales up the weight of ALIGN for the deleting prefix /ape-/, yielding (near) categorical deletion for it, and scales up the weight of *CCC for each of the non-deleting prefixes, preventing deletion for those prefixes. The model's performance on forms in the training data is illustrated in Tableau (3) with a CCstem /tnaba/ paired with a non-deleting prefix /ake-/, and the deleting prefix /ape-/. The weights shown for each input are the sums of the general weights and scales associated with the input morphemes for each constraint. The model's solution fits the training data well (KL divergence \u21e1 0.004), predicting deletion for the deleting prefix and no deletion for each non-deleting prefix. These weights additionally yield deletion of all prefix vowels before Cstems (not shown) in the learning data, as expected. Since this is captured by general constraint weights, the same prediction is made for novel C-stems.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 423, |
| "end": 430, |
| "text": "Table (", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Lexical language", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "*CCC MAX ALIGN General Weights 4.6 0.0 4.1 Deleting Prefix 0.0 0.0 6.4 Non-Deleting Prefixes 5.3 0.0 0.0 Stems 0.0 0.0 0.0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical language", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Because the general weight of ALIGN is somewhat lower but still close to the general weight of *CCC, variable deletion (37%) is predicted for novel prefixes attached to novel CC-stems. Tableau (4) illustrates with the nonce prefix /aPe-/ and the nonce stem /pmaba/. Because this form was not present in the training data, only the expected probabilities are reported. As discussed above, predicting variable deletion is desirable given experimental find-*CCC MAX ALIGN /ake-tnaba/ 10.0 0.0 4.1 O E a. aketnaba 0 0 -1 1.00 1.00 b. aktnaba -1 -1 0 0.00 0.00 /ape-tnaba/ 4.6 0.0 10.5 O E a. apetnaba 0 0 -1 0.00 0.00 b. aptnaba -1 -1 0 1.00 1.00", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical language", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Tableau 3: Lexical language -known prefixes and stems; /ape-/ exceptionally undergoes vowel deletion with CC-stems ings that speakers extend lexical trends to nonce forms (Hayes et al., 2009) . Deletion is the dispreferred outcome in both the training data and the predictions for novel forms, but the predicted rate of deletion for novel forms (37%) is a little higher than that observed in the training data (33%). Interestingly, by using scales for each prefix, the model did not single out any prefix as qualitatively exceptional, despite the fact that such a solution is available. Removing the weight from the scales of the non-deleting prefixes and dividing it between the weight of general *CCC and the deleting prefix's scale of ALIGN produces a solution that is identical in terms of fit to the training data and the total sum of weights across all constraints and scales. That solution identifies only the deleting prefix as exceptional, and produces different predictions for nonce forms. The proposed objective function does not always differentiate among distinct ways of encoding exceptionality. The solution selected by the model in this experiment is arbitrarily influenced by starting the weights at zero. In experiments with weights initialized to random values between 0 and 10, the solutions selected by the model all have equivalent fit to the training data and total weight but vary somewhat in terms of how exceptionality is encoded and the deletion rate predicted for nonce forms. The availability of such varied solutions depends on the rate *CCC MAX ALIGN /aPe-pmaba/ 4.6 0.0 4.1 E a. aPepmaba 0 0 -1 0.63 b. aPpmaba -1 -1 0 0.37", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 191, |
| "text": "(Hayes et al., 2009)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical language", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Tableau 4: Lexical language -nonce prefix and stem of exceptionality in the training data and the prior. These factors are further explored in \u00a74.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical language", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The Fit with the training data is not as close as with the other languages due to pervasive exceptionality: 20% of the stems (84 morphemes) must utilize scales to capture their behavior, which conflicts with the prior's pressure to keep the total weights and scales low. The effect of the prior is explored systematically in the next section, but it is worth noting here that a closer fit with the training data for this language is straightforwardly achieved with a weaker prior; for example, setting C = 0.1 yields a deletion rate of 97% for /ape-vraba/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Variable-Lexical language", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "This section examines the model's predictions for nonce data, focusing on how the choice of the prior and the rate of exceptionality in the training data affect generalization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generalizing from exceptional data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Recall that the previous section reported on experiments with the prior term C set to 1.0. Here, we vary C and examine its effects on the model's predictions, using the Lexical language as a test case. Unsurprisingly, the model's fit to the training data decreases as the strength of the prior increases, as there is more pressure to keep all weights and scales close to zero. The model's predictions for novel forms also vary with C, as shown in Figure (1) , which plots the probability of deletion for CC-stems by values of C. As C increases, there is a poorer fit to the training data: known forms which should undergo vowel deletion are slightly less likely to, and known forms which should not undergo vowel deletion are slightly more likely to.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 447, |
| "end": 457, |
| "text": "Figure (1)", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of the prior", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The most striking trend is the convergence of nonce form behavior with the behavior of nondeleting forms. In the Lexical language, two prefixes, and thus two-thirds of the data, categorically do not undergo deletion with CC-stems. As discussed in \u00a73.3, these proportions make multiple ways of encoding exceptionality available to the model. When the prior is weak, the model encodes exceptionality in a distributed way, and its predicted deletion rate for novel forms is intermediate between the deleting and non-deleting forms in the training data. When the prior is strong, however, the learner is forced to set more weights to zero, and the nondeleting forms in the learning data are more easily accommodated by the general constraint weights. This leads the learner to designate one of the prefixes as exceptional and to generalize to novel forms on the basis of the non-deleting prefixes. Thus, with a stronger prior, there is more pressure on the learner to over-extend the more general pattern in the data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of the prior", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "This pattern is clear when we examine the learned weights. Table (5) reports the weights learned with C set to 0.5 and 30. With C set low, the learner assigns weight to the exceptionally deleting prefix as well as the non-deleting prefixes. With C set high, the learner only assigns weight to the exceptional prefix, picking it out as exceptional. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of the prior", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Following Moore-Cantwell and Pater (2016) , this section reports the effect of varying the proportion of exceptional forms in the training data on nonce form predictions. To test this, we started with the Categorical language, in which prefix vowels always delete with C-stems but never delete with CC-stems, and then created data sets which increased the per-centage of CC-stems that trigger deletion of the prefix vowel by 10% increments, forming a total of 11 data sets (with deletion rates of 0%, 10%, . . . , 90%, 100%). In these simulations, epochs were increased up to 80000 (we found this to be necessary to guarantee convergence for languages with pervasive exceptionality and weaker priors). Figure ( 2) plots the probability of deletion as a function of the percentage of triggering CC-stems in the training data. To show how the strength of the prior interacts with the rate of exceptionality in the data, we show curves for three settings of the prior parameter (C = 0.1, 0.5, 1.0). The patterns are qualitatively similar for all three settings, with closer fit to the data for weaker priors. As the percentage of triggering CC-stems in the training data increases, the probability of deleting the prefix vowel before any CC-stem increases. For trained stems, the probability of deleting with a non-triggering stem is always much lower than the probability of deleting with a triggering stem, with rates closer to categorical for lower C values. The rate of exceptionality affects learning of both the majority and minority patterns: the more extreme the imbalance, the more poorly the minority pattern is learned and the more categorically the majority pattern is learned.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 41, |
| "text": "Moore-Cantwell and Pater (2016)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 702, |
| "end": 710, |
| "text": "Figure (", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of exceptionality", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The behavior of nonce forms mirrors the behavior of non-triggering stems when they form a ma-jority of the training data (0%-30%), and mirros the behavior of triggering stems when they form a majority in the training data (60%-100%), with the probability of deleting in nonce forms rising sharply across those data sets where there is not a clear majority (40%-50%). This indicates that, when there is a clear majority pattern, the model more strongly trends towards using the general weights to capture the majority pattern and the scales to capture the behavior of exceptional forms. When there is no clear majority pattern, the model will trend towards learning general weights which more closely reflect the lexical statistics in the training data, using scales to account for the idiosyncratic behavior of each stem.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of exceptionality", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Also noteworthy are the non-linear shape and the displacement of the nonce form curves. As Moore-Cantwell and Pater (2016) found for indexed constraints, we show here that the lexically scaled model also predicts nonce form rates that exaggerate the proportions in the training data. For example, when 70% of the stems trigger deletion in the training data, the model with C = 1.0 exaggerates this to over 85% in novel forms, and when 30% of stems trigger deletion in training, the model predicts fewer than 15% deletion in novel forms. Our results demonstrate two further influences. First, the exaggeration effect is greater for weaker priors since the curves are overall steeper. Second, the curve is shifted leftward: when 50% of the CCstems are triggers, the predicted rate of deletion for nonce forms is above 50%, regardless of C. The predicted deletion rate is generally higher than might be expected on the basis of the trained deletion rate in CC-stems alone. The presence of categorically deleting C-stems in the data exerts an independent pressure to weight ALIGN more heavily than MAX, favoring deletion overall. When the C = 1.0 model is trained without C-stems (not shown), no skew is predicted: 50% deletion is predicted for nonce forms in the 50% training condition. These results indicate that predictions for nonce forms in one context can be influenced by other processes in the language.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 122, |
| "text": "Moore-Cantwell and Pater (2016)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of exceptionality", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Existing MaxEnt models of phonology overwhelmingly utilize L2 priors rather than L1 priors (Goldwater and Johnson, 2003; Wilson, 2006; Pater et al., 2012) . For our purposes, however, we found that the choice of an L1 prior was crucial. While both L1 and L2 priors penalize higher weights, L1 priors are more effective for learning sparse vectors of weights, with as many zeroes as possible (Yan, 2016) . The primary challenge for learning in the lexically scaled MaxEnt framework is to use scales sparingly. This requires a strong pressure to set weights exactly to zero, which an L1 prior provides. L2 priors favor solutions with small weights distributed across many parameters; setting weights to zero is not generally the optimal solution.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 120, |
| "text": "(Goldwater and Johnson, 2003;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 121, |
| "end": 134, |
| "text": "Wilson, 2006;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 135, |
| "end": 154, |
| "text": "Pater et al., 2012)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 391, |
| "end": 402, |
| "text": "(Yan, 2016)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why not L2 regularization?", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "X i log p(i) + 1 2 2 ( X 2 w 2 + X 2 X m2M s 2 m ) (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why not L2 regularization?", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In experiments with an L2 prior, we found there was no weighting of the prior that simultaneously eliminated weights from the scales in languages without exceptionality while satisfactorily accounting for the training data. An example of the weights learned for the Variable language with a weak L2 prior is shown in Table (6). These weights were learned using the standard L-BFGS-B optimizer (Byrd et al., 1995) and the objective function in (5).", |
| "cite_spans": [ |
| { |
| "start": 393, |
| "end": 412, |
| "text": "(Byrd et al., 1995)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why not L2 regularization?", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "3.00 0.00 2.50 Prefix Scales 1.00 0.00 0.80 C-Stem Scales 0.00 0.00 0.08 CC-Stem Scales 0.01 0.01 0.00 While predicted probabilities for trained C-stems and CC-stems fit the training data well (Tableau 6), the model makes wide-spread use of scales: all morphemes use scales to some degree even though the Variable language does not require them. Consequently, the weight of ALIGN is not high enough to predict (near) categorical deletion for C-stems. The wide-spread use of scales also prevents the model from generalizing the rate of deletion to novel forms: deletion is predicted to apply more frequently to novel forms. Using a stronger prior (lowering ), all weights and scales decrease, but weight remains on the scales and the fit with the training data deteriorates. Thus, the L2 prior fails to predict frequency-matching behavior for free variation, predicting skews not only for lexically-conditioned variation (as predicted by the L1 prior) but also for patterns without lexical conditioning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "*CCC MAX ALIGN General Weights", |
| "sec_num": null |
| }, |
| { |
| "text": "As discussed earlier, we characterized successful learning in terms of feature selection, using scales only when needed to capture lexical conditioning, and we have shown that the L2 prior does not succeed on this criterion, affecting generalization of categorical, lexicalized, and freely variable processes. However, the extent to which language users encode predictable properties of lexical items is not known, and further behavioral research is needed to understand whether and how generalization of variable and exceptional processes is skewed. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "*CCC MAX ALIGN General Weights", |
| "sec_num": null |
| }, |
| { |
| "text": "We found that a general L1 prior was sufficient for keeping the model from overusing scales and generalizing beyond the data. The objective function penalizes weights and scales equally, but scales are more costly when many morphemes require the same scaling. Thus, the pressure against scales follows automatically from their limited utility in the grammar. This contrasts with other approaches to MaxEnt learning of exceptionality, which require either additional priors on some constraints to ensure that the model generalizes (Pater et al., 2012) or distinct phases of learning for general and lexical generalizations (Nazarov, 2018; Shih, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 530, |
| "end": 550, |
| "text": "(Pater et al., 2012)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 622, |
| "end": 637, |
| "text": "(Nazarov, 2018;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 638, |
| "end": 649, |
| "text": "Shih, 2018)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "No extra penalty for scales", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We also found that, when lexical exceptionality is present, there are multiple correct solutions for a given problem with different predictions about generalization. Any model that is tasked with learning both general and exceptional patterns must decide which items in the lexicon are the exceptions and which represent the generalizable pattern. We found that the objective function for our model favors overextending clear majority patterns, but is more ambivalent about what to treat as exceptional given balanced data ( \u00a73.3). This ambivalence was modulated by the strength of the prior ( \u00a74.1) and the presence of related processes in the data ( \u00a74.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple correct solutions", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "A number of avenues for future work remain. As mentioned in \u00a72, the differences between this approach and lexically indexed constraints (Pater, 2010) remain to be explored, as do differences from alternative models for learning variability and exceptionality (Nazarov, 2018; Shih, 2018) . Another natural continuation of this research is to apply the learning paradigm described here to more realistic datasets. Following Pater (2007) , Linzen et al. (2013, 489) limit scales to only penalizing exponents of the morphemes they are associated with. This locality condition will be important to incorporate before exploring more complex datasets. Further investigations of the effect of the prior on generalization are needed. While we investigated the consequences of varying C, our focus was limited to the Lexical language. We found that the data distribution, the strength of the prior, and the existence of related processes in the language already introduce strong pressures on the learner's encoding of exceptionality. In some cases, however, we found the proposed prior did not uniquely favor a single solution. Ultimately, these and other modeling decisions require an understanding of how humans perform under similar learning conditions. Connections with experimental work on how humans generalize variable and exceptional patterns is crucial to defining desirable behavior for any learning model.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 149, |
| "text": "(Pater, 2010)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 259, |
| "end": 274, |
| "text": "(Nazarov, 2018;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 275, |
| "end": 286, |
| "text": "Shih, 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 422, |
| "end": 434, |
| "text": "Pater (2007)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 437, |
| "end": 462, |
| "text": "Linzen et al. (2013, 489)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Future work", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "This paper introduces and tests a method for learning lexically scaled MaxEnt grammars. We show that an L1 prior places strong constraints on the encoding of exceptionality and identify a number of factors that affect the model's performance on training data and generalizations to nonce forms, which can be tested against human behavior.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our code is publicly available at https://github. com/chughto/Lexically-Scaled-MaxEnt", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Proceedings of the Society for Computation in Linguistics (SCiL) 2019, pages 91-101. New York City, New York, January 3-6, 2019", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "MaxEnt grammars cannot exactly represent categorical behavior, but probabilities can get arbitrarily close to 0 or 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Rules vs. analogy in English past tenses: A computational/experimental study", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Albright", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruce", |
| "middle": [], |
| "last": "Hayes", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Cognition", |
| "volume": "90", |
| "issue": "2", |
| "pages": "119--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Albright and Bruce Hayes. 2003. Rules vs. analogy in English past tenses: A computa- tional/experimental study. Cognition, 90(2):119-161.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning alternations from surface forms with sublexical phonology. Unpublished manuscript", |
| "authors": [ |
| { |
| "first": "Blake", |
| "middle": [], |
| "last": "Allen", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Becker", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Blake Allen and Michael Becker. 2015. Learning al- ternations from surface forms with sublexical phonol- ogy. Unpublished manuscript, University of British Columbia and Stony Brook University. Available as lingbuzz/002503.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Sourceoriented generalizations as grammar inference in Russian vowel deletion", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Becker", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Gouskova", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Linguistic Inquiry", |
| "volume": "47", |
| "issue": "3", |
| "pages": "391--425", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Becker and Maria Gouskova. 2016. Source- oriented generalizations as grammar inference in Rus- sian vowel deletion. Linguistic Inquiry, 47(3):391- 425.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Empirical tests of the Gradual Learning Algorithm", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Boersma", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruce", |
| "middle": [], |
| "last": "Hayes", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Linguistic Inquiry", |
| "volume": "32", |
| "issue": "1", |
| "pages": "45--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Boersma and Bruce Hayes. 2001. Empirical tests of the Gradual Learning Algorithm. Linguistic Inquiry, 32(1):45-86.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A limited memory algorithm for bound constrained optimization", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Richard", |
| "suffix": "" |
| }, |
| { |
| "first": "Peihuang", |
| "middle": [], |
| "last": "Byrd", |
| "suffix": "" |
| }, |
| { |
| "first": "Jorge", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ciyou", |
| "middle": [], |
| "last": "Nocedal", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "SIAM Journal on Scientific Computing", |
| "volume": "16", |
| "issue": "5", |
| "pages": "1190--1208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard H. Byrd, Peihuang Lu, Jorge Nocedal, and Ciyou Zhu. 1995. A limited memory algorithm for bound constrained optimization. SIAM Journal on Scientific Computing, 16(5):1190-1208.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Frequency biases in phonological variation. Natural Language and Linguistic Theory", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Andries", |
| "suffix": "" |
| }, |
| { |
| "first": "Shigeto", |
| "middle": [], |
| "last": "Coetzee", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kawahara", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "31", |
| "issue": "", |
| "pages": "47--89", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andries W. Coetzee and Shigeto Kawahara. 2013. Fre- quency biases in phonological variation. Natural Lan- guage and Linguistic Theory, 31(1):47-89.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "The place of variation in phonological theory", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Andries", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Coetzee", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pater", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "The Handbook of Phonological Theory", |
| "volume": "", |
| "issue": "", |
| "pages": "401--434", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andries W. Coetzee and Joe Pater. 2011. The place of variation in phonological theory. In John A. Gold- smith, Jason Riggle, and Alan C. L. Yu, editors, The Handbook of Phonological Theory, pages 401-434. Wiley, 2nd edition.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Predicting the unpredictable: Interpreting neutralized segments in Dutch. Language", |
| "authors": [ |
| { |
| "first": "Mirjam", |
| "middle": [], |
| "last": "Ernestus", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "Harald" |
| ], |
| "last": "Baayen", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "5--38", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mirjam Ernestus and R. Harald Baayen. 2003. Pre- dicting the unpredictable: Interpreting neutralized seg- ments in Dutch. Language, pages 5-38.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Learning OT constraint rankings using a Maximum Entropy model", |
| "authors": [ |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the Stockholm Workshop on Variation within Optimality Theory", |
| "volume": "", |
| "issue": "", |
| "pages": "111--120", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sharon Goldwater and Mark Johnson. 2003. Learn- ing OT constraint rankings using a Maximum Entropy model. In Proceedings of the Stockholm Workshop on Variation within Optimality Theory, pages 111-120.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Stochastic phonological knowledge: The case of Hungarian vowel harmony", |
| "authors": [ |
| { |
| "first": "Bruce", |
| "middle": [], |
| "last": "Hayes", |
| "suffix": "" |
| }, |
| { |
| "first": "Zsuzsa", |
| "middle": [], |
| "last": "Czir\u00e1ky Londe", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Phonology", |
| "volume": "23", |
| "issue": "1", |
| "pages": "59--104", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bruce Hayes and Zsuzsa Czir\u00e1ky Londe. 2006. Stochas- tic phonological knowledge: The case of Hungarian vowel harmony. Phonology, 23(1):59-104.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Natural and unnatural constraints in Hungarian vowel harmony", |
| "authors": [ |
| { |
| "first": "Bruce", |
| "middle": [], |
| "last": "Hayes", |
| "suffix": "" |
| }, |
| { |
| "first": "P\u00e9ter", |
| "middle": [], |
| "last": "Sipt\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "Kie", |
| "middle": [], |
| "last": "Zuraw", |
| "suffix": "" |
| }, |
| { |
| "first": "Zsuzsa", |
| "middle": [], |
| "last": "Londe", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Language", |
| "volume": "85", |
| "issue": "4", |
| "pages": "822--863", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bruce Hayes, P\u00e9ter Sipt\u00e1r, Kie Zuraw, and Zsuzsa Londe. 2009. Natural and unnatural constraints in Hungarian vowel harmony. Language, 85(4):822-863.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Scalar positional markedness and faithfulness in Harmonic Grammar", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Hsu", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Jesney", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Annual Meeting of the Chicago Linguistic Society", |
| "volume": "51", |
| "issue": "", |
| "pages": "241--255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Hsu and Karen Jesney. 2016. Scalar positional markedness and faithfulness in Harmonic Grammar. In Proceedings of the Annual Meeting of the Chicago Linguistic Society, volume 51, pages 241-255.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Competing Triggers: Transparency and Opacity in Vowel Harmony", |
| "authors": [ |
| { |
| "first": "Wendell", |
| "middle": [], |
| "last": "Kimper", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wendell Kimper. 2011. Competing Triggers: Trans- parency and Opacity in Vowel Harmony. Ph.D. thesis, University of Massachusetts Amherst.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The phonology of stress in Polish", |
| "authors": [ |
| { |
| "first": "Iwona", |
| "middle": [], |
| "last": "Kraska-Szlenk", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iwona Kraska-Szlenk. 1995. The phonology of stress in Polish. Ph.D. thesis, University of Illinois at Urbana- Champaign.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "On information and sufficiency", |
| "authors": [ |
| { |
| "first": "Solomon", |
| "middle": [], |
| "last": "Kullback", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "A" |
| ], |
| "last": "Leibler", |
| "suffix": "" |
| } |
| ], |
| "year": 1951, |
| "venue": "The Annals of Mathematical Statistics", |
| "volume": "22", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Solomon Kullback and Richard A. Leibler. 1951. On information and sufficiency. The Annals of Mathemat- ical Statistics, 22(1):79-86, 03.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Lexical and phonological variation in Russian prepositions", |
| "authors": [ |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sofya", |
| "middle": [], |
| "last": "Kasyanenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Gouskova", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Phonology", |
| "volume": "30", |
| "issue": "3", |
| "pages": "453--515", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tal Linzen, Sofya Kasyanenko, and Maria Gouskova. 2013. Lexical and phonological variation in Russian prepositions. Phonology, 30(3):453-515.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Gradient exceptionality in Maximum Entropy grammar with lexically specific constraints", |
| "authors": [ |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Cantwell", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Pater", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Catalan Journal of Linguistics", |
| "volume": "15", |
| "issue": "", |
| "pages": "53--66", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Claire Moore-Cantwell and Joe Pater. 2016. Gradi- ent exceptionality in Maximum Entropy grammar with lexically specific constraints. Catalan Journal of Lin- guistics, 15:53-66.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Learning within-and betweenword variation in probabilistic OT grammars", |
| "authors": [ |
| { |
| "first": "Aleksei", |
| "middle": [], |
| "last": "Nazarov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Supplemental Proceedings of the 2017 Annual Meeting on Phonology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aleksei Nazarov. 2018. Learning within-and between- word variation in probabilistic OT grammars. In Gillian Gallagher, Maria Gouskova, and Sora Yin, ed- itors, Supplemental Proceedings of the 2017 Annual Meeting on Phonology, Washington, DC. Linguistic Society of America.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Learning probabilities over underlying representations", |
| "authors": [ |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Pater", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Jesney", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Staubs", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Twelfth Meeting of the Special Interest Group on Computational Morphology and Phonology", |
| "volume": "", |
| "issue": "", |
| "pages": "62--71", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joe Pater, Karen Jesney, Robert Staubs, and Brian Smith. 2012. Learning probabilities over underlying repre- sentations. In Proceedings of the Twelfth Meeting of the Special Interest Group on Computational Mor- phology and Phonology, pages 62-71. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Consequences of Constraint Ranking", |
| "authors": [ |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Pater", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joe Pater. 1996. Consequences of Constraint Ranking. Ph.D. thesis, McGill University.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The locus of exceptionality: Morphemespecific phonology as constraint indexation", |
| "authors": [ |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Pater", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "University of Massachusetts Occasional Papers in Linguistics 32: Papers in Optimality Theory III", |
| "volume": "", |
| "issue": "", |
| "pages": "259--296", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joe Pater. 2007. The locus of exceptionality: Morpheme- specific phonology as constraint indexation. In Leah Bateman, Michael O'Keefe, Ehren Reilly, and Adam Werle, editors, University of Massachusetts Occa- sional Papers in Linguistics 32: Papers in Optimality Theory III, pages 259-296. GLSA, Amherst, MA.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Morpheme-specific phonology: Constraint indexation and inconsistency resolution", |
| "authors": [ |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Pater", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Phonological argumentation: Essays on evidence and motivation", |
| "volume": "", |
| "issue": "", |
| "pages": "123--154", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joe Pater. 2010. Morpheme-specific phonology: Con- straint indexation and inconsistency resolution. In Steve Parker, editor, Phonological argumentation: Es- says on evidence and motivation, pages 123-154. Equinox, London.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Learning lexical classes from variable phonology", |
| "authors": [ |
| { |
| "first": "Stephanie", |
| "middle": [ |
| "S" |
| ], |
| "last": "Shih", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Selected Papers from Asian Junior Linguists Conference 2", |
| "volume": "", |
| "issue": "", |
| "pages": "1--15", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephanie S. Shih. 2018. Learning lexical classes from variable phonology. In Yuki Seo and Haruya Ogawa, editors, Selected Papers from Asian Junior Linguists Conference 2, pages 1-15. ICUWPL.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Stochastic gradient descent training for L1-regularized log-linear models with cumulative penalty", |
| "authors": [ |
| { |
| "first": "Yoshimasa", |
| "middle": [], |
| "last": "Tsuruoka", |
| "suffix": "" |
| }, |
| { |
"first": "Jun'ichi",
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| }, |
| { |
"first": "Sophia",
| "middle": [], |
| "last": "Ananiadou", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "1", |
| "issue": "", |
| "pages": "477--485", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshimasa Tsuruoka, Jun'ichi Tsujii, and Sophia Ana- niadou. 2009. Stochastic gradient descent training for L1-regularized log-linear models with cumulative penalty. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th Interna- tional Joint Conference on Natural Language Process- ing of the AFNLP: Volume 1, pages 477-485. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Learning phonology with substantive bias: An experimental and computational study of velar palatalization", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Cognitive Science", |
| "volume": "30", |
| "issue": "", |
| "pages": "945--982", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Wilson. 2006. Learning phonology with substan- tive bias: An experimental and computational study of velar palatalization. Cognitive Science, 30:945-982.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "L1 norm regularization and sparsity explained for dummies", |
| "authors": [ |
| { |
"first": "Shi",
"middle": [],
"last": "Yan",
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shi Yan. 2016. L1 norm regularization and sparsity explained for dummies. https://medium.com/ mlreview/l1-norm-regularization- and-sparsity-explained-for-dummies- 5b0e4be3938a.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Patterned Exceptions in Phonology", |
| "authors": [ |
| { |
| "first": "Kie", |
| "middle": [], |
| "last": "Zuraw", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kie Zuraw. 2000. Patterned Exceptions in Phonology. Ph.D. thesis, University of California, Los Angeles.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "A model of lexical variation and the grammar with application to Tagalog nasal substitution", |
| "authors": [ |
| { |
| "first": "Kie", |
| "middle": [], |
| "last": "Zuraw", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Natural Language & Linguistic Theory", |
| "volume": "28", |
| "issue": "", |
| "pages": "417--472", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kie Zuraw. 2010. A model of lexical variation and the grammar with application to Tagalog nasal sub- stitution. Natural Language & Linguistic Theory, 28(2):417-472.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Lexical propensities in phonology: corpus and experimental evidence, grammar, and learning", |
| "authors": [ |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Zymet", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jesse Zymet. 2018. Lexical propensities in phonol- ogy: corpus and experimental evidence, grammar, and learning. Ph.D. thesis, University of California, Los Angeles.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "text": "Probability of deletion with CC-stems by C values", |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "text": "Lexical weights and mean scales, C = 0.5 and 30", |
| "type_str": "figure" |
| }, |
| "FIGREF3": { |
| "num": null, |
| "uris": null, |
| "text": "Probability of deletion with CC-stems by percentage of deletion-triggering CC-stems in the training data", |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "text": "Categorical weights and mean scales", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF1": { |
| "text": "Variable weights and mean scales", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF3": { |
| "text": "Lexical weights and mean scales", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF5": { |
| "text": "Variable-Lexical weights and mean scales", |
| "type_str": "table", |
| "content": "<table><tr><td/><td>*CCC</td><td>MAX</td><td>ALIGN</td><td/></tr><tr><td colspan=\"4\">/ape-vraba/ 4.8 0.0 5.5</td><td>O</td><td>E</td></tr><tr><td>a. apevraba</td><td>0</td><td>0</td><td>-1</td><td colspan=\"2\">0.00 0.33</td></tr><tr><td>b. apvraba</td><td>-1</td><td>-1</td><td>0</td><td colspan=\"2\">1.00 0.67</td></tr><tr><td colspan=\"4\">/ape-tnaba/ 4.8 0.0 4.5</td><td>O</td><td>E</td></tr><tr><td>a. apetnaba</td><td>0</td><td>0</td><td>-1</td><td colspan=\"2\">0.67 0.58</td></tr><tr><td>b. aptnaba</td><td>-1</td><td>-1</td><td>0</td><td colspan=\"2\">0.33 0.42</td></tr><tr><td colspan=\"6\">Tableau 5: Variable-Lexical language -known prefixes and</td></tr><tr><td colspan=\"6\">stems; /vraba/ exceptionally triggers prefix vowel deletion</td></tr></table>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF6": { |
| "text": "Variable weights and mean scales, L2 prior, 2 = 1", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| } |
| } |
| } |
| } |