| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:29:03.668399Z" |
| }, |
| "title": "Acquiring language from speech by learning to remember and predict", |
| "authors": [ |
| { |
| "first": "Cory", |
| "middle": [], |
| "last": "Shain", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ohio State University", |
| "location": {} |
| }, |
| "email": "shain.3@osu.edu" |
| }, |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ohio State University", |
| "location": {} |
| }, |
| "email": "elsner.14@osu.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Classical accounts of child language learning invoke memory limits as a pressure to discover sparse, language-like representations of speech, while more recent proposals stress the importance of prediction for language learning. In this study, we propose a broad-coverage unsupervised neural network model to test memory and prediction as sources of signal by which children might acquire language directly from the perceptual stream. Our model embodies several likely properties of real-time human cognition: it is strictly incremental, it encodes speech into hierarchically organized labeled segments, it allows interactive top-down and bottom-up information flow, it attempts to model its own sequence of latent representations, and its objective function only recruits local signals that are plausibly supported by human working memory capacity. We show that much phonemic structure is learnable from unlabeled speech on the basis of these local signals. We further show that remembering the past and predicting the future both contribute to the linguistic content of acquired representations, and that these contributions are at least partially complementary.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Classical accounts of child language learning invoke memory limits as a pressure to discover sparse, language-like representations of speech, while more recent proposals stress the importance of prediction for language learning. In this study, we propose a broad-coverage unsupervised neural network model to test memory and prediction as sources of signal by which children might acquire language directly from the perceptual stream. Our model embodies several likely properties of real-time human cognition: it is strictly incremental, it encodes speech into hierarchically organized labeled segments, it allows interactive top-down and bottom-up information flow, it attempts to model its own sequence of latent representations, and its objective function only recruits local signals that are plausibly supported by human working memory capacity. We show that much phonemic structure is learnable from unlabeled speech on the basis of these local signals. We further show that remembering the past and predicting the future both contribute to the linguistic content of acquired representations, and that these contributions are at least partially complementary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "How children acquire language from the environment is one of the fundamental mysteries of cognitive science. Much theoretical, experimental, and computational research into this question has focused on acquiring abstractions over lowerorder symbols, such acquiring morphemes from phoneme sequences or syntactic structures from word sequences (Chomsky, 1965; Gold, 1967; Elman, 1991; Saffran et al., 1996; Albright, 2002; Klein and Manning, 2004; Goldwater et al., 2009; Christodoulopoulos et al., 2012, inter alia) . Children, however, do not get symbolic input; symbolic representations at any level of granularity constitute abstractions inferred from highly variable, noisy, and information-rich perceptual signals like audition and vision. This work joins a growing computational literature exploring the kinds of architectures and learning objectives that best support acquisition of linguistic representations directly from the speech signal without supervision (Versteegh et al., 2015; Dunbar et al., 2017) . Such models can be used to test questions about language acquisition under more realistic assumptions about the input signal, especially to the extent that they reflect known constraints on human cognition (Shain and Elsner, 2019; Begu\u0161, 2020) .", |
| "cite_spans": [ |
| { |
| "start": 342, |
| "end": 357, |
| "text": "(Chomsky, 1965;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 358, |
| "end": 369, |
| "text": "Gold, 1967;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 370, |
| "end": 382, |
| "text": "Elman, 1991;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 383, |
| "end": 404, |
| "text": "Saffran et al., 1996;", |
| "ref_id": "BIBREF109" |
| }, |
| { |
| "start": 405, |
| "end": 420, |
| "text": "Albright, 2002;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 421, |
| "end": 445, |
| "text": "Klein and Manning, 2004;", |
| "ref_id": null |
| }, |
| { |
| "start": 446, |
| "end": 469, |
| "text": "Goldwater et al., 2009;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 470, |
| "end": 514, |
| "text": "Christodoulopoulos et al., 2012, inter alia)", |
| "ref_id": null |
| }, |
| { |
| "start": 968, |
| "end": 992, |
| "text": "(Versteegh et al., 2015;", |
| "ref_id": "BIBREF121" |
| }, |
| { |
| "start": 993, |
| "end": 1013, |
| "text": "Dunbar et al., 2017)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 1222, |
| "end": 1246, |
| "text": "(Shain and Elsner, 2019;", |
| "ref_id": "BIBREF114" |
| }, |
| { |
| "start": 1247, |
| "end": 1259, |
| "text": "Begu\u0161, 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This study uses computational modeling to examine two influential and possibly complementary ideas about how people learn abstract representations, including language, from data: learning to remember, and learning to predict. Both hypotheses have been advocated by prior work in language acquisition, cognitive neuroscience, and computational modeling, yet their relative contributions to language learning are not yet clear. Our model permits precise manipulation of memory and prediction pressures during acquisition, allowing direct comparison of these hypotheses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In so doing, we implement several constraints on real-time language processing that have not been simultaneously present in prior modeling of this domain: (1) we jointly segment and label the speech signal without supervision; (2) the learning objective is applied incrementally during real-time processing using only locally available feedback;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(3) the encoded signal is segmental, sparse, and hierarchically organized; (4) segments are represented featurally as patterns of activation, rather than discrete category symbols; and (5) the system is optimized by modeling its own state at multiple timescales, rather than by modeling the data alone.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Results show a systematic improvement along multiple measures of phoneme induction quality from both learning to remember and learning to predict, suggesting that these two kinds of signals may play complementary roles during child language acquisition. The contributions of this work are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose a novel deep neural encoderdecoder for unsupervised speech processing that is incremental, segmental, and useful for testing hypothesized cognitive constraints.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We show empirically that memory-based and prediction-based signals contribute separately to the acquisition of linguistic regularities, simultaneously supporting two existing classes of theories about the learning pressures that underlie human language acquisition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Many proposals from the language acquisition literature appeal to memory pressures as a learning signal (Newport, 1990; Pinker, 1991; Carstairs-McCarthy, 1994; Rissanen and Ristad, 1994; Baddeley et al., 1998; Goldsmith, 2003; Yang, 2005, inter alia) . For example, Baddeley et al. (1998) invoke constraints on working memory, arguing that because the speech signal is too rich to support full retention during real-time language processing (Baddeley and Hitch, 1974) , infants are guided toward phonemic representations, which constitute an efficient encoding of that signal. Meanwhile, classical theories of language acquisition such as Newport (1990) and Pinker (1991) invoke constraints on long-term memory, arguing that linguistic regularities constitute compressed descriptions of the learner's input and that their discovery reduces the amount of information that must be idiosyncratically stored. Artificial language learning patterns in humans (Kersten and Earles, 2001) and recent computational modeling of the speech domain (e.g. Lee and Glass, 2012; Lee et al., 2015; Elsner and Shain, 2017; Kamper et al., 2017a; Shain and Elsner, 2019) have supported a contribution from memory constraints to language learning. This position also aligns with an extensive computational neuroscience literature on sparse coding, which holds that biological neurons are tuned for memory-efficient representations of recent stimuli (Attneave, 1954; Field, 1996, 2004; Sheridan et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 119, |
| "text": "(Newport, 1990;", |
| "ref_id": "BIBREF90" |
| }, |
| { |
| "start": 120, |
| "end": 133, |
| "text": "Pinker, 1991;", |
| "ref_id": "BIBREF102" |
| }, |
| { |
| "start": 134, |
| "end": 159, |
| "text": "Carstairs-McCarthy, 1994;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 160, |
| "end": 186, |
| "text": "Rissanen and Ristad, 1994;", |
| "ref_id": "BIBREF107" |
| }, |
| { |
| "start": 187, |
| "end": 209, |
| "text": "Baddeley et al., 1998;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 210, |
| "end": 226, |
| "text": "Goldsmith, 2003;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 227, |
| "end": 250, |
| "text": "Yang, 2005, inter alia)", |
| "ref_id": null |
| }, |
| { |
| "start": 266, |
| "end": 288, |
| "text": "Baddeley et al. (1998)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 441, |
| "end": 467, |
| "text": "(Baddeley and Hitch, 1974)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 639, |
| "end": 653, |
| "text": "Newport (1990)", |
| "ref_id": "BIBREF90" |
| }, |
| { |
| "start": 658, |
| "end": 671, |
| "text": "Pinker (1991)", |
| "ref_id": "BIBREF102" |
| }, |
| { |
| "start": 953, |
| "end": 979, |
| "text": "(Kersten and Earles, 2001)", |
| "ref_id": "BIBREF73" |
| }, |
| { |
| "start": 1041, |
| "end": 1061, |
| "text": "Lee and Glass, 2012;", |
| "ref_id": null |
| }, |
| { |
| "start": 1062, |
| "end": 1079, |
| "text": "Lee et al., 2015;", |
| "ref_id": "BIBREF75" |
| }, |
| { |
| "start": 1080, |
| "end": 1103, |
| "text": "Elsner and Shain, 2017;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 1104, |
| "end": 1125, |
| "text": "Kamper et al., 2017a;", |
| "ref_id": "BIBREF70" |
| }, |
| { |
| "start": 1126, |
| "end": 1149, |
| "text": "Shain and Elsner, 2019)", |
| "ref_id": "BIBREF114" |
| }, |
| { |
| "start": 1427, |
| "end": 1443, |
| "text": "(Attneave, 1954;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1444, |
| "end": 1462, |
| "text": "Field, 1996, 2004;", |
| "ref_id": null |
| }, |
| { |
| "start": 1463, |
| "end": 1485, |
| "text": "Sheridan et al., 2017)", |
| "ref_id": "BIBREF115" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background 2.1 Memory, Prediction, and Learning", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Nonetheless, debate exists about the role of memory in language learning. For example, Rohde and Plaut (1999) fail to replicate findings from Elman (1993) in favor of Newport (1990) . In addition, Perfors (2012) fails to find evidence that memory bottlenecks encourage discovery of underlying linguistic regularities in adults and argues that such limitations only support language learning in concert with strong inductive priors. Furthermore, evidence suggests that mental representations during language processing preserve acoustic details over and above symbolic codes (Andruski et al., 1994; McMurray et al., 2002) . Related work has called into question both the memory efficiency of human mental representations and the severity of long-term memory limits. For example, experimental evidence indicates that human mental representations contain redundant information, both of language (Baayen et al., 1997) and of other constructs such as logical relations (Piantadosi et al., 2016) . In addition, recent estimates of mental storage requirements indicate that lexical information, especially semantics, already requires vastly more storage than e.g. phonemes and syntax, suggesting little added memory benefit from optimizing the efficiency with which regularities are stored (Mollica and Piantadosi, 2019) . Finally, recent computational evidence linking memory bottlenecks to success in unsupervised speech processing has relied on storage of arbitrarily long acoustic sequences in their full detail in order to compute reconstruction losses Elsner and Shain, 2017) . This design is inconsistent with known constraints on the storage duration (< 1s) of unanalyzed acoustic traces in human working memory (Baddeley and Hitch, 1974; Cowan, 1984) . It is thus not yet clear (1) how strongly memory pressures constrain mental representations of speech or (2) how much they encourage language learning.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 109, |
| "text": "Rohde and Plaut (1999)", |
| "ref_id": "BIBREF108" |
| }, |
| { |
| "start": 167, |
| "end": 181, |
| "text": "Newport (1990)", |
| "ref_id": "BIBREF90" |
| }, |
| { |
| "start": 574, |
| "end": 597, |
| "text": "(Andruski et al., 1994;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 598, |
| "end": 620, |
| "text": "McMurray et al., 2002)", |
| "ref_id": "BIBREF85" |
| }, |
| { |
| "start": 892, |
| "end": 913, |
| "text": "(Baayen et al., 1997)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 964, |
| "end": 989, |
| "text": "(Piantadosi et al., 2016)", |
| "ref_id": "BIBREF101" |
| }, |
| { |
| "start": 1283, |
| "end": 1313, |
| "text": "(Mollica and Piantadosi, 2019)", |
| "ref_id": "BIBREF88" |
| }, |
| { |
| "start": 1551, |
| "end": 1574, |
| "text": "Elsner and Shain, 2017)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 1713, |
| "end": 1739, |
| "text": "(Baddeley and Hitch, 1974;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1740, |
| "end": 1752, |
| "text": "Cowan, 1984)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background 2.1 Memory, Prediction, and Learning", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Memory efficiency is not the only objective that can be constructed to learn abstractions over data without supervision. It has also been proposed that language learning may be driven by optimizing prediction of future input (Rohde and Plaut, 1999; Johnson et al., 2013; Phillips and Ehrenhofer, 2015; Apfelbaum and McMurray, 2017) . This proposal aligns with an extensive neuroscience literature arguing that predictive coding for future inputs is a \"canonical computation\" of the human brain (Keller and Mrsic-Flogel, 2018) and may better characterize the tuning of biological neurons than sparse coding (Singer et al., 2018) , possibly because prediction affords advantages in critical tasks (Nijhawan, 1994) and may help organisms filter noise from the perceptual signal by focusing attention on features relevant to prediction (Bialek et al., 2001) . Additional support for a role of prediction in language learning comes from the success of incremental language models in natural language processing, which optimize prediction of future words (Ney et al., 1994; Heafield et al., 2013; Jozefowicz et al., 2016; Radford et al., 2019) . Language models support dramatic performance improvements in language processing tasks (Radford et al., 2019) and have been shown to both (1) acquire linguistic abstractions without direct supervision (Linzen et al., 2016) and 2covary with human language comprehension measures (Frank and Bod, 2011; Goodkind and Bicknell, 2018; van Schijndel and Linzen, 2018) . Finally, experimental evidence indicates that infants chunk the speech stream at points of low transition probability, suggesting that predictive signals are exploited to learn word-like units (Saffran et al., 1996) .", |
| "cite_spans": [ |
| { |
| "start": 225, |
| "end": 248, |
| "text": "(Rohde and Plaut, 1999;", |
| "ref_id": "BIBREF108" |
| }, |
| { |
| "start": 249, |
| "end": 270, |
| "text": "Johnson et al., 2013;", |
| "ref_id": "BIBREF66" |
| }, |
| { |
| "start": 271, |
| "end": 301, |
| "text": "Phillips and Ehrenhofer, 2015;", |
| "ref_id": "BIBREF100" |
| }, |
| { |
| "start": 302, |
| "end": 331, |
| "text": "Apfelbaum and McMurray, 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 494, |
| "end": 525, |
| "text": "(Keller and Mrsic-Flogel, 2018)", |
| "ref_id": "BIBREF72" |
| }, |
| { |
| "start": 606, |
| "end": 627, |
| "text": "(Singer et al., 2018)", |
| "ref_id": "BIBREF117" |
| }, |
| { |
| "start": 695, |
| "end": 711, |
| "text": "(Nijhawan, 1994)", |
| "ref_id": "BIBREF92" |
| }, |
| { |
| "start": 832, |
| "end": 853, |
| "text": "(Bialek et al., 2001)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1049, |
| "end": 1067, |
| "text": "(Ney et al., 1994;", |
| "ref_id": "BIBREF91" |
| }, |
| { |
| "start": 1068, |
| "end": 1090, |
| "text": "Heafield et al., 2013;", |
| "ref_id": "BIBREF58" |
| }, |
| { |
| "start": 1091, |
| "end": 1115, |
| "text": "Jozefowicz et al., 2016;", |
| "ref_id": "BIBREF67" |
| }, |
| { |
| "start": 1116, |
| "end": 1137, |
| "text": "Radford et al., 2019)", |
| "ref_id": "BIBREF104" |
| }, |
| { |
| "start": 1227, |
| "end": 1249, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF104" |
| }, |
| { |
| "start": 1341, |
| "end": 1362, |
| "text": "(Linzen et al., 2016)", |
| "ref_id": "BIBREF76" |
| }, |
| { |
| "start": 1440, |
| "end": 1468, |
| "text": "Goodkind and Bicknell, 2018;", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 1469, |
| "end": 1500, |
| "text": "van Schijndel and Linzen, 2018)", |
| "ref_id": "BIBREF113" |
| }, |
| { |
| "start": 1696, |
| "end": 1718, |
| "text": "(Saffran et al., 1996)", |
| "ref_id": "BIBREF109" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background 2.1 Memory, Prediction, and Learning", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We address these questions computationally by manipulating the presence or absence of memory and prediction pressures in the joint objective of an unsupervised incremental speech processing model, allowing us to quantify the contributions of these two hypothesized learning signals under realistic constraints on real-time processing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background 2.1 Memory, Prediction, and Learning", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Artificial recurrent neural networks such as those employed here were initially proposed as algorithmic-level (Marr, 1982) models of activity in biological neural networks (Little, 1974; Hopfield, 1982) , and subsequent studies support ubiquitous recurrence in the cortex (Harris and Mrsic-Flogel, 2013). In addition, influential theories of biological neural information processing argue that biological neural circuits integrate information at multiple hierarchically-organized timescales (Kiebel et al., 2008; Hasson et al., 2015; Norman-Haignere et al., 2020) . Further neuroscientific evidence indicates that segmentation of the time dimension plays a critical role in human cognition, both in domain-general event processing (Zacks et al., 2001; Jensen, 2006 , inter alia) and in speech processing specifically (Sanders and Neville, 2003; Cunillera et al., 2006 Cunillera et al., , 2009 Kooijman et al., 2013; Lee and Cho, 2016, inter alia). Segmentation or \"chunking\" also plays a central role in several theories of language comprehension (Sanford and Sturt, 2002; Hale, 2006; Frank and Christiansen, 2018) and learning (Monaghan and Christiansen, 2010; McCauley and Christiansen, 2019) . Our model incorporates these notions architecturally, with segment boundaries implemented by \"detector neurons\" that govern information flow between neural populations at larger and smaller timescales (Masquelier, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 122, |
| "text": "(Marr, 1982)", |
| "ref_id": "BIBREF79" |
| }, |
| { |
| "start": 172, |
| "end": 186, |
| "text": "(Little, 1974;", |
| "ref_id": "BIBREF77" |
| }, |
| { |
| "start": 187, |
| "end": 202, |
| "text": "Hopfield, 1982)", |
| "ref_id": "BIBREF63" |
| }, |
| { |
| "start": 491, |
| "end": 512, |
| "text": "(Kiebel et al., 2008;", |
| "ref_id": "BIBREF74" |
| }, |
| { |
| "start": 513, |
| "end": 533, |
| "text": "Hasson et al., 2015;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 534, |
| "end": 563, |
| "text": "Norman-Haignere et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 731, |
| "end": 751, |
| "text": "(Zacks et al., 2001;", |
| "ref_id": "BIBREF124" |
| }, |
| { |
| "start": 752, |
| "end": 764, |
| "text": "Jensen, 2006", |
| "ref_id": "BIBREF65" |
| }, |
| { |
| "start": 817, |
| "end": 844, |
| "text": "(Sanders and Neville, 2003;", |
| "ref_id": "BIBREF110" |
| }, |
| { |
| "start": 845, |
| "end": 867, |
| "text": "Cunillera et al., 2006", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 868, |
| "end": 892, |
| "text": "Cunillera et al., , 2009", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 893, |
| "end": 915, |
| "text": "Kooijman et al., 2013;", |
| "ref_id": null |
| }, |
| { |
| "start": 916, |
| "end": 916, |
| "text": "", |
| "ref_id": null |
| }, |
| { |
| "start": 1048, |
| "end": 1073, |
| "text": "(Sanford and Sturt, 2002;", |
| "ref_id": "BIBREF111" |
| }, |
| { |
| "start": 1074, |
| "end": 1085, |
| "text": "Hale, 2006;", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 1086, |
| "end": 1115, |
| "text": "Frank and Christiansen, 2018)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 1129, |
| "end": 1162, |
| "text": "(Monaghan and Christiansen, 2010;", |
| "ref_id": "BIBREF89" |
| }, |
| { |
| "start": 1163, |
| "end": 1195, |
| "text": "McCauley and Christiansen, 2019)", |
| "ref_id": "BIBREF81" |
| }, |
| { |
| "start": 1399, |
| "end": 1417, |
| "text": "(Masquelier, 2018)", |
| "ref_id": "BIBREF80" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recurrent, Hierarchical, and Segmental Speech Processing in Humans", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Many theories of linguistic structure posit multiple, hierarchically organized levels of representation (Chomsky, 1957; Goldsmith, 1976) . Such theories predict the existence of abstractions over abstractions, latent structures that describe the distribution of other latent structures. This idea accords with recent theories of generalized Bayesian learning in biological agents, in which neural populations are thought to model the activity of other neural populations within their Markov blanket (Friston, 2010) . The notion of learning through modeling other elements of the agent's own mental state has been exploited in symbolic computational models of language acquisition (Lee and Glass, 2012; Lee et al., 2015) , but not in the context of artificial neural zero-resource speech models, which have so far derived their objective exclusively from the data (Kamper et al., 2017a; Elsner and Shain, 2017) . Our approach incorporates this idea by optimizing higher layers to predict the sequence of activations at lower layers.", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 119, |
| "text": "(Chomsky, 1957;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 120, |
| "end": 136, |
| "text": "Goldsmith, 1976)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 499, |
| "end": 514, |
| "text": "(Friston, 2010)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 680, |
| "end": 701, |
| "text": "(Lee and Glass, 2012;", |
| "ref_id": null |
| }, |
| { |
| "start": 702, |
| "end": 719, |
| "text": "Lee et al., 2015)", |
| "ref_id": "BIBREF75" |
| }, |
| { |
| "start": 863, |
| "end": 885, |
| "text": "(Kamper et al., 2017a;", |
| "ref_id": "BIBREF70" |
| }, |
| { |
| "start": 886, |
| "end": 909, |
| "text": "Elsner and Shain, 2017)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modeling the Mental State", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "This work is part of a growing interest in unsupervised representation learning from raw speech, especially the Zerospeech 2015 (Versteegh et al., 2015) and 2017 (Dunbar et al., 2017) shared tasks and participating systems (Badino et al., 2015; Renshaw et al., 2015; Agenbag and Niesler, 2015; Baljekar et al., 2015; R\u00e4s\u00e4nen et al., 2015; Lyzinski et al., 2015; Zeghidour et al., 2016; Heck et al., 2016; Srivastava and Shrivastava, 2016; Kamper et al., 2017b; Yuan et al., 2017; Heck et al., 2017; Shibata et al., 2017; Ansari et al., 2017a,b) , as well as subsequent deep neural autoencoders (Van Den Oord et al., 2017; Chorowski et al., 2019) inspired by the WaveNet architecture (van den Oord et al., 2016) . Much of this work concerns the discovery of word-like units, while our analyses focus on learning at the phoneme level (see section 4.3). A symbolic Bayesian framework for joint unsupervised phoneme segmentation and clustering is proposed by Lee and Glass (2012) and extended by Lee et al. (2015) . Their system infers a Dirichlet process hidden Markov model to learn a symbolic sequential encoding of the speech stream. A disadvantage of this approach for the present research question is that the categorically distributed phone labels lack any notion of featural relatedness, contrary to widely held assumptions about natural language phonology (Clements, 1985) . In addition, the learning signal derives from a next-frame prediction objective, making it difficult to use the model to factorially manipulate memory and prediction pressures. Another recent framework for unsupervised phone segmentation identifies boundaries at points of high surprisal in a frame-level language model (Michel et al., 2017) . This approach does not generate segment encodings and cannot straightforwardly be used to test claims about the role of memory in language learning.", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 152, |
| "text": "(Versteegh et al., 2015)", |
| "ref_id": "BIBREF121" |
| }, |
| { |
| "start": 162, |
| "end": 183, |
| "text": "(Dunbar et al., 2017)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 223, |
| "end": 244, |
| "text": "(Badino et al., 2015;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 245, |
| "end": 266, |
| "text": "Renshaw et al., 2015;", |
| "ref_id": "BIBREF106" |
| }, |
| { |
| "start": 267, |
| "end": 293, |
| "text": "Agenbag and Niesler, 2015;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 294, |
| "end": 316, |
| "text": "Baljekar et al., 2015;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 317, |
| "end": 338, |
| "text": "R\u00e4s\u00e4nen et al., 2015;", |
| "ref_id": "BIBREF105" |
| }, |
| { |
| "start": 339, |
| "end": 361, |
| "text": "Lyzinski et al., 2015;", |
| "ref_id": "BIBREF78" |
| }, |
| { |
| "start": 362, |
| "end": 385, |
| "text": "Zeghidour et al., 2016;", |
| "ref_id": "BIBREF126" |
| }, |
| { |
| "start": 386, |
| "end": 404, |
| "text": "Heck et al., 2016;", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 405, |
| "end": 438, |
| "text": "Srivastava and Shrivastava, 2016;", |
| "ref_id": "BIBREF118" |
| }, |
| { |
| "start": 439, |
| "end": 460, |
| "text": "Kamper et al., 2017b;", |
| "ref_id": "BIBREF71" |
| }, |
| { |
| "start": 461, |
| "end": 479, |
| "text": "Yuan et al., 2017;", |
| "ref_id": "BIBREF123" |
| }, |
| { |
| "start": 480, |
| "end": 498, |
| "text": "Heck et al., 2017;", |
| "ref_id": "BIBREF60" |
| }, |
| { |
| "start": 499, |
| "end": 520, |
| "text": "Shibata et al., 2017;", |
| "ref_id": "BIBREF116" |
| }, |
| { |
| "start": 521, |
| "end": 544, |
| "text": "Ansari et al., 2017a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 594, |
| "end": 621, |
| "text": "(Van Den Oord et al., 2017;", |
| "ref_id": "BIBREF119" |
| }, |
| { |
| "start": 622, |
| "end": 645, |
| "text": "Chorowski et al., 2019)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 683, |
| "end": 710, |
| "text": "(van den Oord et al., 2016)", |
| "ref_id": "BIBREF96" |
| }, |
| { |
| "start": 955, |
| "end": 975, |
| "text": "Lee and Glass (2012)", |
| "ref_id": null |
| }, |
| { |
| "start": 992, |
| "end": 1009, |
| "text": "Lee et al. (2015)", |
| "ref_id": "BIBREF75" |
| }, |
| { |
| "start": 1361, |
| "end": 1377, |
| "text": "(Clements, 1985)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1700, |
| "end": 1721, |
| "text": "(Michel et al., 2017)", |
| "ref_id": "BIBREF87" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Computational Approaches", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Like many prior ANN zero-resource speech processing models (e.g. Kamper et al., , 2017a Elsner and Shain, 2017; Shain and Elsner, 2019) , we employ an encoder-decoder framework. However, unlike previous approaches, our model decodes incrementally and hierarchically, with each layer decoding its inputs at their own timescale over a short window backward into the past and/or forward into the future. The model is thus required not only to describe the input signal (speech), but also its own sequence of latent representations (e.g. phones, words, etc.), much as people are implicitly thought to do in prior symbolic work on unsupervised language learning (Goldwater et al., 2009; Lee et al., 2015 ). Our encoder model closely follows Chung et al. (2017) , and thus the primary technical contribution of this work lies in the cascaded incremental decoder and the layerwise incremental objective described below, both of which are designed to encourage repurposable segment representations based on locally available information. Although encodings are ultimately the quantity of interest in unsupervised encoder-decoder models, prior work has shown that decoder design can be a major determinant of acquired representations (McCoy et al., 2018 (McCoy et al., , 2020 . The overall design is schematized in Figure 1 . Code is available at https://github.com/coryshain/dnnseg.", |
| "cite_spans": [ |
| { |
| "start": 65, |
| "end": 87, |
| "text": "Kamper et al., , 2017a", |
| "ref_id": "BIBREF70" |
| }, |
| { |
| "start": 88, |
| "end": 111, |
| "text": "Elsner and Shain, 2017;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 112, |
| "end": 135, |
| "text": "Shain and Elsner, 2019)", |
| "ref_id": "BIBREF114" |
| }, |
| { |
| "start": 657, |
| "end": 681, |
| "text": "(Goldwater et al., 2009;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 682, |
| "end": 698, |
| "text": "Lee et al., 2015", |
| "ref_id": "BIBREF75" |
| }, |
| { |
| "start": 736, |
| "end": 755, |
| "text": "Chung et al. (2017)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1225, |
| "end": 1244, |
| "text": "(McCoy et al., 2018", |
| "ref_id": "BIBREF82" |
| }, |
| { |
| "start": 1245, |
| "end": 1266, |
| "text": "(McCoy et al., , 2020", |
| "ref_id": "BIBREF83" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1306, |
| "end": 1314, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our encoder closely follows a hierarchical multiscale extension (HM-LSTM, Chung et al., 2017) of long short-term memory (LSTM) networks (Hochreiter and Schmidhuber, 1997) . The encoder consists of multiple LSTM layers linked by discrete boundary neurons that govern memory retention and information flow between layers. When a boundary neuron fires in layer l, it terminates a segment. Layer l then ejects its hidden state representation to layer l + 1, receives top-down input from the hidden state of layer l + 1, and resets its cell state (incremental memory) in order to process the next segment. The hidden state at a boundary thus constitutes a label for the segment terminating at that boundary, which is used to summarize the content of the segment when communicating with other layers. When the boundary neuron at layer l does not fire, layer l + 1 is inert and simply copies its representation forward. As a result, higher layers track information at longer timescales than lower layers, and the segmentation behavior at l determines the input timescale at l + 1. Each layer proceeds by segmenting and labeling its input signal at a timescale learned from data, resulting in a hierarchical sequence of labeled segments. As argued in Chung et al. (2017) , this design enforces a trade-off between recurrent information (which is erased by segmentation) and top-down information (which is made available by segmentation). 1 Although the linguistic quality of discovered HM-LSTM segments is not systematically examined in the original proposal (Chung et al., 2017) and recent analysis has called it into question (K\u00e1d\u00e1r et al., 2018) , our results indicate that HM-LSTMs can discover segmental structure from speech, at least at the phonemic level.", |
| "cite_spans": [ |
| { |
| "start": 74, |
| "end": 93, |
| "text": "Chung et al., 2017)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 136, |
| "end": 170, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF62" |
| }, |
| { |
| "start": 1243, |
| "end": 1262, |
| "text": "Chung et al. (2017)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1430, |
| "end": 1431, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 1551, |
| "end": 1571, |
| "text": "(Chung et al., 2017)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1620, |
| "end": 1640, |
| "text": "(K\u00e1d\u00e1r et al., 2018)", |
| "ref_id": "BIBREF68" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The decoder consists of two multi-layer attentional sequence-to-sequence (seq2seq) LSTMs with L layers each, one backward-directional (memory) and one forward directional (prediction). The LSTMs respectively decode the B previous input segment labels and the F following input segment labels given an encoder representation at layer l and time t. In addition, the predicted sequence from the layer above serves as attention values to inform decoding at the current layer, at a timescale determined by the segmentation patterns of the current layer and the layer below. The internal behavior of the decoder is thus tightly coupled with the segmental behavior of the encoder, providing direct feedback into the encoder decisions. In addition, the label sequence of the decoder at all layers must support both (1) decoding of the perceptual signal (the data), since top-down connections allow higherlevel representations to inform lower-level ones, and (2) decoding of lower-level state sequences. 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We employ an incremental layerwise objective that both reconstructs backward and predicts forward from time t at layer l over the segment labels from layer l \u2212 1 at a timescale defined by the segmentation behavior of layer l\u22121. Thus only the first layer decodes at the timescale of the data; higher layers l decode at the timescale of l\u22121, and representations associated with non-boundaries in l \u2212 1 are ignored by the objective. The objective scans incrementally over the time dimension and imposes a forward and backward cost at every segment boundary identified by layer l \u2212 1. As a result, the first layer (\"phonemes\") is responsible for incrementally decoding the local past and future realization of the acoustic stream, the second layer is responsible for decoding the local past and future realization of the 2 See Appendix C for definition of the decoder.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\"phoneme\" sequence, etc. 3 Although it is possible to backpropagate into the decoding targets (i.e. encoder representations) at higher layers, thereby encouraging the encoder to discover more predictable segment sequences, we found in practice that doing so resulted in a form of mode collapse where labels became insensitive to the data and converged to a single value for all timesteps. For this reason, we stop the gradients into decoding targets and backpropagate only into the decoder predictions. Thus, the objective encourages encodings at higher layers to change to better predict structures at lower layers-but does not alter the representations at those lower layers to make them more uniform and therefore easier to predict.", |
| "cite_spans": [ |
| { |
| "start": 25, |
| "end": 26, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We assess the contribution of memory and prediction pressures to phoneme acquisition by (1) manipulating these pressures on models exposed to speech data from two unrelated languages (Xitsonga and English) and (2) evaluating the effect of these manipulations on multiple measures of phoneme induction quality.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Design", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We use the Zerospeech 2015 (Versteegh et al., 2015) challenge data in English and Xitsonga, a Bantu language spoken in South Africa. The Xitsonga data come from the NCHLT corpus (De Vries et al., 2014) and contain 2h29m07s of read speech from 24 speakers. The English data come from the Buckeye Corpus (Pitt et al., 2005) and contain 4h59m05s of spontaneous speech from 12 speakers. For English, we additionally include the official development set in training, which contains 1h39m45s of spontaneous speech from 2 speakers, also from the Buckeye Corpus. English development set performance was used for model development and tuning, but the development set is not included in the evaluations presented here. Xitsonga lacks a development set, so designs selected on the English development set are applied directly to Xitsonga for evaluation. Before fitting, we convert the source audio files into a cochleagram-based spectral representation that approximates the signal generated by the human auditory system (McDermott and Simoncelli, 2011). 4", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 51, |
| "text": "(Versteegh et al., 2015)", |
| "ref_id": "BIBREF121" |
| }, |
| { |
| "start": 302, |
| "end": 321, |
| "text": "(Pitt et al., 2005)", |
| "ref_id": "BIBREF103" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We seek to assess the contribution of both memory and prediction pressures to the content of model representations. To this end, our principal manipulations are the backward (B \u2208 {0, 5, 25, 50}) and forward (F \u2208 {0, 1, 5, 10}) window lengths used by the decoder, which respectively impose a pressure to efficiently remember and accurately predict. Note that the condition B = 0, F = 0 (no reconstruction or prediction) is not well defined because the objective is 0 at any parameterization and thus has no gradient, and we therefore exclude it from consideration in these results. In addition, we manipulate the number of encoder layers (L \u2208 {2, 3, 4}). This is because it is unclear a priori which layers of the encoder are expected to correspond to quantities of interest like phonemes or words, since the representations are unsupervised and the model could additionally or instead discover e.g. subphonemic, morphemic, phrasal, intonational, and other kinds of structures. Although detection of these and other levels of linguistic representation is of interest and is the target of future work, the annotations provided by the Zerospeech 2015 data support phoneme-level and word-level analyses only, and we concentrate our evaluation there. Varying the number of layers allows us to investigate which layers emergently discover more phoneme-like units, and under what conditions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Manipulations", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Because our model generates a segmental encoding of the speech signal, we apply two classes of evaluation in this study: phoneme segmentation and phoneme-level probing classification. The segmentation evaluation measures the degree of correspondence between the model-generated segment boundaries and expert-annotated phoneme boundaries, using a boundary F-measure which assigns a true positive for up to one predicted boundary that falls within some tolerance of each gold boundary, false positives for all other predicted boundaries, and false negatives for all gold boundaries that lack a predicted boundary within the tolerance. Following Lee and Glass (2012), we use a tolerance of 20ms. The classification evaluation measures the amount of signal in model-generated encodings as to (1) the true identity of the phoneme being encoded and (2) the cluster of phonological features associated with that phoneme (Hayes, 2011) . Following e.g. Shain and Elsner (2019) and Chrupa\u0142a et al. (2020) , we do so using probing classifiers. In particular, for each layer of each model's encoder, we fit linear classifiers to (1) the phoneme labels and (2) the phonological feature labels associated with the gold phoneme segment corresponding to each phone boundary. We extract the gold and predicted phoneme encodings at the human-annotated phoneme boundaries, regardless of whether the model segmented at that location. This supports direct comparison of metrics across models, since the set of evaluated segments is held constant. Phonological features are extracted at the same timepoints, following the procedure described in Shain and Elsner (2019) . 
5 Although our model is designed to support joint discovery of multiple layers of representation, we find empirically that no model appreciably improves at any layer in word boundary F-score over a baseline that segments only at the ends of voice activity regions, and qualitative inspection does not indicate systematic correspondence to an unannotated level of representation such as syllables, morphemes, or intonational units. Despite differences in segmentation rate, and thereby in word boundary precision-recall trade-off, models generally converge on similar (low) word boundary F-scores, and thus our manipulations are not informative about word learning. Probe-based classification metrics are not well suited to word-level evaluation due to the size of the vocabulary. Though human speech processing involves units between the phoneme and word level, detailed analysis of such units is difficult due to the lack of annotation in the corpus. We believe poor word discovery at higher layers may be due in part to the fact that noninitial layers have both a non-stationary objective (the evolving representations of the layer below) and slower learning dynamics, perhaps making it difficult for these layers to \"catch up\" with moving targets (Ioffe and Szegedy, 2015) . We leave exploration of possible remedies to future research and focus here only on the phoneme level.", |
| "cite_spans": [ |
| { |
| "start": 913, |
| "end": 926, |
| "text": "(Hayes, 2011)", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 944, |
| "end": 967, |
| "text": "Shain and Elsner (2019)", |
| "ref_id": "BIBREF114" |
| }, |
| { |
| "start": 972, |
| "end": 994, |
| "text": "Chrupa\u0142a et al. (2020)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1623, |
| "end": 1646, |
| "text": "Shain and Elsner (2019)", |
| "ref_id": "BIBREF114" |
| }, |
| { |
| "start": 1649, |
| "end": 1650, |
| "text": "5", |
| "ref_id": null |
| }, |
| { |
| "start": 2899, |
| "end": 2924, |
| "text": "(Ioffe and Szegedy, 2015)", |
| "ref_id": "BIBREF64" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "While it is a priori unclear which layer of the encoder is expected to encode phonemes (for example, the initial layers may encode sub-phonemic units), we find systematically better phoneme segmentation and classification performance in the first layer of the network. For simplicity, we therefore only present metrics from this layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In addition to reporting raw model performance, we report performance improvements from each model relative to (1) baseline U (untrained), an architecturally matched model left at random initialization (Chrupa\u0142a et al., 2020) , and (2) baseline X (cross-language), the architecturally matched model trained on the opposite language. 6 These two baselines quantify different contributions of the acquisition process. Baseline U quantifies architectural inductive bias: how well does the architecture alone guide linguistic representations, without learning? Baseline X quantifies modality inductive bias: how well does general knowledge of human speech guide linguistic representations, without exposure to the target language? Improvement against either of these baselines supports language learning from experience, over and above any prior knowledge that might more efficiently be innately encoded. 7", |
| "cite_spans": [ |
| { |
| "start": 202, |
| "end": 225, |
| "text": "(Chrupa\u0142a et al., 2020)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Boundary and macro-averaged phoneme and feature classification F-measures from the bestperforming configuration on the English development set (B = 25, F = 1, L = 3) are given in Table 1 . English boundary performance (F = 65.3) approaches previously reported unsupervised phoneme segmentation scores on different and therefore not directly comparable datasets (Lee and Glass, 2012; Michel et al., 2017, both around 6 For Xitsonga, baseline X is the architecturally matched English-trained model. For English, baseline X is the architecturally matched Xitsonga-trained model. 7 We do not evaluate directly against a previous state of the art because no state of the art exists for unsupervised phoneme segmentation and classification in the Zerospeech 2015 data. A previous model that performed the same task (Lee and Glass, 2012) achieved an average boundary F-score of 76.1 on a different dataset that used a different boundary annotation standard (automatic forced alignment instead of human annotation). To our knowledge, the dataset is no longer publicly available. A recent segmentation-only model (Michel et al., 2017) achieved a boundary F of 75 on the TIMIT dataset (Fisher et al., 1986) . However, because TIMIT is restricted to 10 unique utterances of English, we believe Zerospeech 2015, which contains more linguistically diverse speech from two unrelated languages, is a better dataset for investigating language acquisition patterns. F = 75). The overall segmentation performance in Xitsonga is considerably worse than that of English, consistent with prior evidence that word segmentation in the Zerospeech 2015 Xitsonga partition is harder than English (e.g. Kamper et al., 2017a) . By contrast, classification metrics in Xitsonga are better than in English, which is again consistent with prior findings of stronger unsupervised classification performance in Xitsonga (Shain and Elsner, 2019) . 
The difference in relative performance between segmentation and classification in the two languages could be due in part to differences in register: the English data is spontaneously produced, while the Xitsonga data is read speech. Longer average phoneme duration (100ms vs 70ms) and cleaner articulations in Xitsonga could plausibly give rise to this asymmetry, and further investigation is left to future work. The model substantially outperforms the untrained baseline (U) on all metrics and outperforms the cross-language baseline (X) on all metrics but boundary F in Xitsonga, which could be due in part to the larger size of the English-language training set. Results therefore indicate that the reconstruction and prediction objectives have contributed to unsupervised discovery of phonemic patterns in both languages.", |
| "cite_spans": [ |
| { |
| "start": 361, |
| "end": 382, |
| "text": "(Lee and Glass, 2012;", |
| "ref_id": null |
| }, |
| { |
| "start": 383, |
| "end": 417, |
| "text": "Michel et al., 2017, both around 6", |
| "ref_id": null |
| }, |
| { |
| "start": 576, |
| "end": 577, |
| "text": "7", |
| "ref_id": null |
| }, |
| { |
| "start": 809, |
| "end": 830, |
| "text": "(Lee and Glass, 2012)", |
| "ref_id": null |
| }, |
| { |
| "start": 1104, |
| "end": 1125, |
| "text": "(Michel et al., 2017)", |
| "ref_id": "BIBREF87" |
| }, |
| { |
| "start": 1175, |
| "end": 1196, |
| "text": "(Fisher et al., 1986)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 1676, |
| "end": 1697, |
| "text": "Kamper et al., 2017a)", |
| "ref_id": "BIBREF70" |
| }, |
| { |
| "start": 1886, |
| "end": 1910, |
| "text": "(Shain and Elsner, 2019)", |
| "ref_id": "BIBREF114" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 179, |
| "end": 186, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Segmentation and macro-averaged classification F-measures by language and experimental condition are given in Figure 2 . Results show a contribution of both memory (B > 0) and prediction (F > 0), with a similar distribution of relative performance between the two languages, supporting the existence of language-general influences of prediction and memory on phoneme learning.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 110, |
| "end": 118, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "As shown in Figure 2 , models without memory pressures (B = 0) find substantially worse boundaries than models with memory pressures. There also appears to be a ceiling effect of backward reconstruction size, with a jump in performance at B = 25 but no systematic improvement at B = 50. Importantly, at layer 1, B = 25 covers a 250ms interval, which falls within even conservative estimates of the storage duration of unanalyzed auditory traces in humans (Cowan, 1984) . The B = 25 objective could therefore plausibly be used during online speech processing. Prediction pressures also support discovery of phoneme boundaries, as shown by the generally worse boundary performance of F = 0 vs. F > 0 in both languages.", |
| "cite_spans": [ |
| { |
| "start": 455, |
| "end": 468, |
| "text": "(Cowan, 1984)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In addition, Figure 2 shows that memory and prediction both modulate phoneme classification performance, with a roughly convex performance surface around a peak at B = 25, F = 10 for English and B = 25, F = 5 for Xitsonga. A similar peak emerges in the feature classification results for English, along with a local feature classification peak in Xitsonga for L > 2. A 250ms auditory memory window thus supports both phoneme segmentation and classification in our models, with additional benefits from predicting over short intervals (Singer et al., 2018) . For feature classification, the primary determinant of performance across languages is the prediction objective, with performance generally increasing up to F = 5. There is also an effect of encoder depth in these results, such that encoders with more layers (L > 2) tend to perform better across metrics, despite the fact that all metrics reflect performance at the first layer. This result supports a contribution of multiscale modeling, even if the segmentation behavior at higher layers does not clearly correspond to a theory-driven level of representation (see section 4.3). absence of these characteristics. Memory and prediction therefore modulate not only absolute performance, but also the utility of language experience. Figure 4 reports performance differences by metric against baseline X (cross-language). English segmentation is substantially helped by experience with (i.e. training on) English, especially under strong memory pressures. However, Xitsonga segmentation is generally worse for the Xitsongatrained model than the English-trained one. This might be due to the fact that the English training set is larger, and/or to low overall levels of segmentation performance in Xitsonga. While we leave further investigation of this exception to future work, the classification metrics still show a clear benefit of in-domain training in both languages, but only in the presence of prediction pressures.", |
| "cite_spans": [ |
| { |
| "start": 534, |
| "end": 555, |
| "text": "(Singer et al., 2018)", |
| "ref_id": "BIBREF117" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 13, |
| "end": 21, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1290, |
| "end": 1298, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The baseline X results bear on the degree to which speech processing patterns can plausibly be innately specified. Although the set of phonological categories and features are classically regarded as universal (Chomsky and Halle, 1968; Clements, 1985) , it is well known that the \"same\" phonological abstraction (e.g. voicing) can be phonetically cached out in different ways depending on the language (e.g. Gordon and Ladefoged, 2001; Gordon et al., 2002) . Our results suggest that, at least between Xitsonga and English, this variation is both (1) constrained enough to permit recognition of non-trivial patterns from speech in other languages on the basis of general, possibly innate processing biases, and (2) substantial enough to give rise to a benefit of direct experience with the target language, even for language-general constructs like phoneme categories and phonological features. We use linear regression on the combined metrics to quantitatively evaluate the contribution of both memory and prediction pressures to phoneme acquisition. Results show significant positive contributions to acquisition from memory pressures (p = 0.006), prediction pressures (p < 0.001), and multiscale encoding (p < 0.001). 8 The boundary precision/recall trade-off illuminates the mechanisms by which memory and prediction pressures affect learning ( Figure 5 ). Without memory pressures (B = 0), segmentation rates are high, resulting in high recall and low precision. Introducing memory pressures (B > 0) slows the segmentation rate, resulting in a more balanced P/R trade-off. Without prediction pressures (F = 0), segmentation rates are generally low, resulting in higher precision and lower recall. Introducing prediction pressures (F > 0) increases the segmentation rate, again resulting in a more balanced trade-off. To understand this pattern, recall that a boundary in our model represents both a cost 8 See Appendix G for details. 
(flushing the memory cell) and a benefit (injecting top-down feedback). The cost of forgetting is plausibly greater for reconstruction than prediction, since only the current layer has had direct access to the sequence of reconstruction targets. By contrast, the benefit of top-down feedback is plausibly greater for prediction than reconstruction, since the prediction can condition on contextual representations at multiple timescales. In our segmental model of speech processing, the objectives therefore induce countervailing biases that boost signal for phonological constructs, supporting their joint influence on phoneme acquisition from speech.", |
| "cite_spans": [ |
| { |
| "start": 210, |
| "end": 235, |
| "text": "(Chomsky and Halle, 1968;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 236, |
| "end": 251, |
| "text": "Clements, 1985)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 408, |
| "end": 435, |
| "text": "Gordon and Ladefoged, 2001;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 436, |
| "end": 456, |
| "text": "Gordon et al., 2002)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 1221, |
| "end": 1222, |
| "text": "8", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1349, |
| "end": 1357, |
| "text": "Figure 5", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We proposed an unsupervised deep neural model of speech processing that is incremental, segmental, and optimized by local feedback. We manipulated the model's objective function in order to investigate prior hypotheses about the role in human language acquisition of memory constraints on the one hand and predictive processing on the other. Results support a role for both memory and prediction pressures for acquiring phonemes from speech. Both objectives inform the model's segmentation behavior and the content of its segment encodings. In addition, results suggest that these two mechanisms coordinate to support phoneme discovery by introducing countervailing pressures toward retention of previously encountered signals (memory) and consultation of top-down signals (prediction). \u2022 The recurrent connection includes both the previous segment label and the current segment length in addition to the previous hidden state. We found this to be helpful during model development, and we hypothesize that this is because doing so removes the need for this information to be encoded by the model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 We implement the case-wise reasoning of the segmentation decisions using multiplicative masking rather than logical selection. This is intended to boost signal into the boundary decisions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 We enforce hierarchical segmentation behavior by multiplicatively masking the segmentation decision at layer l with the segmentation decision at layer l \u2212 1, thus preventing higher layers from segmenting where lower layers do not.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 We compute boundaries during training via Bernoulli sampling rather than rounding. We found this to substantially improve performance on the development set, and we hypothesize that sampling may improve the straightthrough gradient estimates by ensuring that the segmentation decision is unbiased with respect to the underlying segmentation probability.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 We renormalize the preactivations s t by the incoming boundary decisions (eq. A7). We found this to be helpful during model development, and we hypothesize that this is because it avoids fluctuation in the scale of preactivations as a function of the boundaries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 We do not apply the Chung et al. (2017) technique of slope annealing, i.e. gradually increasing the steepness of the sigmoid activation function to reduce bias in the straightthrough estimator. We did not find an appreciable benefit from slope annealing during development, and it had a tendency to produce training instability. Eliminating it also reduces experimenter degrees of freedom by removing design decisions about the annealing function.", |
| "cite_spans": [ |
| { |
| "start": 22, |
| "end": 41, |
| "text": "Chung et al. (2017)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The decoder consists of two attentional seq2seq LSTMs with L layers each, one backwarddirectional (memory) and one forward directional (prediction). Given a backward window size B and a forward window size F , each backward decoder layer generates reconstructions Y B(l) t \u2208 R B\u00d7D l\u22121 and each forward decoder layer generates predictions Y t,0 for the forward decoder -are generated using multilayer feedforward transforms f hB(l) , f cB(l) , f hF(l) , and f cF(l) :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "F(l) t \u2208 R F \u00d7D l\u22121 ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "h dB(l) t,0 def = f hB(l) h e(l) t (A22) c dB(l) t,0 def = f cB(l) h e(l) t (A23) h dF(l) t,0 def = f hF(l) h e(l) t (A24) c dF(l) t,0 def = f cF(l) h e(l) t (A25)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "Decoder states are doubly time indexed by t, i, where t indexes the encoder timestamp (i.e. the input timestep at which decoding begins) and i indexes the decoder timestamp (i.e. progress through the B or F decoder frames). Given decoder states h", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "dB(l) t,i , h dF(l) t,i , predictions Y B(l) t [i] , Y F(l) t [i] \u2208 R D l\u22121", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "are generated using multilayer feedforward transforms f yB(l) , f yF(l) :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Y B(l) t [i] def = f yB(l) h dB(l) t,i (A26) Y F(l) t [i] def = f yF(l) h dF(l) t,i", |
| "eq_num": "(A27)" |
| } |
| ], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "The decoder takes as input a periodic positional encoding e i , generated following Vaswani et al. (2017) . Non-final layers additionally take as attention values the predictions from the layer above, i.e. Y B(l+1) t (for backward reconstruction) and Y F(l+1) t (for forward prediction) and compute a weighted sum of these values over time with attention weight vectors a B(l) t,i \u2208 (0, 1) B and a F(l) t,i \u2208 (0, 1) F to generate context vectors w",
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 105, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": "BIBREF120" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "B(l) t,i and w F(l) t,i : w B(l) t,i def = Y B(l+1) t a B(l) t,i (A28) w F(l) t,i def = Y F(l+1) t a F(l) t,i (A29)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "Attention weights a B(l) and a F(l) are computed using Gaussian kernel k(i; \u00b5, \u03c3 2 ):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "k(i; \u00b5, \u03c3 2 ) def = exp (i \u2212 \u00b5) 2 \u03c3 2 (A30)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "Kernel k is applied to decoder time, with concentration \u03c3 B(l) , \u03c3 F(l) = 0.25 and with location \u00b5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "B(l) t,i \u00b5 F(l)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "t,i \u2208 R + computed by transforming the previous decoder state using a feedforward transform f qB(l) , f qF(l) and adding the result to the previous attention location:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "\u00b5 B(l) t,i def = abs f qB(l) h dB(l) t,i\u22121 + \u00b5 B(l) t,i\u22121 (A31) \u00b5 F(l) t,i def = abs f qF(l) h dF(l) t,i\u22121 + \u00b5 F(l) t,i\u22121 (A32)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "where \u00b5 B(l) t,0 = 1. Unit-normalized attention vectors are computed from timestamp vectors t B def = (1, . . . , B) and t F def = (1, . . . , F ) as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "a B(l) t,i def = k t B ; \u03c3 B(l) , \u00b5 B(l) t,i B j=1 k t B [j] ; \u03c3 B(l) , \u00b5 B(l) t,j (A33) a F(l) t,i def = k t F ; \u03c3 F(l) , \u00b5 F(l) t,i B j=1 k t F [j] ; \u03c3 F(l) , \u00b5 F(l) t,j", |
| "eq_num": "(A34)" |
| } |
| ], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "The attention weights are thus constrained to march monotonically in time from t into the decoded past or future predicted segment labels from the layer above. Using fixed concentration 0.25 yields an effective kernel width [\u22122\u03c3, 2\u03c3] of one timestep, ensuring that the bulk of the attention kernel either falls on a single segment label or straddles two consecutive segment labels and preventing the decoder from spreading its attention over many higher-level segments. This design encourages one-to-many temporal alignment between decoded segment labels and decoded inputs, while allowing the decoder to determine how long to attend to a predicted segment label before moving on to the next one. At the final (top) layer, no top-down predictions are available, so the context vectors are omitted (or, equivalently, set to 0). The inputs to the decoder x", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "dB(l) t,i , x dF(l) t,i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "are constructed as the vertical concatenation of e, w, and the previously generated decoder output, and a standard LSTM state update is applied:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "x dB(l) t,i def = \uf8eb \uf8ec \uf8ed e i w B(l) t,i Y B(l) t [i\u22121] \uf8f6 \uf8f7 \uf8f8 (A35) x dF(l) t,i def = \uf8eb \uf8ec \uf8ed e i w F(l) t,i Y F(l) t [i\u22121] \uf8f6 \uf8f7 \uf8f8 (A36) h dB(l) t,i , c dB(l) t,i def = LSTM x dB(l) t,i , h dB(l) t,i\u22121 , i > 0 (A37) h dF(l) t,i , c dF(l) t,i def = LSTM x dF(l) t,i , h dF(l) t,i\u22121 , i > 0", |
| "eq_num": "(A38)" |
| } |
| ], |
| "section": "C Decoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "See Appendix A for definition of the encoder and B for comparison to Chung et al. (2017).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "See Appendix D for definition of the objective. 4 See Appendix F for full preprocessing details.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "See Appendix E for probe implementation details.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Aren Jansen and the Clippers discussion group at Ohio State for providing valuable feedback on this research. This work was funded in part by a Google Faculty Research Award to Micha Elsner.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "Given encoder inputs x e t \u2208 R D 0 for 0 < t \u2264 T , an encoder with L layers is a recurrent neural network that computes its D l -dimensional hidden state h e(l) t \u2208 R D l at timestep t and layer l from the hidden state below h e(l\u22121) t (bottom-up connection), the previous hidden state h e(l) t\u22121 (recurrent connection), and the previous hidden state above h e(l+1) t\u22121 , where the layer zeroth state h e(0) t \u2208 R D 0 is the data x e t . The hidden state h e(l) t serves as a label at layer l and timestep t. Information flow between these layers is governed by a discrete boundary neuron z (l) t \u2208 {0, 1} at each layer. Let t (l) be the location of the most recent segment boundary preceding time t in layer l:Let f f(l) (a, b) be a filter function dropping labels of l at non-boundaries between timepoints a and b:t at layer l and timestep t is defined as:In other words, the segment St consists of the sequence of segment labels from layer l \u2212 1 at boundaries from l \u2212 1 that intervene since the last segment boundary at l.The bottom-up, recurrent, and top-down inputs are respectively linearly transformed into vectors in s b(l) , s r(l) , s t(l) \u2208 R 4D l +1 using weight matrices W j i mapping from layer i to layer j, and masked using the boundary decisions z:where h e(l) t records the label at the preceding segment boundary h e(l)t is the number of timesteps since the preceding segment boundary at l. We pass this additional information into the recurrent connection to relieve pressure on the cell state to encode it. These vectors are summed together with a bias b (l) to create a vector of preactivations s (l) , normalized by the boundary decisions so that the weights on active connections sum to 1:In this way, information is only passed upward and downward at boundaries, and the boundaries thus govern information flow between adjacent layers. The vector sand deterministically during evaluation:We additionally require that z (0) t = 1 (the input always \"segments\") and z (L) t = 0 (the top layer never segments). To enforce hierarchical segmentation behavior, we mask the boundaries by the boundaries at the layer below:Gradients through these discrete decisions are approximated using straight-through estimation (Hinton, 2012; Bengio et al., 2013; Courbariaux et al., 2016; Chung et al., 2017; Shain and Elsner, 2019; Eloff et al., 2019) .The forget gates ft , and cell proposal g e(l) t are computed as follows:The cell state c e(l) t is a weighted sum of three terms: a flush operation c f(l) t that erases the cell memory, a standard LSTM update c u (l) , and a copy operation c c(l) that copies the preceding cell state forward:These terms are weighted by the boundary decision such that a flush occurs when the preceding timestep finds a boundary, an update otherwise occurs when the layer below finds a boundary, and a copy occurs when neither layer finds a boundary:The hidden state h e(l) t is computed as:The previous segment encoding his updated following a boundary and copied forward otherwise:, and copied forward otherwise:Chung et al. 2017Although our encoder model closely follows the definition in Chung et al. (2017) , it differs in the following ways:",
| "cite_spans": [ |
| { |
| "start": 591, |
| "end": 594, |
| "text": "(l)", |
| "ref_id": null |
| }, |
| { |
| "start": 627, |
| "end": 630, |
| "text": "(l)", |
| "ref_id": null |
| }, |
| { |
| "start": 1577, |
| "end": 1580, |
| "text": "(l)", |
| "ref_id": null |
| }, |
| { |
| "start": 1620, |
| "end": 1623, |
| "text": "(l)", |
| "ref_id": null |
| }, |
| { |
| "start": 2249, |
| "end": 2269, |
| "text": "Bengio et al., 2013;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 2270, |
| "end": 2295, |
| "text": "Courbariaux et al., 2016;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 2296, |
| "end": 2315, |
| "text": "Chung et al., 2017;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 2316, |
| "end": 2339, |
| "text": "Shain and Elsner, 2019;", |
| "ref_id": "BIBREF114" |
| }, |
| { |
| "start": 2340, |
| "end": 2359, |
| "text": "Eloff et al., 2019)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 2575, |
| "end": 2578, |
| "text": "(l)", |
| "ref_id": null |
| }, |
| { |
| "start": 3137, |
| "end": 3156, |
| "text": "Chung et al. (2017)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Encoder Definition", |
| "sec_num": null |
| }, |
| { |
| "text": "The decoder is only applied to elements of f f(l) (1, T ) (i.e. only to frames where layer l segments) and only decodes the last B elements of f f(l\u22121) (1, t) and the first F elements of f f(l\u22121) (t + 1, T ); that is, it decodes only the B preceding segment labels and F following segment labels from layer l \u2212 1, ignoring labels at non-boundaries. Therefore, like encoding, decoding is also multiscale, taking place at the timescale of the encoder representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "213", |
| "sec_num": null |
| }, |
| { |
| "text": "Each decoder layer contributes two terms to the objective, a forward objective and a backward objective. Layer 1 decodes the data and uses a squared error loss:Layers 2, . . . , L decode the representations from the layer below, which are tanh-activated and thus constrained to the interval (\u22121, 1). Encoder features h e(l) t are deterministically cast into bitwise feature probabilities p e(l) t and decoded using sigmoid cross-entropy loss:Let T (l) denote the number of segment boundaries in layer l. Let Zrespectively denote the backward and forward targets and model predictions at encoder time t, decoder time i, and dimension d, defined as follows:The backward and forward loss components L B(l) and L F(l) are computed as:The overall loss L is:We apply the following implementation decisions in this study:\u2022 One hidden layer of 128 units for all feedforward transforms\u2022 Positional encoding dimensionality of 128\u2022 Exponential linear unit (elu) activations for all internal feedforward layers (Clevert et al., 2015) \u2022 Glorot uniform initialization for bottom-up, top-down, and feedforward encoder and decoder weight matrices (Glorot and Bengio, 2010) \u2022 Orthogonal initialization for recurrent weight matrices (Saxe et al., 2013)\u2022 Adam optimizer (Kingma and Ba, 2014) with learning rate 0.001, a minibatch size of 8, and default TensorFlow parameters.\u2022 Probing classifier implementation -Logistic regression using scikit-learn (Pedregosa et al., 2011) -Phoneme prediction is multinomial, feature prediction is binary -Minority feature class is always coded as positive -2-fold cross-validation -L2 \u03bb = 1 -100 LBFGS iterations (Zhu et al., 1997) per fold", |
| "cite_spans": [ |
| { |
| "start": 698, |
| "end": 702, |
| "text": "B(l)", |
| "ref_id": null |
| }, |
| { |
| "start": 999, |
| "end": 1021, |
| "text": "(Clevert et al., 2015)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1131, |
| "end": 1156, |
| "text": "(Glorot and Bengio, 2010)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 1432, |
| "end": 1456, |
| "text": "(Pedregosa et al., 2011)", |
| "ref_id": "BIBREF97" |
| }, |
| { |
| "start": 1631, |
| "end": 1649, |
| "text": "(Zhu et al., 1997)", |
| "ref_id": "BIBREF127" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "D Objective", |
| "sec_num": null |
| }, |
| { |
| "text": "We convert the audio recordings into sequences of 50-dimensional cochleagrams (Brown and Cooke, 1994; McDermott and Simoncelli, 2011) , each representing 10ms of audio data. Although this differs from the standard automatic speech recognition pipeline based on Mel frequency cepstral coefficients (Mermelstein, 1976) , it is motivated for our study because the model is unsupervised. Since we wish to test theories about cognition by extracting features from the acoustic stream without supervision, it is critical not only that the speech representation contain features that support identification of linguistic units, but that the representation emphasize those features in a plausibly similar manner to that of the human auditory system. Cochleagrams support this goal by incorporating more recent insights about human auditory perception (Mc-Dermott and Simoncelli, 2011) . Our implementation uses the pycochleagram library https: //github.com/mcdermottLab/pycochleagram.We L2 normalize the cochleagrams in order to encourage the decoder to focus on the spectral power envelope rather than absolute variation in loudness, since the former plausibly contains more linguistic signal. This procedure is supported by evidence of loudness constancy in human auditory perception, suggesting that similar kinds of normalization may take place in the brain (Zahorik and Wightman, 2001) . We additionally z-transform the normalized cochleagrams over time within each audio file, since this proved beneficial during model development.The source audio files contain many non-speech regions that are not of direct relevance for this study. We use the voice activity detection (VAD) intervals provided with the Zerospeech 2015 challenge data to remove these regions as a preprocess, and we force boundaries at the ends of VAD intervals. This greatly speeds training by removing irrelevant data, and it aligns with neuroscientific evidence of a prelinguistic capacity to detect human voices (Belin et al., 2000; Fecteau et al., 2005; Blasi et al., 2011; Pernet et al., 2015) .",
| "cite_spans": [ |
| { |
| "start": 78, |
| "end": 101, |
| "text": "(Brown and Cooke, 1994;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 102, |
| "end": 133, |
| "text": "McDermott and Simoncelli, 2011)", |
| "ref_id": "BIBREF84" |
| }, |
| { |
| "start": 297, |
| "end": 316, |
| "text": "(Mermelstein, 1976)", |
| "ref_id": "BIBREF86" |
| }, |
| { |
| "start": 843, |
| "end": 876, |
| "text": "(Mc-Dermott and Simoncelli, 2011)", |
| "ref_id": null |
| }, |
| { |
| "start": 1354, |
| "end": 1382, |
| "text": "(Zahorik and Wightman, 2001)", |
| "ref_id": "BIBREF125" |
| }, |
| { |
| "start": 1982, |
| "end": 2002, |
| "text": "(Belin et al., 2000;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 2003, |
| "end": 2024, |
| "text": "Fecteau et al., 2005;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 2025, |
| "end": 2044, |
| "text": "Blasi et al., 2011;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 2045, |
| "end": 2065, |
| "text": "Pernet et al., 2015)", |
| "ref_id": "BIBREF99" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "F Data Preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "We use linear regression to test the relationship between performance and memory pressures, prediction pressures, and multiscale encoding. To do so, we combine raw boundary, phoneme classification, and feature classification metrics, along with deltas in these metrics over baselines U and X, into a single vector of performance statistics, each of which measures one aspect of the contribution of these dimensions to phoneme learning in our unsupervised models. To improve normality of performance metrics which are bounded on the interval [0, 1], as well as comparability of performance across metrics, we first (1) cast the metrics onto the interval [\u22121, 1], (2) apply Fisher's Z transformation (i.e. arctanh), and (3) Z-score the transformed vectors within each metric type. We use binary coding for our predictors of interest: presence/absence of memory pressures (B > 0), presence/absence of prediction pressures (F > 0), and presence/absence of multiscale segmental encoding (L > 2). We also include categorical controls for comparison type (full, full -baseline U, full -baseline X) and metric type (boundary, phoneme, feature). Results, shown in Table A1 , support a contribution of all three critical variables to phoneme acquisition.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1155, |
| "end": 1163, |
| "text": "Table A1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "G Regression Model Design and Results", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Automatic segmentation and clustering of speech using sparse coding and metaheuristic search", |
| "authors": [ |
| { |
| "first": "Wiehan", |
| "middle": [], |
| "last": "Agenbag", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Niesler", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wiehan Agenbag and Thomas Niesler. 2015. Auto- matic segmentation and clustering of speech using sparse coding and metaheuristic search. In Sixteenth Annual Conference of the International Speech Com- munication Association.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Islands of reliability for regular morphology: Evidence from Italian. Language", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Albright", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "684--709", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Albright. 2002. Islands of reliability for regu- lar morphology: Evidence from Italian. Language, pages 684-709.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The effect of subphonetic differences on lexical access", |
| "authors": [ |
| { |
| "first": "Sheila", |
| "middle": [ |
| "E" |
| ], |
| "last": "Jean E Andruski", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Blumstein", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Burton", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Cognition", |
| "volume": "52", |
| "issue": "3", |
| "pages": "163--187", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jean E Andruski, Sheila E Blumstein, and Martha Bur- ton. 1994. The effect of subphonetic differences on lexical access. Cognition, 52(3):163-187.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Deep learning methods for unsupervised acoustic modeling-Leap submission to ZeroSpeech challenge", |
| "authors": [ |
| { |
| "first": "Rajath", |
| "middle": [], |
| "last": "T K Ansari", |
| "suffix": "" |
| }, |
| { |
| "first": "Sonali", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sriram", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ganapathy", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Automatic Speech Recognition and Understanding Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "754--761", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T K Ansari, Rajath Kumar, Sonali Singh, and Sriram Ganapathy. 2017a. Deep learning methods for un- supervised acoustic modeling-Leap submission to ZeroSpeech challenge 2017. In Automatic Speech Recognition and Understanding Workshop (ASRU), 2017 IEEE, pages 754-761. IEEE.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Unsupervised HMM posteriograms for language independent acoustic modeling in zero resource conditions", |
| "authors": [ |
| { |
| "first": "Rajath", |
| "middle": [], |
| "last": "T K Ansari", |
| "suffix": "" |
| }, |
| { |
| "first": "Sonali", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sriram", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Susheela", |
| "middle": [], |
| "last": "Ganapathy", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Devi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Automatic Speech Recognition and Understanding Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "762--768", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T K Ansari, Rajath Kumar, Sonali Singh, Sriram Ganapathy, and Susheela Devi. 2017b. Unsuper- vised HMM posteriograms for language indepen- dent acoustic modeling in zero resource conditions. In Automatic Speech Recognition and Understand- ing Workshop (ASRU), 2017 IEEE, pages 762-768. IEEE.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Learning during processing: Word learning doesn't wait for word recognition to finish", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Keith", |
| "suffix": "" |
| }, |
| { |
| "first": "Bob", |
| "middle": [], |
| "last": "Apfelbaum", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mcmurray", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Cognitive science", |
| "volume": "41", |
| "issue": "", |
| "pages": "706--747", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Keith S Apfelbaum and Bob McMurray. 2017. Learn- ing during processing: Word learning doesn't wait for word recognition to finish. Cognitive science, 41:706-747.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Some informational aspects of visual perception", |
| "authors": [ |
| { |
| "first": "Fred", |
| "middle": [], |
| "last": "Attneave", |
| "suffix": "" |
| } |
| ], |
| "year": 1954, |
| "venue": "Psychological review", |
| "volume": "61", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fred Attneave. 1954. Some informational aspects of visual perception. Psychological review, 61(3):183.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Singulars and plurals in Dutch: Evidence for a parallel dual-route model", |
| "authors": [ |
| { |
| "first": "Ton", |
| "middle": [], |
| "last": "R Harald Baayen", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Dijkstra", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Schreuder", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Journal of Memory and Language", |
| "volume": "37", |
| "issue": "1", |
| "pages": "94--117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R Harald Baayen, Ton Dijkstra, and Robert Schreuder. 1997. Singulars and plurals in Dutch: Evidence for a parallel dual-route model. Journal of Memory and Language, 37(1):94-117.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "The Phonological Loop as a Language Learning Device", |
| "authors": [ |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Baddeley", |
| "suffix": "" |
| }, |
| { |
| "first": "Susan", |
| "middle": [], |
| "last": "Gathercole", |
| "suffix": "" |
| }, |
| { |
| "first": "Costanza", |
| "middle": [], |
| "last": "Papagno", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Psychological Review", |
| "volume": "105", |
| "issue": "1", |
| "pages": "158--173", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan Baddeley, Susan Gathercole, and Costanza Pa- pagno. 1998. The Phonological Loop as a Lan- guage Learning Device. Psychological Review, 105(1):158-173.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Working Memory", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Alan", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Baddeley", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hitch", |
| "suffix": "" |
| } |
| ], |
| "year": 1974, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan D Baddeley and Graham Hitch. 1974. Working Memory. University of Stirling, Stirling, Scotland.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Discovering discrete subword units with binarized autoencoders and hidden-markovmodel encoders", |
| "authors": [ |
| { |
| "first": "Leonardo", |
| "middle": [], |
| "last": "Badino", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessio", |
| "middle": [], |
| "last": "Mereta", |
| "suffix": "" |
| }, |
| { |
| "first": "Lorenzo", |
| "middle": [], |
| "last": "Rosasco", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leonardo Badino, Alessio Mereta, and Lorenzo Rosasco. 2015. Discovering discrete subword units with binarized autoencoders and hidden-markov- model encoders. In Sixteenth Annual Conference of the International Speech Communication Associ- ation.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Using articulatory features and inferred phonological segments in zero resource speech processing", |
| "authors": [ |
| { |
| "first": "Pallavi", |
| "middle": [], |
| "last": "Baljekar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunayana", |
| "middle": [], |
| "last": "Sitaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Prasanna", |
| "middle": [], |
| "last": "Kumar Muthukumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pallavi Baljekar, Sunayana Sitaram, Prasanna Kumar Muthukumar, and Alan W Black. 2015. Using artic- ulatory features and inferred phonological segments in zero resource speech processing. In Sixteenth An- nual Conference of the International Speech Com- munication Association.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Modeling unsupervised phonetic and phonological learning in Generative Adversarial Phonology", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of the Society for Computation in Linguistics", |
| "volume": "3", |
| "issue": "", |
| "pages": "138--148", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ga\u0161per Begu\u0161. 2020. Modeling unsupervised phonetic and phonological learning in Generative Adversarial Phonology. Proceedings of the Society for Computa- tion in Linguistics, 3(1):138-148.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Voice-selective areas in human auditory cortex", |
| "authors": [ |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Belin", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Robert", |
| "suffix": "" |
| }, |
| { |
| "first": "Philippe", |
| "middle": [], |
| "last": "Zatorre", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Lafaille", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruce", |
| "middle": [], |
| "last": "Ahad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pike", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Nature", |
| "volume": "403", |
| "issue": "6767", |
| "pages": "309--312", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pascal Belin, Robert J Zatorre, Philippe Lafaille, Pierre Ahad, and Bruce Pike. 2000. Voice-selective areas in human auditory cortex. Nature, 403(6767):309- 312.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Estimating or propagating gradients through stochastic neurons for conditional computation", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "L\u00e9onard", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1308.3432" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, Nicholas L\u00e9onard, and Aaron Courville. 2013. Estimating or propagating gradi- ents through stochastic neurons for conditional com- putation. arXiv preprint arXiv:1308.3432.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Predictability, complexity, and learning. Neural computation", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Bialek", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Nemenman", |
| "suffix": "" |
| }, |
| { |
| "first": "Naftali", |
| "middle": [], |
| "last": "Tishby", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "", |
| "volume": "13", |
| "issue": "", |
| "pages": "2409--2463", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Bialek, Ilya Nemenman, and Naftali Tishby. 2001. Predictability, complexity, and learning. Neu- ral computation, 13(11):2409-2463.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Early specialization for voice and emotion processing in the infant brain", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Blasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Evelyne", |
| "middle": [], |
| "last": "Mercure", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Lloyd-Fox", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Brammer", |
| "suffix": "" |
| }, |
| { |
| "first": "Disa", |
| "middle": [], |
| "last": "Sauter", |
| "suffix": "" |
| }, |
| { |
| "first": "Quinton", |
| "middle": [], |
| "last": "Deeley", |
| "suffix": "" |
| }, |
| { |
| "first": "Gareth", |
| "middle": [ |
| "J" |
| ], |
| "last": "Barker", |
| "suffix": "" |
| }, |
| { |
| "first": "Ville", |
| "middle": [], |
| "last": "Renvall", |
| "suffix": "" |
| }, |
| { |
| "first": "Sean", |
| "middle": [], |
| "last": "Deoni", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Current biology", |
| "volume": "21", |
| "issue": "14", |
| "pages": "1220--1224", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Blasi, Evelyne Mercure, Sarah Lloyd-Fox, Alex Thomson, Michael Brammer, Disa Sauter, Quinton Deeley, Gareth J Barker, Ville Renvall, Sean Deoni, and others. 2011. Early specialization for voice and emotion processing in the infant brain. Current biol- ogy, 21(14):1220-1224.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Computational auditory scene analysis. Computer speech and language", |
| "authors": [ |
| { |
| "first": "Guy", |
| "middle": [ |
| "J" |
| ], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Cooke", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "8", |
| "issue": "", |
| "pages": "297--336", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guy J Brown and Martin Cooke. 1994. Computational auditory scene analysis. Computer speech and lan- guage, 8(4):297-336.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Inflection classes, gender, and the principle of contrast. Language", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Carstairs-Mccarthy", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "737--788", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Carstairs-McCarthy. 1994. Inflection classes, gender, and the principle of contrast. Language, pages 737-788.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Parallel inference of Dirichlet process Gaussian mixture models for unsupervised acoustic modeling: A feasibility study", |
| "authors": [ |
| { |
| "first": "Hongjie", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheung-Chi", |
| "middle": [], |
| "last": "Leung", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hongjie Chen, Cheung-Chi Leung, Lei Xie, Bin Ma, and Haizhou Li. 2015. Parallel inference of Dirich- let process Gaussian mixture models for unsuper- vised acoustic modeling: A feasibility study. In Sixteenth Annual Conference of the International Speech Communication Association.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Multilingual bottle-neck feature learning from untranscribed speech", |
| "authors": [ |
| { |
| "first": "Hongjie", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheung-Chi", |
| "middle": [], |
| "last": "Leung", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Automatic Speech Recognition and Understanding Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "727--733", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hongjie Chen, Cheung-Chi Leung, Lei Xie, Bin Ma, and Haizhou Li. 2017. Multilingual bottle-neck fea- ture learning from untranscribed speech. In Auto- matic Speech Recognition and Understanding Work- shop (ASRU), 2017 IEEE, pages 727-733. IEEE.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Syntactic Structures", |
| "authors": [ |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Chomsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1957, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noam Chomsky. 1957. Syntactic Structures. Mouton, The Hague.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Aspects of the Theory of Syntax", |
| "authors": [ |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Chomsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1965, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noam Chomsky. 1965. Aspects of the Theory of Syntax. MIT Press, Cambridge, MA.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The Sound Pattern of English", |
| "authors": [ |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Chomsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Morris", |
| "middle": [], |
| "last": "Halle", |
| "suffix": "" |
| } |
| ], |
| "year": 1968, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noam Chomsky and Morris Halle. 1968. The Sound Pattern of English. Harper & Row.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Unsupervised speech representation learning using wavenet autoencoders", |
| "authors": [ |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Chorowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Ron", |
| "middle": [ |
| "J" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "A\u00e4ron", |
| "middle": [], |
| "last": "Van Den Oord", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jan Chorowski, Ron J Weiss, Samy Bengio, and A\u00e4ron van den Oord. 2019. Unsupervised speech representation learning using wavenet autoencoders.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "speech, and language processing", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "27", |
| "issue": "", |
| "pages": "2041--2053", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "IEEE/ACM transactions on audio, speech, and lan- guage processing, 27(12):2041-2053.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Turning the pipeline into a loop: Iterated unsupervised dependency parsing and PoS induction", |
| "authors": [ |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Christodoulopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the NAACL-HLT Workshop on the Induction of Linguistic Structure", |
| "volume": "", |
| "issue": "", |
| "pages": "96--99", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christos Christodoulopoulos, Sharon Goldwater, and Mark Steedman. 2012. Turning the pipeline into a loop: Iterated unsupervised dependency parsing and PoS induction. In Proceedings of the NAACL-HLT Workshop on the Induction of Linguistic Structure, pages 96-99.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Analyzing analytical methods: The case of phonology in neural models of spoken language", |
| "authors": [ |
| { |
| "first": "Grzegorz", |
| "middle": [], |
| "last": "Chrupa\u0142a", |
| "suffix": "" |
| }, |
| { |
| "first": "Bertrand", |
| "middle": [], |
| "last": "Higy", |
| "suffix": "" |
| }, |
| { |
| "first": "Afra", |
| "middle": [], |
| "last": "Alishahi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2004.07070" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Grzegorz Chrupa\u0142a, Bertrand Higy, and Afra Alishahi. 2020. Analyzing analytical methods: The case of phonology in neural models of spoken language. arXiv preprint arXiv:2004.07070.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Hierarchical Multiscale Recurrent Neural Networks", |
| "authors": [ |
| { |
| "first": "Junyoung", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungjin", |
| "middle": [], |
| "last": "Ahn", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junyoung Chung, Sungjin Ahn, and Yoshua Bengio. 2017. Hierarchical Multiscale Recurrent Neural Networks. In International Conference on Learning Representations 2017.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "The geometry of phonological features", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "N" |
| ], |
| "last": "Clements", |
| "suffix": "" |
| } |
| ], |
| "year": 1985, |
| "venue": "Phonology", |
| "volume": "2", |
| "issue": "1", |
| "pages": "225--252", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George N Clements. 1985. The geometry of phonolog- ical features. Phonology, 2(1):225-252.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Fast and accurate deep network learning by exponential linear units (elus)", |
| "authors": [ |
| { |
| "first": "Djork-Arn\u00e9", |
| "middle": [], |
| "last": "Clevert", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Unterthiner", |
| "suffix": "" |
| }, |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1511.07289" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Djork-Arn\u00e9 Clevert, Thomas Unterthiner, and Sepp Hochreiter. 2015. Fast and accurate deep network learning by exponential linear units (elus). arXiv preprint arXiv:1511.07289.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Ran El-Yaniv, and Yoshua Bengio", |
| "authors": [ |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Courbariaux", |
| "suffix": "" |
| }, |
| { |
| "first": "Itay", |
| "middle": [], |
| "last": "Hubara", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Soudry", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Binarized neural networks: Training deep neural networks with weights and activations constrained to+ 1 or-1", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1602.02830" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthieu Courbariaux, Itay Hubara, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. 2016. Bina- rized neural networks: Training deep neural net- works with weights and activations constrained to+ 1 or-1. arXiv preprint arXiv:1602.02830.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "On short and long auditory stores", |
| "authors": [ |
| { |
| "first": "Nelson", |
| "middle": [], |
| "last": "Cowan", |
| "suffix": "" |
| } |
| ], |
| "year": 1984, |
| "venue": "Psychological bulletin", |
| "volume": "96", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nelson Cowan. 1984. On short and long auditory stores. Psychological bulletin, 96(2):341.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Time course and functional neuroanatomy of speech segmentation in adults", |
| "authors": [ |
| { |
| "first": "Toni", |
| "middle": [], |
| "last": "Cunillera", |
| "suffix": "" |
| }, |
| { |
| "first": "Estela", |
| "middle": [], |
| "last": "C\u00e0mara", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Juan", |
| "suffix": "" |
| }, |
| { |
| "first": "Josep", |
| "middle": [], |
| "last": "Toro", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Marco-Pallares", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Neuroimage", |
| "volume": "48", |
| "issue": "", |
| "pages": "541--553", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toni Cunillera, Estela C\u00e0mara, Juan M Toro, Josep Marco-Pallares, Nuria Sebasti\u00e1n-Galles, Hector Or- tiz, Jes\u00fas Pujol, and Antoni Rodr\\'\\iguez-Fornells. 2009. Time course and functional neuroanatomy of speech segmentation in adults. Neuroimage, 48(3):541-553.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "The effects of stress and statistical cues on continuous speech segmentation: an event-related brain potential study", |
| "authors": [ |
| { |
| "first": "Toni", |
| "middle": [], |
| "last": "Cunillera", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Juan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Toro", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Brain Research", |
| "volume": "1123", |
| "issue": "", |
| "pages": "168--178", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toni Cunillera, Juan M Toro, Nuria Sebasti\u00e1n-Gall\u00e9s, and Antoni Rodr\\'\\iguez-Fornells. 2006. The ef- fects of stress and statistical cues on continuous speech segmentation: an event-related brain poten- tial study. Brain Research, 1123(1):168-178.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A smartphone-based ASR data collection tool for under-resourced languages", |
| "authors": [ |
| { |
| "first": "Nic", |
| "middle": [ |
| "J" |
| ], |
| "last": "De Vries", |
| "suffix": "" |
| }, |
| { |
| "first": "Marelie", |
| "middle": [ |
| "H" |
| ], |
| "last": "Davel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaco", |
| "middle": [], |
| "last": "Badenhorst", |
| "suffix": "" |
| }, |
| { |
| "first": "Willem", |
| "middle": [ |
| "D" |
| ], |
| "last": "Basson", |
| "suffix": "" |
| }, |
| { |
| "first": "Febe", |
| "middle": [], |
| "last": "De Wet", |
| "suffix": "" |
| }, |
| { |
| "first": "Etienne", |
| "middle": [], |
| "last": "Barnard", |
| "suffix": "" |
| }, |
| { |
| "first": "Alta", |
| "middle": [], |
| "last": "De Waal", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "56", |
| "issue": "", |
| "pages": "119--131", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nic J De Vries, Marelie H Davel, Jaco Badenhorst, Willem D Basson, Febe De Wet, Etienne Barnard, and Alta De Waal. 2014. A smartphone-based ASR data collection tool for under-resourced languages. Speech communication, 56:119-131.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "The zero resource speech challenge 2017", |
| "authors": [ |
| { |
| "first": "Ewan", |
| "middle": [], |
| "last": "Dunbar", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuan", |
| "middle": [ |
| "Nga" |
| ], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Benjumea", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Karadayi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mathieu", |
| "middle": [], |
| "last": "Bernard", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Besacier", |
| "suffix": "" |
| }, |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Anguera", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Dupoux", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Automatic Speech Recognition and Understanding Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "323--330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ewan Dunbar, Xuan Nga Cao, Juan Benjumea, Julien Karadayi, Mathieu Bernard, Laurent Besacier, Xavier Anguera, and Emmanuel Dupoux. 2017. The zero resource speech challenge 2017. In Automatic Speech Recognition and Understanding Workshop (ASRU), 2017 IEEE, pages 323-330. IEEE.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Distributed representations, simple recurrent networks, and grammatical structure", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [ |
| "L" |
| ], |
| "last": "Elman", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Machine Learning", |
| "volume": "7", |
| "issue": "", |
| "pages": "195--225", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey L Elman. 1991. Distributed representations, simple recurrent networks, and grammatical struc- ture. Machine Learning, 7:195-225.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Learning and development in neural networks: The importance of starting small", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [ |
| "L" |
| ], |
| "last": "Elman", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Cognition", |
| "volume": "48", |
| "issue": "", |
| "pages": "71--99", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey L Elman. 1993. Learning and development in neural networks: The importance of starting small. Cognition, 48:71-99.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Eloff", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [], |
| "last": "Nortje", |
| "suffix": "" |
| }, |
| { |
| "first": "Avashna", |
| "middle": [], |
| "last": "Benjamin Van Niekerk", |
| "suffix": "" |
| }, |
| { |
| "first": "Leanne", |
| "middle": [], |
| "last": "Govender", |
| "suffix": "" |
| }, |
| { |
| "first": "Arnu", |
| "middle": [], |
| "last": "Nortje", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pretorius", |
| "suffix": "" |
| }, |
| { |
| "first": "Ewald", |
| "middle": [], |
| "last": "Elan Van Biljon", |
| "suffix": "" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Van Der Westhuizen", |
| "suffix": "" |
| }, |
| { |
| "first": "Herman", |
| "middle": [], |
| "last": "Van Staden", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kamper", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "1103--1107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Eloff, Andr\u00e9 Nortje, Benjamin van Niekerk, Avashna Govender, Leanne Nortje, Arnu Pretorius, Elan van Biljon, Ewald van der Westhuizen, Lisa van Staden, and Herman Kamper. 2019. Unsuper- vised Acoustic Unit Discovery for Speech Synthe- sis Using Discrete Latent-Variable Neural Networks. Proc. Interspeech 2019, pages 1103-1107.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Speech segmentation with a neural encoder model of working memory", |
| "authors": [ |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| }, |
| { |
| "first": "Cory", |
| "middle": [], |
| "last": "Shain", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1070--1080", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Micha Elsner and Cory Shain. 2017. Speech segmenta- tion with a neural encoder model of working mem- ory. In Proceedings of the 2017 Conference on Em- pirical Methods in Natural Language Processing, pages 1070-1080.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Sensitivity to voice in human prefrontal cortex", |
| "authors": [ |
| { |
| "first": "Shirley", |
| "middle": [], |
| "last": "Fecteau", |
| "suffix": "" |
| }, |
| { |
| "first": "Jorge", |
| "middle": [ |
| "L" |
| ], |
| "last": "Armony", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Joanette", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Belin", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Journal of Neurophysiology", |
| "volume": "94", |
| "issue": "3", |
| "pages": "2251--2254", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shirley Fecteau, Jorge L Armony, Yves Joanette, and Pascal Belin. 2005. Sensitivity to voice in hu- man prefrontal cortex. Journal of Neurophysiology, 94(3):2251-2254.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "The DARPA Speech Recognition Research Database: Specifications and Status", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [ |
| "M" |
| ], |
| "last": "Fisher", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [ |
| "R" |
| ], |
| "last": "Doddington", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Goudie-Marshall", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Proceedings of DARPA Workshop on Speech Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "93--99", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William M Fisher, George R Doddington, and Kath- leen M Goudie-Marshall. 1986. The DARPA Speech Recognition Research Database: Specifica- tions and Status. In Proceedings of DARPA Work- shop on Speech Recognition, pages 93-99.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Insensitivity of the Human Sentence-Processing System to Hierarchical Structure", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [ |
| "L" |
| ], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "Rens", |
| "middle": [], |
| "last": "Bod", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Psychological Science", |
| "volume": "22", |
| "issue": "6", |
| "pages": "829--834", |
| "other_ids": { |
| "DOI": [ |
| "10.1177/0956797611409589" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan L Frank and Rens Bod. 2011. Insensitivity of the Human Sentence-Processing System to Hierar- chical Structure. Psychological Science, 22(6):829- 834.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Hierarchical and sequential processing of language. Language", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [ |
| "L" |
| ], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "Morten", |
| "middle": [ |
| "H" |
| ], |
| "last": "Christiansen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Cognition and Neuroscience", |
| "volume": "33", |
| "issue": "9", |
| "pages": "1213--1218", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan L Frank and Morten H Christiansen. 2018. Hier- archical and sequential processing of language. Lan- guage, Cognition and Neuroscience, 33(9):1213- 1218.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "The free-energy principle: a unified brain theory?", |
| "authors": [ |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Friston", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Nature reviews neuroscience", |
| "volume": "11", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karl Friston. 2010. The free-energy principle: a uni- fied brain theory? Nature reviews neuroscience, 11(2):127.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Understanding the difficulty of training deep feedforward neural networks", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the thirteenth international conference on artificial intelligence and statistics", |
| "volume": "", |
| "issue": "", |
| "pages": "249--256", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understand- ing the difficulty of training deep feedforward neural networks. In Proceedings of the thirteenth interna- tional conference on artificial intelligence and statis- tics, pages 249-256.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Language Identification in the Limit", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [ |
| "E" |
| ], |
| "last": "Gold", |
| "suffix": "" |
| } |
| ], |
| "year": 1967, |
| "venue": "Information and Control", |
| "volume": "", |
| "issue": "10", |
| "pages": "447--474", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark E. Gold. 1967. Language Identification in the Limit. Information and Control, (10):447-474.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Autosegmental phonology", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Goldsmith", |
| "suffix": "" |
| } |
| ], |
| "year": 1976, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Goldsmith. 1976. Autosegmental phonology. Ph.D. thesis, MIT Press London.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Unsupervised learning of the morphology of a natural language", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Goldsmith", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "27", |
| "issue": "2", |
| "pages": "153--189", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Goldsmith. 2003. Unsupervised learning of the morphology of a natural language. Computational Linguistics, 27(2):153-189.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "A {Bayesian} framework for word segmentation: {Exploring} the effects of context", |
| "authors": [ |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Cognition", |
| "volume": "112", |
| "issue": "", |
| "pages": "21--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sharon Goldwater, Thomas Griffiths, and Mark John- son. 2009. A {Bayesian} framework for word seg- mentation: {Exploring} the effects of context. Cog- nition, 112:21-54.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Predictive power of word surprisal for reading times is a linear function of language model quality", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Goodkind", |
| "suffix": "" |
| }, |
| { |
| "first": "Klinton", |
| "middle": [], |
| "last": "Bicknell", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 8th Workshop on Cognitive Modeling and Computational Linguistics (CMCL 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "10--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Goodkind and Klinton Bicknell. 2018. Predic- tive power of word surprisal for reading times is a linear function of language model quality. In Pro- ceedings of the 8th Workshop on Cognitive Modeling and Computational Linguistics (CMCL 2018), pages 10-18.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "A cross-linguistic acoustic study of voiceless fricatives", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Gordon", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Barthmaier", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathy", |
| "middle": [], |
| "last": "Sands", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Journal of the International Phonetic Association", |
| "volume": "", |
| "issue": "", |
| "pages": "141--174", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Gordon, Paul Barthmaier, and Kathy Sands. 2002. A cross-linguistic acoustic study of voiceless fricatives. Journal of the International Phonetic As- sociation, pages 141-174.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Phonation types: a cross-linguistic overview", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Gordon", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Ladefoged", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Journal of phonetics", |
| "volume": "29", |
| "issue": "4", |
| "pages": "383--406", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Gordon and Peter Ladefoged. 2001. Phona- tion types: a cross-linguistic overview. Journal of phonetics, 29(4):383-406.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Uncertainty about the rest of the sentence", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Hale", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Cognitive Science", |
| "volume": "30", |
| "issue": "4", |
| "pages": "643--672", |
| "other_ids": { |
| "DOI": [ |
| "10.1207/s15516709cog0000_64" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Hale. 2006. Uncertainty about the rest of the sen- tence. Cognitive Science, 30(4):643-672.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Cortical connectivity and sensory coding", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Kenneth", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas D Mrsic-Flogel", |
| "middle": [], |
| "last": "Harris", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Nature", |
| "volume": "503", |
| "issue": "7474", |
| "pages": "51--58", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenneth D Harris and Thomas D Mrsic-Flogel. 2013. Cortical connectivity and sensory coding. Nature, 503(7474):51-58.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Hierarchical process memory: memory as an integral component of information processing", |
| "authors": [ |
| { |
| "first": "Uri", |
| "middle": [], |
| "last": "Hasson", |
| "suffix": "" |
| }, |
| { |
| "first": "Janice", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher J", |
| "middle": [], |
| "last": "Honey", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Trends in cognitive sciences", |
| "volume": "19", |
| "issue": "6", |
| "pages": "304--313", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Uri Hasson, Janice Chen, and Christopher J Honey. 2015. Hierarchical process memory: memory as an integral component of information processing. Trends in cognitive sciences, 19(6):304-313.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Introductory phonology", |
| "authors": [ |
| { |
| "first": "Bruce", |
| "middle": [], |
| "last": "Hayes", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "32", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bruce Hayes. 2011. Introductory phonology, vol- ume 32. John Wiley & Sons, Hoboken.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Scalable modified Kneser-Ney language model estimation", |
| "authors": [ |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Pouzyrevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "H" |
| ], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "690--696", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenneth Heafield, Ivan Pouzyrevsky, Jonathan H Clark, and Philipp Koehn. 2013. Scalable modified Kneser- Ney language model estimation. In Proceedings of the 51st Annual Meeting of the Association for Com- putational Linguistics, pages 690-696, Sofia, Bul- garia.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Unsupervised linear discriminant analysis for supporting DPGMM clustering in the zero resource scenario", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Heck", |
| "suffix": "" |
| }, |
| { |
| "first": "Sakriani", |
| "middle": [], |
| "last": "Sakti", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Procedia Computer Science", |
| "volume": "81", |
| "issue": "", |
| "pages": "73--79", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Heck, Sakriani Sakti, and Satoshi Nakamura. 2016. Unsupervised linear discriminant analysis for supporting DPGMM clustering in the zero resource scenario. Procedia Computer Science, 81:73-79.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Feature optimized dpgmm clustering for unsupervised subword modeling: A contribution to zerospeech 2017", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Heck", |
| "suffix": "" |
| }, |
| { |
| "first": "Sakriani", |
| "middle": [], |
| "last": "Sakti", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Automatic Speech Recognition and Understanding Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "740--746", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Heck, Sakriani Sakti, and Satoshi Nakamura. 2017. Feature optimized dpgmm clustering for un- supervised subword modeling: A contribution to ze- rospeech 2017. In Automatic Speech Recognition and Understanding Workshop (ASRU), 2017 IEEE, pages 740-746. IEEE.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Neural Networks for Machine Learning. Coursera, video lectures", |
| "authors": [ |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geoffrey Hinton. 2012. Neural Networks for Machine Learning. Coursera, video lectures.", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "Long Short-Term Memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Comput", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long Short-Term Memory. Neural Comput., 9(8):1735- 1780.", |
| "links": null |
| }, |
| "BIBREF63": { |
| "ref_id": "b63", |
| "title": "Neural networks and physical systems with emergent collective computational abilities", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hopfield", |
| "suffix": "" |
| } |
| ], |
| "year": 1982, |
| "venue": "Proceedings of the national academy of sciences", |
| "volume": "79", |
| "issue": "", |
| "pages": "2554--2558", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John J Hopfield. 1982. Neural networks and physi- cal systems with emergent collective computational abilities. Proceedings of the national academy of sciences, 79(8):2554-2558.", |
| "links": null |
| }, |
| "BIBREF64": { |
| "ref_id": "b64", |
| "title": "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift", |
| "authors": [ |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Ioffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Szegedy", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "448--456", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sergey Ioffe and Christian Szegedy. 2015. Batch Nor- malization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. In International Conference on Machine Learning, pages 448-456.", |
| "links": null |
| }, |
| "BIBREF65": { |
| "ref_id": "b65", |
| "title": "Maintenance of multiple working memory items by temporal segmentation", |
| "authors": [ |
| { |
| "first": "Ole", |
| "middle": [], |
| "last": "Jensen", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Neuroscience", |
| "volume": "139", |
| "issue": "", |
| "pages": "237--249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ole Jensen. 2006. Maintenance of multiple working memory items by temporal segmentation. Neuro- science, 139(1):237-249.", |
| "links": null |
| }, |
| "BIBREF66": { |
| "ref_id": "b66", |
| "title": "Prediction plays a key role in language development as well as processing", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Matt", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [ |
| "B" |
| ], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Adele", |
| "middle": [ |
| "E" |
| ], |
| "last": "Turk-Browne", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Behavioral and Brain Sciences", |
| "volume": "36", |
| "issue": "4", |
| "pages": "360", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt A Johnson, Nicholas B Turk-Browne, and Adele E Goldberg. 2013. Prediction plays a key role in language development as well as processing. Be- havioral and Brain Sciences, 36(4):360.", |
| "links": null |
| }, |
| "BIBREF67": { |
| "ref_id": "b67", |
| "title": "Exploring the limits of language modeling", |
| "authors": [ |
| { |
| "first": "Rafal", |
| "middle": [], |
| "last": "Jozefowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1602.02410" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rafal Jozefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, and Yonghui Wu. 2016. Exploring the limits of language modeling. arXiv preprint arXiv:1602.02410.", |
| "links": null |
| }, |
| "BIBREF68": { |
| "ref_id": "b68", |
| "title": "Revisiting the Hierarchical Multiscale LSTM", |
| "authors": [ |
| { |
| "first": "Akos", |
| "middle": [], |
| "last": "K\u00e1d\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc-Alexandre", |
| "middle": [], |
| "last": "C\u00f4t\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Grzegorz", |
| "middle": [], |
| "last": "Chrupa\u0142a", |
| "suffix": "" |
| }, |
| { |
| "first": "Afra", |
| "middle": [], |
| "last": "Alishahi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3215--3227", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Akos K\u00e1d\u00e1r, Marc-Alexandre C\u00f4t\u00e9, Grzegorz Chrupa\u0142a, and Afra Alishahi. 2018. Revisiting the Hierarchical Multiscale LSTM. In Proceed- ings of the 27th International Conference on Computational Linguistics, pages 3215-3227.", |
| "links": null |
| }, |
| "BIBREF69": { |
| "ref_id": "b69", |
| "title": "Unsupervised neural network based feature extraction using weak top-down constraints", |
| "authors": [ |
| { |
| "first": "Herman", |
| "middle": [], |
| "last": "Kamper", |
| "suffix": "" |
| }, |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| }, |
| { |
| "first": "Aren", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on", |
| "volume": "", |
| "issue": "", |
| "pages": "5818--5822", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Herman Kamper, Micha Elsner, Aren Jansen, and Sharon Goldwater. 2015. Unsupervised neural net- work based feature extraction using weak top-down constraints. In Acoustics, Speech and Signal Pro- cessing (ICASSP), 2015 IEEE International Confer- ence on, pages 5818-5822. IEEE.", |
| "links": null |
| }, |
| "BIBREF70": { |
| "ref_id": "b70", |
| "title": "A segmental framework for fullyunsupervised large-vocabulary speech recognition", |
| "authors": [ |
| { |
| "first": "Herman", |
| "middle": [], |
| "last": "Kamper", |
| "suffix": "" |
| }, |
| { |
| "first": "Aren", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Computer Speech & Language", |
| "volume": "46", |
| "issue": "", |
| "pages": "154--174", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.csl.2017.04.008" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Herman Kamper, Aren Jansen, and Sharon Goldwa- ter. 2017a. A segmental framework for fully- unsupervised large-vocabulary speech recognition. Computer Speech & Language, 46:154-174.", |
| "links": null |
| }, |
| "BIBREF71": { |
| "ref_id": "b71", |
| "title": "An embedded segmental k-means model for unsupervised segmentation and clustering of speech", |
| "authors": [ |
| { |
| "first": "Herman", |
| "middle": [], |
| "last": "Kamper", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Livescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Automatic Speech Recognition and Understanding Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "719--726", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Herman Kamper, Karen Livescu, and Sharon Gold- water. 2017b. An embedded segmental k-means model for unsupervised segmentation and clustering of speech. In Automatic Speech Recognition and Un- derstanding Workshop (ASRU), 2017 IEEE, pages 719-726. IEEE.", |
| "links": null |
| }, |
| "BIBREF72": { |
| "ref_id": "b72", |
| "title": "Predictive Processing: A Canonical Cortical Computation", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Georg", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas D Mrsic-Flogel", |
| "middle": [], |
| "last": "Keller", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Neuron", |
| "volume": "100", |
| "issue": "2", |
| "pages": "424--435", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Georg B Keller and Thomas D Mrsic-Flogel. 2018. Predictive Processing: A Canonical Cortical Com- putation. Neuron, 100(2):424-435.", |
| "links": null |
| }, |
| "BIBREF73": { |
| "ref_id": "b73", |
| "title": "Less really is more for adults learning a miniature artificial language", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Alan", |
| "suffix": "" |
| }, |
| { |
| "first": "Julie", |
| "middle": [ |
| "L" |
| ], |
| "last": "Kersten", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Earles", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Journal of Memory and Language", |
| "volume": "44", |
| "issue": "2", |
| "pages": "250--273", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan W Kersten and Julie L Earles. 2001. Less re- ally is more for adults learning a miniature artifi- cial language. Journal of Memory and Language, 44(2):250-273.", |
| "links": null |
| }, |
| "BIBREF74": { |
| "ref_id": "b74", |
| "title": "A hierarchy of time-scales and the brain", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Stefan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Kiebel", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "J" |
| ], |
| "last": "Daunizeau", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Friston", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "PLoS Comput Biol", |
| "volume": "4", |
| "issue": "11", |
| "pages": "e1000209", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan J Kiebel, Jean Daunizeau, and Karl J Friston. 2008. A hierarchy of time-scales and the brain. PLoS Comput Biol, 4(11):e1000209.", |
| "links": null |
| }, |
| "BIBREF75": { |
| "ref_id": "b75", |
| "title": "Unsupervised Lexicon Discovery from Acoustic Input", |
| "authors": [ |
| { |
| "first": "Chia-Ying", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Timothy", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "O'donnell", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "3", |
| "issue": "", |
| "pages": "389--403", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chia-ying Lee, Timothy J O'Donnell, and James Glass. 2015. Unsupervised Lexicon Discovery from Acoustic Input. In Transactions of the Associa- tion for Computational Linguistics, volume 3, pages 389-403.", |
| "links": null |
| }, |
| "BIBREF76": { |
| "ref_id": "b76", |
| "title": "Assessing the ability of LSTMs to learn syntax-sensitive dependencies", |
| "authors": [ |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Dupoux", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "4", |
| "issue": "", |
| "pages": "521--535", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tal Linzen, Emmanuel Dupoux, and Yoav Goldberg. 2016. Assessing the ability of LSTMs to learn syntax-sensitive dependencies. Transactions of the Association for Computational Linguistics, 4:521- 535.", |
| "links": null |
| }, |
| "BIBREF77": { |
| "ref_id": "b77", |
| "title": "The existence of persistent states in the brain", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "William A Little", |
| "suffix": "" |
| } |
| ], |
| "year": 1974, |
| "venue": "Mathematical biosciences", |
| "volume": "19", |
| "issue": "1-2", |
| "pages": "101--120", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William A Little. 1974. The existence of persistent states in the brain. Mathematical biosciences, 19(1- 2):101-120.", |
| "links": null |
| }, |
| "BIBREF78": { |
| "ref_id": "b78", |
| "title": "An evaluation of graph clustering methods for unsupervised term discovery", |
| "authors": [ |
| { |
| "first": "Vince", |
| "middle": [], |
| "last": "Lyzinski", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Sell", |
| "suffix": "" |
| }, |
| { |
| "first": "Aren", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vince Lyzinski, Gregory Sell, and Aren Jansen. 2015. An evaluation of graph clustering methods for unsu- pervised term discovery. In Sixteenth Annual Con- ference of the International Speech Communication Association.", |
| "links": null |
| }, |
| "BIBREF79": { |
| "ref_id": "b79", |
| "title": "Vision: A Computational Investigation into the Human Representation and Processing of Visual Information", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Marr", |
| "suffix": "" |
| } |
| ], |
| "year": 1982, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Marr. 1982. Vision: A Computational Investiga- tion into the Human Representation and Processing of Visual Information. W.H. Freeman and Company.", |
| "links": null |
| }, |
| "BIBREF80": { |
| "ref_id": "b80", |
| "title": "STDP allows close-tooptimal spatiotemporal spike pattern detection by single coincidence detector neurons", |
| "authors": [ |
| { |
| "first": "Timoth\u00e9e", |
| "middle": [], |
| "last": "Masquelier", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Neuroscience", |
| "volume": "389", |
| "issue": "", |
| "pages": "133--140", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timoth\u00e9e Masquelier. 2018. STDP allows close-to- optimal spatiotemporal spike pattern detection by single coincidence detector neurons. Neuroscience, 389:133-140.", |
| "links": null |
| }, |
| "BIBREF81": { |
| "ref_id": "b81", |
| "title": "Language learning as language use: A cross-linguistic model of child language development", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Morten", |
| "middle": [ |
| "H" |
| ], |
| "last": "Mccauley", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Christiansen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Psychological review", |
| "volume": "126", |
| "issue": "1", |
| "pages": "1", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stewart M McCauley and Morten H Christiansen. 2019. Language learning as language use: A cross-linguistic model of child language develop- ment. Psychological review, 126(1):1.", |
| "links": null |
| }, |
| "BIBREF82": { |
| "ref_id": "b82", |
| "title": "Revisiting the poverty of the stimulus: Hierarchical generalization without a hierarchical bias in recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Mccoy", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 40th Annual Conference of the Cognitive Science Society", |
| "volume": "", |
| "issue": "", |
| "pages": "2093--2098", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R Thomas McCoy, Robert Frank, and Tal Linzen. 2018. Revisiting the poverty of the stimulus: Hierarchical generalization without a hierarchical bias in recur- rent neural networks. In Proceedings of the 40th Annual Conference of the Cognitive Science Society, pages 2093-2098.", |
| "links": null |
| }, |
| "BIBREF83": { |
| "ref_id": "b83", |
| "title": "Does syntax need to grow on trees? Sources of hierarchical inductive bias in sequence-to-sequence networks", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Mccoy", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "125--140", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R Thomas McCoy, Robert Frank, and Tal Linzen. 2020. Does syntax need to grow on trees? Sources of hier- archical inductive bias in sequence-to-sequence net- works. Transactions of the Association for Compu- tational Linguistics, 8:125-140.", |
| "links": null |
| }, |
| "BIBREF84": { |
| "ref_id": "b84", |
| "title": "Sound texture perception via statistics of the auditory periphery: evidence from sound synthesis", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Josh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mcdermott", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Eero", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Simoncelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Neuron", |
| "volume": "71", |
| "issue": "5", |
| "pages": "926--940", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Josh H McDermott and Eero P Simoncelli. 2011. Sound texture perception via statistics of the audi- tory periphery: evidence from sound synthesis. Neu- ron, 71(5):926-940.", |
| "links": null |
| }, |
| "BIBREF85": { |
| "ref_id": "b85", |
| "title": "Gradient effects of within-category phonetic variation on lexical access", |
| "authors": [ |
| { |
| "first": "Bob", |
| "middle": [], |
| "last": "Mcmurray", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "N" |
| ], |
| "last": "Tanenhaus", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Aslin", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Cognition", |
| "volume": "86", |
| "issue": "2", |
| "pages": "B33--B42", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bob McMurray, Michael K Tanenhaus, and Richard N Aslin. 2002. Gradient effects of within-category phonetic variation on lexical access. Cognition, 86(2):B33-B42.", |
| "links": null |
| }, |
| "BIBREF86": { |
| "ref_id": "b86", |
| "title": "Distance measures for speech recognition, psychological and instrumental. Pattern recognition and artificial intelligence", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Mermelstein", |
| "suffix": "" |
| } |
| ], |
| "year": 1976, |
| "venue": "", |
| "volume": "116", |
| "issue": "", |
| "pages": "374--388", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Mermelstein. 1976. Distance measures for speech recognition, psychological and instrumental. Pat- tern recognition and artificial intelligence, 116:374- 388.", |
| "links": null |
| }, |
| "BIBREF87": { |
| "ref_id": "b87", |
| "title": "Blind Phoneme Segmentation With Temporal Prediction Errors", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "Okko", |
| "middle": [], |
| "last": "Rasanen", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Thiolli\u00e8re", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Dupoux", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL 2017", |
| "volume": "", |
| "issue": "", |
| "pages": "62--68", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Michel, Okko Rasanen, Roland Thiolli\u00e8re, and Emmanuel Dupoux. 2017. Blind Phoneme Segmen- tation With Temporal Prediction Errors. In Pro- ceedings of ACL 2017, Student Research Workshop, pages 62-68.", |
| "links": null |
| }, |
| "BIBREF88": { |
| "ref_id": "b88", |
| "title": "Humans store about 1.5 megabytes of information during language acquisition", |
| "authors": [ |
| { |
| "first": "Francis", |
| "middle": [], |
| "last": "Mollica", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Steven", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Piantadosi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Royal Society open science", |
| "volume": "6", |
| "issue": "3", |
| "pages": "181393", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francis Mollica and Steven T Piantadosi. 2019. Hu- mans store about 1.5 megabytes of information dur- ing language acquisition. Royal Society open sci- ence, 6(3):181393.", |
| "links": null |
| }, |
| "BIBREF89": { |
| "ref_id": "b89", |
| "title": "Words in puddles of sound: Modelling psycholinguistic effects in speech segmentation", |
| "authors": [ |
| { |
| "first": "Padraic", |
| "middle": [], |
| "last": "Monaghan", |
| "suffix": "" |
| }, |
| { |
| "first": "Morten", |
| "middle": [ |
| "H" |
| ], |
| "last": "Christiansen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Journal of child language", |
| "volume": "37", |
| "issue": "3", |
| "pages": "545--564", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Padraic Monaghan and Morten H Christiansen. 2010. Words in puddles of sound: Modelling psycholin- guistic effects in speech segmentation. Journal of child language, 37(3):545-564.", |
| "links": null |
| }, |
| "BIBREF90": { |
| "ref_id": "b90", |
| "title": "Maturational constraints on language learning", |
| "authors": [ |
| { |
| "first": "Elissa", |
| "middle": [], |
| "last": "Newport", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Cognitive Science", |
| "volume": "14", |
| "issue": "", |
| "pages": "11--28", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elissa Newport. 1990. Maturational constraints on lan- guage learning. Cognitive Science, 14:11-28.", |
| "links": null |
| }, |
| "BIBREF91": { |
| "ref_id": "b91", |
| "title": "On structuring probabilistic dependences in stochastic language modelling", |
| "authors": [ |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| }, |
| { |
| "first": "Ute", |
| "middle": [], |
| "last": "Essen", |
| "suffix": "" |
| }, |
| { |
| "first": "Reinhard", |
| "middle": [], |
| "last": "Kneser", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Computer Speech and Language", |
| "volume": "8", |
| "issue": "1", |
| "pages": "1--38", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hermann Ney, Ute Essen, and Reinhard Kneser. 1994. On structuring probabilistic dependences in stochas- tic language modelling. Computer Speech and Lan- guage, 8(1):1-38.", |
| "links": null |
| }, |
| "BIBREF92": { |
| "ref_id": "b92", |
| "title": "Motion extrapolation in catching", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "R Nijhawan", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Nature", |
| "volume": "370", |
| "issue": "6487", |
| "pages": "256", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R Nijhawan. 1994. Motion extrapolation in catching. Nature, 370(6487):256.", |
| "links": null |
| }, |
| "BIBREF93": { |
| "ref_id": "b93", |
| "title": "Hierarchical integration across multiple timescales in human auditory cortex", |
| "authors": [ |
| { |
| "first": "Laura", |
| "middle": [ |
| "K" |
| ], |
| "last": "Sam V Norman-Haignere", |
| "suffix": "" |
| }, |
| { |
| "first": "Orrin", |
| "middle": [], |
| "last": "Long", |
| "suffix": "" |
| }, |
| { |
| "first": "Werner", |
| "middle": [], |
| "last": "Devinsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ifeoma", |
| "middle": [], |
| "last": "Doyle", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Irobunda", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Merricks", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Neil", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Feldstein", |
| "suffix": "" |
| }, |
| { |
| "first": "M V", |
| "middle": [], |
| "last": "Guy", |
| "suffix": "" |
| }, |
| { |
| "first": "Catherine", |
| "middle": [], |
| "last": "Mckhann", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Schevon", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1101/2020.09.30.321687" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sam V Norman-Haignere, Laura K Long, Orrin Devin- sky, Werner Doyle, Ifeoma Irobunda, Edward Mer- ricks, Neil A Feldstein, Guy M V McKhann, Cather- ine Schevon, Adeen Flinker, and Nima Mesgarani. 2020. Hierarchical integration across multiple timescales in human auditory cortex. bioRxiv.", |
| "links": null |
| }, |
| "BIBREF94": { |
| "ref_id": "b94", |
| "title": "Emergence of simple-cell receptive field properties by learning a sparse code for natural images", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bruno", |
| "suffix": "" |
| }, |
| { |
| "first": "David J", |
| "middle": [], |
| "last": "Olshausen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Field", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Nature", |
| "volume": "381", |
| "issue": "6583", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bruno A Olshausen and David J Field. 1996. Emer- gence of simple-cell receptive field properties by learning a sparse code for natural images. Nature, 381(6583):607.", |
| "links": null |
| }, |
| "BIBREF95": { |
| "ref_id": "b95", |
| "title": "Sparse coding of sensory inputs. Current opinion in neurobiology", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bruno", |
| "suffix": "" |
| }, |
| { |
| "first": "David J", |
| "middle": [], |
| "last": "Olshausen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Field", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "14", |
| "issue": "", |
| "pages": "481--487", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bruno A Olshausen and David J Field. 2004. Sparse coding of sensory inputs. Current opinion in neuro- biology, 14(4):481-487.", |
| "links": null |
| }, |
| "BIBREF96": { |
| "ref_id": "b96", |
| "title": "WaveNet: A Generative Model for Raw Audio", |
| "authors": [ |
| { |
| "first": "A\u00e4ron", |
| "middle": [], |
| "last": "Van Den Oord", |
| "suffix": "" |
| }, |
| { |
| "first": "Sander", |
| "middle": [], |
| "last": "Dieleman", |
| "suffix": "" |
| }, |
| { |
| "first": "Heiga", |
| "middle": [], |
| "last": "Zen", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Simonyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| }, |
| { |
| "first": "Nal", |
| "middle": [], |
| "last": "Kalchbrenner", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Senior", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "9th ISCA Speech Synthesis Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A\u00e4ron van den Oord, Sander Dieleman, Heiga Zen, Karen Simonyan, Oriol Vinyals, Alex Graves, Nal Kalchbrenner, Andrew Senior, and Koray Kavukcuoglu. 2016. WaveNet: A Generative Model for Raw Audio. In 9th ISCA Speech Synthesis Work- shop, page 125.", |
| "links": null |
| }, |
| "BIBREF97": { |
| "ref_id": "b97", |
| "title": "Scikitlearn: Machine learning in Python", |
| "authors": [ |
| { |
| "first": "Fabian", |
| "middle": [], |
| "last": "Pedregosa", |
| "suffix": "" |
| }, |
| { |
| "first": "Ga\u00ebl", |
| "middle": [], |
| "last": "Varoquaux", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Gramfort", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "Bertrand", |
| "middle": [], |
| "last": "Thirion", |
| "suffix": "" |
| }, |
| { |
| "first": "Olivier", |
| "middle": [], |
| "last": "Grisel", |
| "suffix": "" |
| }, |
| { |
| "first": "Mathieu", |
| "middle": [], |
| "last": "Blondel", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "Ron", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of machine learning research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2825--2830", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabian Pedregosa, Ga\u00ebl Varoquaux, Alexandre Gram- fort, Vincent Michel, Bertrand Thirion, Olivier Grisel, Mathieu Blondel, Peter Prettenhofer, Ron Weiss, Vincent Dubourg, and others. 2011. Scikit- learn: Machine learning in Python. Journal of ma- chine learning research, 12(Oct):2825-2830.", |
| "links": null |
| }, |
| "BIBREF98": { |
| "ref_id": "b98", |
| "title": "When do memory limitations lead to regularization? An experimental and computational investigation", |
| "authors": [ |
| { |
| "first": "Amy", |
| "middle": [], |
| "last": "Perfors", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal of Memory and Language", |
| "volume": "67", |
| "issue": "4", |
| "pages": "486--506", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amy Perfors. 2012. When do memory limitations lead to regularization? An experimental and computa- tional investigation. Journal of Memory and Lan- guage, 67(4):486-506.", |
| "links": null |
| }, |
| "BIBREF99": { |
| "ref_id": "b99", |
| "title": "The human voice areas: Spatial organization and inter-individual variability in temporal and extratemporal cortices", |
| "authors": [ |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Cyril R Pernet", |
| "suffix": "" |
| }, |
| { |
| "first": "Marianne", |
| "middle": [], |
| "last": "Mcaleer", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Latinus", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Krzysztof", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Gorgolewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Patricia", |
| "middle": [ |
| "E" |
| ], |
| "last": "Charest", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "H" |
| ], |
| "last": "Bestelmeyer", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Watson", |
| "suffix": "" |
| }, |
| { |
| "first": "Frances", |
| "middle": [], |
| "last": "Fleming", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Crabbe", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Neuroimage", |
| "volume": "119", |
| "issue": "", |
| "pages": "164--174", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cyril R Pernet, Phil McAleer, Marianne Latinus, Krzysztof J Gorgolewski, Ian Charest, Patricia E G Bestelmeyer, Rebecca H Watson, David Fleming, Frances Crabbe, Mitchell Valdes-Sosa, and others. 208 2015. The human voice areas: Spatial organization and inter-individual variability in temporal and extra- temporal cortices. Neuroimage, 119:164-174.", |
| "links": null |
| }, |
| "BIBREF100": { |
| "ref_id": "b100", |
| "title": "The role of language processing in language acquisition", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Phillips", |
| "suffix": "" |
| }, |
| { |
| "first": "Lara", |
| "middle": [], |
| "last": "Ehrenhofer", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Linguistic approaches to bilingualism", |
| "volume": "5", |
| "issue": "4", |
| "pages": "409--453", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Phillips and Lara Ehrenhofer. 2015. The role of language processing in language acquisition. Lin- guistic approaches to bilingualism, 5(4):409-453.", |
| "links": null |
| }, |
| "BIBREF101": { |
| "ref_id": "b101", |
| "title": "The logical primitives of thought: Empirical foundations for compositional cognitive models", |
| "authors": [ |
| { |
| "first": "Joshua", |
| "middle": [ |
| "B" |
| ], |
| "last": "Steven T Piantadosi", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah D", |
| "middle": [], |
| "last": "Tenenbaum", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Psychological review", |
| "volume": "123", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven T Piantadosi, Joshua B Tenenbaum, and Noah D Goodman. 2016. The logical primitives of thought: Empirical foundations for compositional cognitive models. Psychological review, 123(4):392.", |
| "links": null |
| }, |
| "BIBREF102": { |
| "ref_id": "b102", |
| "title": "Rules of language", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Pinker", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Science", |
| "volume": "253", |
| "issue": "5019", |
| "pages": "530--535", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Pinker. 1991. Rules of language. Science, 253(5019):530-535.", |
| "links": null |
| }, |
| "BIBREF103": { |
| "ref_id": "b103", |
| "title": "The Buckeye corpus of conversational speech: labeling conventions and a test of transcriber reliability", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mark", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Pitt", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Hume", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Kiesling", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Raymond", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Speech Communication", |
| "volume": "45", |
| "issue": "1", |
| "pages": "89--95", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark A Pitt, Keith Johnson, Elizabeth Hume, Scott Kiesling, and William Raymond. 2005. The Buck- eye corpus of conversational speech: labeling con- ventions and a test of transcriber reliability. Speech Communication, 45(1):89-95.", |
| "links": null |
| }, |
| "BIBREF104": { |
| "ref_id": "b104", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI Blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF105": { |
| "ref_id": "b105", |
| "title": "Unsupervised word discovery from speech using automatic segmentation into syllable-like units", |
| "authors": [ |
| { |
| "first": "Okko", |
| "middle": [], |
| "last": "R\u00e4s\u00e4nen", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabriel", |
| "middle": [], |
| "last": "Doyle", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael C", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Okko R\u00e4s\u00e4nen, Gabriel Doyle, and Michael C Frank. 2015. Unsupervised word discovery from speech us- ing automatic segmentation into syllable-like units. In Sixteenth Annual Conference of the International Speech Communication Association.", |
| "links": null |
| }, |
| "BIBREF106": { |
| "ref_id": "b106", |
| "title": "A comparison of neural network methods for unsupervised representation learning on the zero resource speech challenge", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Renshaw", |
| "suffix": "" |
| }, |
| { |
| "first": "Herman", |
| "middle": [], |
| "last": "Kamper", |
| "suffix": "" |
| }, |
| { |
| "first": "Aren", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Renshaw, Herman Kamper, Aren Jansen, and Sharon Goldwater. 2015. A comparison of neu- ral network methods for unsupervised representa- tion learning on the zero resource speech challenge. In Sixteenth Annual Conference of the International Speech Communication Association.", |
| "links": null |
| }, |
| "BIBREF107": { |
| "ref_id": "b107", |
| "title": "Language acquisition in the MDL framework. Language computations", |
| "authors": [ |
| { |
| "first": "Jorma", |
| "middle": [], |
| "last": "Rissanen", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "Sven" |
| ], |
| "last": "Ristad", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "149--166", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jorma Rissanen and Eric Sven Ristad. 1994. Language acquisition in the MDL framework. Language com- putations, pages 149-166.", |
| "links": null |
| }, |
| "BIBREF108": { |
| "ref_id": "b108", |
| "title": "Language acquisition in the absence of explicit negative evidence: How important is starting small? Cognition", |
| "authors": [ |
| { |
| "first": "L T", |
| "middle": [], |
| "last": "Douglas", |
| "suffix": "" |
| }, |
| { |
| "first": "David C", |
| "middle": [], |
| "last": "Rohde", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Plaut", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "72", |
| "issue": "", |
| "pages": "67--109", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douglas L T Rohde and David C Plaut. 1999. Lan- guage acquisition in the absence of explicit negative evidence: How important is starting small? Cogni- tion, 72(1):67-109.", |
| "links": null |
| }, |
| "BIBREF109": { |
| "ref_id": "b109", |
| "title": "Statistical learning by 8-month-old infants", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jenny R Saffran", |
| "suffix": "" |
| }, |
| { |
| "first": "Elissa", |
| "middle": [], |
| "last": "Richard N Aslin", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Science", |
| "volume": "274", |
| "issue": "", |
| "pages": "1926--1928", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jenny R Saffran, Richard N Aslin, and Elissa L New- port. 1996. Statistical learning by 8-month-old in- fants. Science, 274(5294):1926-1928.", |
| "links": null |
| }, |
| "BIBREF110": { |
| "ref_id": "b110", |
| "title": "An ERP study of continuous speech processing: I. Segmentation, semantics, and syntax in native speakers", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Lisa", |
| "suffix": "" |
| }, |
| { |
| "first": "Helen J", |
| "middle": [], |
| "last": "Sanders", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Neville", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Cognitive Brain Research", |
| "volume": "15", |
| "issue": "3", |
| "pages": "228--240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lisa D Sanders and Helen J Neville. 2003. An ERP study of continuous speech processing: I. Segmenta- tion, semantics, and syntax in native speakers. Cog- nitive Brain Research, 15(3):228-240.", |
| "links": null |
| }, |
| "BIBREF111": { |
| "ref_id": "b111", |
| "title": "Depth of processing in language comprehension: Not noticing the evidence", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Anthony", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Sanford", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sturt", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Trends in cognitive sciences", |
| "volume": "6", |
| "issue": "9", |
| "pages": "382--386", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anthony J Sanford and Patrick Sturt. 2002. Depth of processing in language comprehension: Not notic- ing the evidence. Trends in cognitive sciences, 6(9):382-386.", |
| "links": null |
| }, |
| "BIBREF112": { |
| "ref_id": "b112", |
| "title": "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "L" |
| ], |
| "last": "Saxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Surya", |
| "middle": [], |
| "last": "Mcclelland", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ganguli", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1312.6120" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew M Saxe, James L McClelland, and Surya Gan- guli. 2013. Exact solutions to the nonlinear dynam- ics of learning in deep linear neural networks. arXiv preprint arXiv:1312.6120.", |
| "links": null |
| }, |
| "BIBREF113": { |
| "ref_id": "b113", |
| "title": "A Neural Model of Adaptation in Reading", |
| "authors": [ |
| { |
| "first": "Marten", |
| "middle": [], |
| "last": "Van Schijndel", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "EMNLP 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "4704--4710", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marten van Schijndel and Tal Linzen. 2018. A Neural Model of Adaptation in Reading. In EMNLP 2018, pages 4704-4710.", |
| "links": null |
| }, |
| "BIBREF114": { |
| "ref_id": "b114", |
| "title": "Measuring the perceptual availability of phonological features during language acquisition using unsupervised binary stochastic autoencoders", |
| "authors": [ |
| { |
| "first": "Cory", |
| "middle": [], |
| "last": "Shain", |
| "suffix": "" |
| }, |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "69--85", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cory Shain and Micha Elsner. 2019. Measuring the perceptual availability of phonological features dur- ing language acquisition using unsupervised binary stochastic autoencoders. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 69-85.", |
| "links": null |
| }, |
| "BIBREF115": { |
| "ref_id": "b115", |
| "title": "Sparse coding with memristor networks", |
| "authors": [ |
| { |
| "first": "Fuxi", |
| "middle": [], |
| "last": "Patrick M Sheridan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chao", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengya", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei D", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Nature nanotechnology", |
| "volume": "12", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick M Sheridan, Fuxi Cai, Chao Du, Wen Ma, Zhengya Zhang, and Wei D Lu. 2017. Sparse coding with memristor networks. Nature nanotechnology, 12(8):784.", |
| "links": null |
| }, |
| "BIBREF116": { |
| "ref_id": "b116", |
| "title": "Composite embedding systems for ZeroSpeech2017 Track1", |
| "authors": [ |
| { |
| "first": "Hayato", |
| "middle": [], |
| "last": "Shibata", |
| "suffix": "" |
| }, |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kato", |
| "suffix": "" |
| }, |
| { |
| "first": "Takahiro", |
| "middle": [], |
| "last": "Shinozaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Shinji", |
| "middle": [], |
| "last": "Watanabet", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Automatic Speech Recognition and Understanding Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "747--753", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hayato Shibata, Taku Kato, Takahiro Shinozaki, and Shinji Watanabet. 2017. Composite embedding sys- tems for ZeroSpeech2017 Track1. In Automatic Speech Recognition and Understanding Workshop (ASRU), 2017 IEEE, pages 747-753. IEEE.", |
| "links": null |
| }, |
| "BIBREF117": { |
| "ref_id": "b117", |
| "title": "Sensory cortex is optimized for prediction of future input", |
| "authors": [ |
| { |
| "first": "Yosef", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| }, |
| { |
| "first": "Yayoi", |
| "middle": [], |
| "last": "Teramoto", |
| "suffix": "" |
| }, |
| { |
| "first": "D B", |
| "middle": [], |
| "last": "Ben", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan W H", |
| "middle": [], |
| "last": "Willmore", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "J" |
| ], |
| "last": "Schnupp", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicol S", |
| "middle": [], |
| "last": "King", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Harper", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "7", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yosef Singer, Yayoi Teramoto, Ben D B Willmore, Jan W H Schnupp, Andrew J King, and Nicol S Harper. 2018. Sensory cortex is optimized for prediction of future input. eLife, 7:e31557.", |
| "links": null |
| }, |
| "BIBREF118": { |
| "ref_id": "b118", |
| "title": "Articulatory gesture rich representation learning of phonological units in low resource settings", |
| "authors": [ |
| { |
| "first": "Lal", |
| "middle": [], |
| "last": "Brij Mohan", |
| "suffix": "" |
| }, |
| { |
| "first": "Manish", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Shrivastava", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "International Conference on Statistical Language and Speech Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "80--95", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brij Mohan Lal Srivastava and Manish Shrivastava. 2016. Articulatory gesture rich representation learn- ing of phonological units in low resource settings. In International Conference on Statistical Language and Speech Processing, pages 80-95. Springer.", |
| "links": null |
| }, |
| "BIBREF119": { |
| "ref_id": "b119", |
| "title": "Neural discrete representation learning", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Van Den", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Oord", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "6306--6315", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Van Den Oord, Oriol Vinyals, and others. 2017. Neural discrete representation learning. In Ad- vances in Neural Information Processing Systems, pages 6306-6315.", |
| "links": null |
| }, |
| "BIBREF120": { |
| "ref_id": "b120", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF121": { |
| "ref_id": "b121", |
| "title": "The zero resource speech challenge 2015", |
| "authors": [ |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Versteegh", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Thiolli\u00e8re", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Schatz", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuan", |
| "middle": [ |
| "Nga" |
| ], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Anguera", |
| "suffix": "" |
| }, |
| { |
| "first": "Aren", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Dupoux", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maarten Versteegh, Roland Thiolli\u00e8re, Thomas Schatz, Xuan Nga Cao, Xavier Anguera, Aren Jansen, and Emmanuel Dupoux. 2015. The zero resource speech challenge 2015. In Sixteenth Annual Conference of the International Speech Communication Associ- ation.", |
| "links": null |
| }, |
| "BIBREF122": { |
| "ref_id": "b122", |
| "title": "On productivity. Linguistic variation yearbook", |
| "authors": [ |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "5", |
| "issue": "", |
| "pages": "265--302", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charles Yang. 2005. On productivity. Linguistic varia- tion yearbook, 5(1):265-302.", |
| "links": null |
| }, |
| "BIBREF123": { |
| "ref_id": "b123", |
| "title": "Extracting bottleneck features and word-like pairs from untranscribed speech for feature representation", |
| "authors": [ |
| { |
| "first": "Yougen", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheung-Chi", |
| "middle": [], |
| "last": "Leung", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongjie", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Automatic Speech Recognition and Understanding Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "734--739", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yougen Yuan, Cheung-Chi Leung, Lei Xie, Hongjie Chen, Bin Ma, and Haizhou Li. 2017. Extracting bottleneck features and word-like pairs from untran- scribed speech for feature representation. In Auto- matic Speech Recognition and Understanding Work- shop (ASRU), 2017 IEEE, pages 734-739. IEEE.", |
| "links": null |
| }, |
| "BIBREF124": { |
| "ref_id": "b124", |
| "title": "Human brain activity time-locked to perceptual event boundaries", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Jeffrey", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zacks", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Todd", |
| "suffix": "" |
| }, |
| { |
| "first": "Margaret", |
| "middle": [ |
| "A" |
| ], |
| "last": "Braver", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "I" |
| ], |
| "last": "Sheridan", |
| "suffix": "" |
| }, |
| { |
| "first": "Abraham", |
| "middle": [ |
| "Z" |
| ], |
| "last": "Donaldson", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "M" |
| ], |
| "last": "Snyder", |
| "suffix": "" |
| }, |
| { |
| "first": "Randy", |
| "middle": [ |
| "L" |
| ], |
| "last": "Ollinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcus", |
| "middle": [ |
| "E" |
| ], |
| "last": "Buckner", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Raichle", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Nature neuroscience", |
| "volume": "4", |
| "issue": "6", |
| "pages": "651--655", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey M Zacks, Todd S Braver, Margaret A Sheri- dan, David I Donaldson, Abraham Z Snyder, John M Ollinger, Randy L Buckner, and Marcus E Raichle. 2001. Human brain activity time-locked to per- ceptual event boundaries. Nature neuroscience, 4(6):651-655.", |
| "links": null |
| }, |
| "BIBREF125": { |
| "ref_id": "b125", |
| "title": "Loudness constancy with varying sound source distance", |
| "authors": [ |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Zahorik", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Frederic L Wightman", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Nature neuroscience", |
| "volume": "4", |
| "issue": "1", |
| "pages": "78--83", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pavel Zahorik and Frederic L Wightman. 2001. Loud- ness constancy with varying sound source distance. Nature neuroscience, 4(1):78-83.", |
| "links": null |
| }, |
| "BIBREF126": { |
| "ref_id": "b126", |
| "title": "A deep scattering spectrum-deep siamese network pipeline for unsupervised acoustic modeling", |
| "authors": [ |
| { |
| "first": "Neil", |
| "middle": [], |
| "last": "Zeghidour", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabriel", |
| "middle": [], |
| "last": "Synnaeve", |
| "suffix": "" |
| }, |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Versteegh", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Dupoux", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Acoustics, Speech and Signal Processing (ICASSP), 2016 IEEE International Conference on", |
| "volume": "", |
| "issue": "", |
| "pages": "4965--4969", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Neil Zeghidour, Gabriel Synnaeve, Maarten Versteegh, and Emmanuel Dupoux. 2016. A deep scattering spectrum-deep siamese network pipeline for unsu- pervised acoustic modeling. In Acoustics, Speech and Signal Processing (ICASSP), 2016 IEEE Inter- national Conference on, pages 4965-4969. IEEE.", |
| "links": null |
| }, |
| "BIBREF127": { |
| "ref_id": "b127", |
| "title": "Algorithm 778: L-BFGS-B: Fortran subroutines for large-scale bound-constrained optimization", |
                "authors": [
                    {
                        "first": "Ciyou",
                        "middle": [],
                        "last": "Zhu",
                        "suffix": ""
                    },
                    {
                        "first": "Richard",
                        "middle": [
                            "H"
                        ],
                        "last": "Byrd",
                        "suffix": ""
                    },
                    {
                        "first": "Peihuang",
                        "middle": [],
                        "last": "Lu",
                        "suffix": ""
                    },
                    {
                        "first": "Jorge",
                        "middle": [],
                        "last": "Nocedal",
                        "suffix": ""
                    }
                ],
| "year": 1997, |
| "venue": "ACM Transactions on Mathematical Software (TOMS)", |
| "volume": "23", |
| "issue": "4", |
| "pages": "550--560", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ciyou Zhu, Richard H Byrd, Peihuang Lu, and Jorge Nocedal. 1997. Algorithm 778: L-BFGS-B: Fortran subroutines for large-scale bound-constrained opti- mization. ACM Transactions on Mathematical Soft- ware (TOMS), 23(4):550-560.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Incremental layerwise encoder-decoder framework. Shown here with 3 layers and a forward/backward window size of 3. Segment boundaries are shown in cyan. Gray arrows indicate information flow through the encoder, as governed by the boundary decisions. Colored arrows indicate information flow from encodings to decoder targets in the backward (orange) and forward (green) directions, starting from the encoded timestep at the center of the figure.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": "Phoneme acquisition scores. F-measures for boundary detection (left), phoneme classification (center), and phonological feature classification (right).", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF2": { |
| "text": "reports performance differences by metric against baseline U (untrained). Training yields consistent and often substantial improvements across metrics in multiscale (L > 0) encoders with both memory (B > 0) and prediction (F > 0) pressures, but can fail to improve in the Effect of learning. Change in F-measure by metric over baseline U.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF3": { |
| "text": "Effect of language. Change in F-measure by metric over baseline X.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF4": { |
| "text": "Boundary P/R trade-off. Boundary precision (left) and recall (right) by experimental configuration in Xitsonga and English.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF6": { |
| "text": "corresponding respectively to the B preceding and F following segment labels of layer l \u2212 1 at time t. The initial decoder hidden and cell states -h dB(l)", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "TABREF0": { |
            "content": "<table><tr><td/><td colspan=\"3\">English</td><td colspan=\"3\">Xitsonga</td></tr><tr><td>Model</td><td>Bd</td><td>Pc</td><td>Fc</td><td>Bd</td><td>Pc</td><td>Fc</td></tr><tr><td>Full</td><td>65.3</td><td>22.9</td><td>49.3</td><td>39.3</td><td>28.6</td><td>53.8</td></tr><tr><td>Baseline U</td><td>30.4</td><td>12.3</td><td>42.2</td><td>22.1</td><td>15.4</td><td>46.2</td></tr><tr><td>Baseline X</td><td>52.4</td><td>20.5</td><td>47.1</td><td>44.8</td><td>27.8</td><td>53.2</td></tr></table>",
            "text": "Table 1: F-measures for boundary discovery (Bd), phoneme classification (Pc), and phonological feature classification (Fc), using B = 25, F = 1, L = 3.",
| "html": null, |
| "type_str": "table", |
| "num": null |
| } |
| } |
| } |
| } |