| { |
| "paper_id": "D18-1029", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:51:06.935335Z" |
| }, |
| "title": "On the Relation between Linguistic Typology and (Limitations of) Multilingual Language Modeling", |
| "authors": [ |
| { |
| "first": "Daniela", |
| "middle": [], |
| "last": "Gerz", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab, DTAL", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab, DTAL", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Edoardo", |
| "middle": ["Maria"], |
| "last": "Ponti", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab, DTAL", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab, DTAL", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "A key challenge in cross-lingual NLP is developing general language-independent architectures that are equally applicable to any language. However, this ambition is largely hampered by the variation in structural and semantic properties, i.e. the typological profiles of the world's languages. In this work, we analyse the implications of this variation on the language modeling (LM) task. We present a large-scale study of state-of-the-art n-gram based and neural language models on 50 typologically diverse languages covering a wide variety of morphological systems. Operating in the full vocabulary LM setup focused on word-level prediction, we demonstrate that a coarse typology of morphological systems is predictive of absolute LM performance. Moreover, fine-grained typological features such as exponence, flexivity, fusion, and inflectional synthesis are borne out to be responsible for the proliferation of low-frequency phenomena which are organically difficult to model by statistical architectures, or for the meaning ambiguity of character n-grams. Our study strongly suggests that these features have to be taken into consideration during the construction of next-level language-agnostic LM architectures, capable of handling morphologically complex languages such as Tamil or Korean.", |
| "pdf_parse": { |
| "paper_id": "D18-1029", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "A key challenge in cross-lingual NLP is developing general language-independent architectures that are equally applicable to any language. However, this ambition is largely hampered by the variation in structural and semantic properties, i.e. the typological profiles of the world's languages. In this work, we analyse the implications of this variation on the language modeling (LM) task. We present a large-scale study of state-of-the-art n-gram based and neural language models on 50 typologically diverse languages covering a wide variety of morphological systems. Operating in the full vocabulary LM setup focused on word-level prediction, we demonstrate that a coarse typology of morphological systems is predictive of absolute LM performance. Moreover, fine-grained typological features such as exponence, flexivity, fusion, and inflectional synthesis are borne out to be responsible for the proliferation of low-frequency phenomena which are organically difficult to model by statistical architectures, or for the meaning ambiguity of character n-grams. Our study strongly suggests that these features have to be taken into consideration during the construction of next-level language-agnostic LM architectures, capable of handling morphologically complex languages such as Tamil or Korean.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Deep learning has allowed NLP algorithms to dispose of manually-crafted features, and to virtually achieve language independence. However, their performance still varies noticeably across languages due to different underlying data distributions (Bender, 2013; O'Horan et al., 2016) . Linguistic typology, the systematic comparison of the world's languages, holds promise to explain these idiosyncrasies and interpret statistical models in terms of variation in language structures (Ponti et al., 2017) . * Both authors equally contributed to this work.", |
| "cite_spans": [ |
| { |
| "start": 245, |
| "end": 259, |
| "text": "(Bender, 2013;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 260, |
| "end": 281, |
| "text": "O'Horan et al., 2016)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 481, |
| "end": 501, |
| "text": "(Ponti et al., 2017)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In order to evaluate how cross-lingual structural variation hinders the design of effective generalpurpose algorithms, we propose the task of language modeling (LM) as a testbed. In particular, we opt for a full-vocabulary setup where no word encountered at training time is treated as an unknown symbol, in order to a) ensure a fair comparison across languages with different word frequency rates and b) avoid setting an arbitrary threshold on vocabulary size (Cotterell et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 461, |
| "end": 485, |
| "text": "(Cotterell et al., 2018)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Although there has recently been a tendency towards expanding test language samples, the datasets considered in previous works (Botha and Blunsom, 2014; Vania and Lopez, 2017; Kawakami et al., 2017; Cotterell et al., 2018) are not entirely adequate yet to represent the typological variation and to ground cross-lingual generalisations empirically. Hence, we test several LM architectures (including n-gram, neural, and character-aware models) on a novel and wider set of 50 languages sampled according to stratification principles.", |
| "cite_spans": [ |
| { |
| "start": 127, |
| "end": 152, |
| "text": "(Botha and Blunsom, 2014;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 153, |
| "end": 175, |
| "text": "Vania and Lopez, 2017;", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 176, |
| "end": 198, |
| "text": "Kawakami et al., 2017;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 199, |
| "end": 222, |
| "text": "Cotterell et al., 2018)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Through this large-scale multilingual analysis, we shed new light on the current limitations of standard LM models and offer support to further developments in multilingual NLP. In particular, we demonstrate that the previous fixedvocabulary assumption in fact ignores the limitations of language modeling for morphologically rich languages. Moreover, we find a strong correlation across the board between LM model performances and the type of morphological system adopted in each language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To motivate this correlation we show how finegrained typological properties interact with the frequency distribution (Zipf, 1949) by regulating word boundaries and the proliferation of word forms; and 2) with the mapping between morphemes (here intended as character n-grams) and meaning, by possibly blurring it.", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 129, |
| "text": "(Zipf, 1949)", |
| "ref_id": "BIBREF62" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The paper is organised as follows. After providing a short overview of multilingual LM and its possible setups ( \u00a72), we describe the cross-lingual variation in morphological systems and propose a novel typologically diverse dataset for LM in \u00a73. We outline the data in \u00a74 and benchmarked language models in \u00a75. Finally, we discuss the results in light of linguistic typology in \u00a76.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A language model computes a probability distribution over sequences of word tokens, and is typically trained to maximise the likelihood of word input sequences. The LM objective is expressed as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual Language Modeling", |
| "sec_num": "2" |
| }, |
| { |
| "text": "P (w 1 , ...w n ) = i P (w i |w 1 , ...w i\u22121 ) (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual Language Modeling", |
| "sec_num": "2" |
| }, |
| { |
| "text": "w i is a word token with the index i in the sequence. LM is considered a central task in NLP and language understanding, with applications in speech recognition (Mikolov et al., 2010 ), text summarisation (Filippova et al., 2015; Rush et al., 2015) , and information retrieval (Ponte and Croft, 1998; Zamani and Croft, 2016) . The importance of language modeling has been accentuated even more in representation learning recently, where it is used as a novel form of unsupervised pre-training (and an alternative to static word embeddings) for the benefit of a variety of NLP applications (Peters et al., 2018; Howard and Ruder, 2018) . (Marcus et al., 1993) and the 1 Billion Word Benchmark (BWB) (Chelba et al., 2013) . Datasets extracted from BBC News (Greene and Cunningham, 2006) and IMDB Movie Reviews (Maas et al., 2011) are also used for LM evaluation in English (Wang and Cho, 2016; Miyamoto and Cho, 2016; Press and Wolf, 2017) . For multilingual LM evaluation, Botha and Blunsom (2014) extract datasets for Czech, French, Spanish, German, and Russian from the 2013 Workshop on Statistical Machine Translation (WMT) data (Bojar et al., 2013) . Kim et al. (2016) reuse these datasets and add Arabic. Ling et al. (2015) evaluate on English, Portuguese, Catalan, German and Turkish datasets extracted from Wikipedia. Kawakami et al. (2017) evaluate on 7 European languages using Wikipedia data, including Finnish.", |
| "cite_spans": [ |
| { |
| "start": 161, |
| "end": 182, |
| "text": "(Mikolov et al., 2010", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 205, |
| "end": 229, |
| "text": "(Filippova et al., 2015;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 230, |
| "end": 248, |
| "text": "Rush et al., 2015)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 277, |
| "end": 300, |
| "text": "(Ponte and Croft, 1998;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 301, |
| "end": 324, |
| "text": "Zamani and Croft, 2016)", |
| "ref_id": "BIBREF60" |
| }, |
| { |
| "start": 589, |
| "end": 610, |
| "text": "(Peters et al., 2018;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 611, |
| "end": 634, |
| "text": "Howard and Ruder, 2018)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 637, |
| "end": 658, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 698, |
| "end": 719, |
| "text": "(Chelba et al., 2013)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 755, |
| "end": 784, |
| "text": "(Greene and Cunningham, 2006)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 808, |
| "end": 827, |
| "text": "(Maas et al., 2011)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 871, |
| "end": 891, |
| "text": "(Wang and Cho, 2016;", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 892, |
| "end": 915, |
| "text": "Miyamoto and Cho, 2016;", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 916, |
| "end": 937, |
| "text": "Press and Wolf, 2017)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 1131, |
| "end": 1151, |
| "text": "(Bojar et al., 2013)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1154, |
| "end": 1171, |
| "text": "Kim et al. (2016)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1209, |
| "end": 1227, |
| "text": "Ling et al. (2015)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 1324, |
| "end": 1346, |
| "text": "Kawakami et al. (2017)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual Language Modeling", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To the best of our knowledge, the largest datasets used in previous work are from (M\u00fcller et al., 2012; Cotterell et al., 2018) and amount to 21 languages from the Europarl data (Koehn, 2005) . Despite the large coverage of languages, these sets are still restricted only to the languages of the European Union. On the other hand, the most typologically diverse dataset thus far was released by Vania and Lopez (2017) . It includes 10 languages representing some morphological systems.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 103, |
| "text": "(M\u00fcller et al., 2012;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 104, |
| "end": 127, |
| "text": "Cotterell et al., 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 178, |
| "end": 191, |
| "text": "(Koehn, 2005)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 395, |
| "end": 417, |
| "text": "Vania and Lopez (2017)", |
| "ref_id": "BIBREF57" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual Language Modeling", |
| "sec_num": "2" |
| }, |
| { |
| "text": "This short survey of related work demonstrates a clear tendency towards extending LM evaluation to other languages, abandoning English-centric assumptions, and focusing on language-agnostic LM architectures. However, a comprehensive evaluation set that systematically covers a wide and balanced spectrum of typologically diverse languages is still missing. The novel dataset we discuss in this paper aims at bridging this gap (see \u00a74).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual Language Modeling", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Fixed vs. Full Vocabulary Setup. A majority of language models rely on the fixed-vocabulary assumption: they use a special symbol <UNK> that represents all words not present in the fixed vocabulary V , which are termed out-of-vocabulary (OOV). Selecting the set V typically slips under the radar, and can be seen as \"something of a black art\" despite its enormous impact on final LM performance (Cotterell et al., 2018) . 1 Standard LM setups either fix the vocabulary V to the top n most frequent words, typically with n = 10, 000 or n = 5, 000 (Mikolov et al., 2010; Ling et al., 2015; Vania and Lopez, 2017; Lee et al., 2017, inter alia) , or include in V only words with a frequency below a certain threshold (typically 2 or 5) (Heafield et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 395, |
| "end": 419, |
| "text": "(Cotterell et al., 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 546, |
| "end": 568, |
| "text": "(Mikolov et al., 2010;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 569, |
| "end": 587, |
| "text": "Ling et al., 2015;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 588, |
| "end": 610, |
| "text": "Vania and Lopez, 2017;", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 611, |
| "end": 640, |
| "text": "Lee et al., 2017, inter alia)", |
| "ref_id": null |
| }, |
| { |
| "start": 732, |
| "end": 755, |
| "text": "(Heafield et al., 2013)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual Language Modeling", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The rationale behind fixing the set V is a) to make the language model more robust to handling OOVs and to effectively bypass the problem of unreliable word estimates for low-frequency and unseen words (by ignoring them), and b) to enable direct comparisons of absolute perplexity scores across different models. However, this posits a critical challenge as cross-linguistic evaluation becomes uneven. In fact, we witness a larger proportion of vocabulary words replaced by <UNK> in morphologically rich languages because of their higher OOV rates (see Table 3 ). What is more, while the fixed-vocabulary assumption artificially FI Kreikkalaiset sijoittivat geometrian synnyn muinaiseen Egyptiin , jossa sit\u00e4 tarvittiin maanmittaukseen .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 553, |
| "end": 560, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multilingual Language Modeling", |
| "sec_num": "2" |
| }, |
| { |
| "text": "<UNK> <UNK> <UNK> synnyn <UNK> Egyptiin , jossa sit\u00e4 tarvittiin <UNK> .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FI (MIN-5)", |
| "sec_num": null |
| }, |
| { |
| "text": "<UNK> <UNK> <UNK> <UNK> <UNK> <UNK> , jossa sit\u00e4 <UNK> <UNK> . improves the perplexity measure, it actually makes the models less useful, especially in morphologically rich languages, as exemplified in Table 1 . Our goal is to get a clear picture on how different typological features and the corresponding corpus frequency distributions affect LM performance, without the influence of the unrealistic fixed-vocabulary assumption. Therefore, we work in the full-vocabulary LM setup (Adams et al., 2017; Grave et al., 2017) . This means that we explicitly decide to retain also infrequent words in the modeled data: V contains all words occurring at least once in the training set, only unseen words from test data are treated as OOVs. We believe that this setup leads to an evaluation that pinpoints the crucial limitations of standard LM architectures. 2 Why Not Open Vocabulary Setup? Recent neural LM architectures have also focused on handling large vocabularies and unseen words using character-aware modeling (Luong and Manning, 2016; Jozefowicz et al., 2016; Kawakami et al., 2017, inter alia) . This setup is commonly referred to as the open-vocabulary setup. However, two distinct approaches with crucial modeling differences are referred to by the same term in the literature. a) Word-level generation constructs word vectors for arbitrary words from constituent subword-level components, but word-level prediction is still evaluated based on the fixed-vocabulary assumption. b) Character-level generation predicts characters instead of words.", |
| "cite_spans": [ |
| { |
| "start": 482, |
| "end": 502, |
| "text": "(Adams et al., 2017;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 503, |
| "end": 522, |
| "text": "Grave et al., 2017)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1015, |
| "end": 1040, |
| "text": "(Luong and Manning, 2016;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 1041, |
| "end": 1065, |
| "text": "Jozefowicz et al., 2016;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1066, |
| "end": 1100, |
| "text": "Kawakami et al., 2017, inter alia)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 202, |
| "end": 209, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "FI (10K)", |
| "sec_num": null |
| }, |
| { |
| "text": "KO \u1100 \u1173 \u1103 \u1171 \u1112 \u1161 \u11ab\u1109 \u1175 \u1107 \u1162 \u11a8\u110b \u1175 \u11af\u110c \u1161 \u11bc\u110b \u1166\u1109 \u1165 \u110c \u1161 \u11bc\u110b \u116f \u11ab\u1112 \u1161\u110b \u1167 \u1109 \u1175 \u11ab\u1103 \u1169 \u11bc\u110b \u1173\u1105 \u1169 \u110b \u1161 \u11af\u1105 \u1167\u110c \u1167 \u11bb\u1103 \u1161. \u1100 \u1173\u1105 \u1165\u1102 \u1161 \u1100 \u1173\u110b \u1174 \u110c \u1175 \u11b8\u110b \u1173 \u11ab \u110c \u1175\u1103 \u1169 \u11a8\u1112 \u1161\u1100 \u1166 \u1100 \u1161\u1102 \u1161 \u11ab\u1112 \u1162 \u11bb\u1103 \u1161 . KO (MIN-5) \u1100 \u1173 \u1103 \u1171 <UNK> <UNK> <UNK> <UNK> \u110b \u1161 \u11af\u1105 \u1167\u110c \u1167 \u11bb\u1103 \u1161 . \u1100 \u1173\u1105 \u1165\u1102 \u1161 \u1100 \u1173\u110b \u1174 \u110c \u1175 \u11b8\u110b \u1173 \u11ab <UNK> <UNK> . KO (10K) \u1100 \u1173 \u1103 \u1171 <UNK> <UNK> <UNK> <UNK> \u110b \u1161 \u11af\u1105 \u1167\u110c \u1167 \u11bb\u1103 \u1161 . \u1100 \u1173\u1105 \u1165\u1102 \u1161 \u1100 \u1173\u110b \u1174 <UNK> <UNK> <UNK> .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FI (10K)", |
| "sec_num": null |
| }, |
| { |
| "text": "Given that character-level prediction and wordlevel prediction operate on entirely different sets of symbols, their performance is hardly comparable. Still, Jozefowicz et al. (2016) report that, in a hybrid setup which evaluates character-level prediction based on word-level perplexity with the 2 For instance, as discussed later in \u00a73 and validated empirically in \u00a76, the vocabularies of morphologically rich languages are inherently larger: it is simply more difficult to learn and make LM predictions in such languages.", |
| "cite_spans": [ |
| { |
| "start": 157, |
| "end": 181, |
| "text": "Jozefowicz et al. (2016)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FI (10K)", |
| "sec_num": null |
| }, |
| { |
| "text": "Fusion Exponence Flexivity Synthesis Isolating low 1:1 1:1 low Fusional mid many:1 1:many mid Introflexive high many:1 1:many mid Agglutinative mid 1:1 1:1 high Table 2 : Traditional morphological types described in terms of selected features from WALS.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 161, |
| "end": 168, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Type", |
| "sec_num": null |
| }, |
| { |
| "text": "fixed-vocabulary assumption, current state-of-the-art word-level prediction models (i.e., the ones we discuss in \u00a75) still significantly outperform such hybrid character-level prediction approaches. Therefore, we operate in the full-vocabulary setup.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Type", |
| "sec_num": null |
| }, |
| { |
| "text": "Aiming for a comprehensive multilingual LM evaluation in this study, we survey all possible types of morphological systems (Haspelmath and Sims, 2013) , which possibly lead to different performances. Traditionally, languages have been grouped into the four main categories: isolating, fusional, introflexive and agglutinative, based on their position along a spectrum measuring the preference on breaking up concepts in many words (on one extreme) or rather compose them into single words (on the other extreme).", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 150, |
| "text": "(Haspelmath and Sims, 2013)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Typology of Morphological Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The mono-dimensionality of this spectrum has recently been challenged as languages exhibit a multitude of morphological features that do not covary across languages (Plank, 2017; . The typological database WALS (Dryer and Haspelmath, 2013) documents several of them that are relevant for LM: inflectional synthesis, fusion, exponence, and flexivity. Note that the prototypes of traditional categories can be approximated in terms of these features, as shown in Table 2 , although more combinations are possible.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 178, |
| "text": "(Plank, 2017;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 211, |
| "end": 239, |
| "text": "(Dryer and Haspelmath, 2013)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 461, |
| "end": 468, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Typology of Morphological Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Languages specify different subsets of grammatical categories (such as tense for verbs, or num-ber for nouns), and for each category different values are available in each language: for instance, Finnish has less tense values (it lacks a future), whereas Slovene has more number values (including a dual) compared to English. The feature inflectional synthesis for verbs (Bickel and Nichols, 2013) measures how many categories appear on the maximally inflected verb per language. More available categories enlarge the vocabulary (and consequently the OOV rate) with forms instantiating all possible combinations of their values.", |
| "cite_spans": [ |
| { |
| "start": 371, |
| "end": 397, |
| "text": "(Bickel and Nichols, 2013)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Typology of Morphological Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Another crucial aspect is how the available grammatical categories are expressed, which can be described by fusion, exponence, and flexivity. Fusion measures the degree of connectedness between a grammatical marker to another word. The marker can be (from lower to higher fusion) a separate word, a clitic, an affix, or can affect the form of the root itself (e.g. an umlaut or a tone).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Typology of Morphological Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Exponence measures the number of categories (e.g., tense, number) a single morpheme tends to convey. Exponence is separative if one grammatical category is conveyed by one morpheme (1:1), and cumulative if multiple categories are grouped into one morpheme (many:1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Typology of Morphological Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Flexivity indicates the possibility that the value of a grammatical category be mapped into different morphological forms (1:many). In other terms, lemmas belonging to the same part-of-speech are divided into inflectional classes (such as declension classes for nouns or conjugation classes for verbs), each characterised by a different paradigm, that is, a different set of value-to-form mappings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Typology of Morphological Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The three last features are illustrated by the examples Ex. (2)-Ex. (5), all uttering the sentence \"I will guard the doors and I will not open (them)\". 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Typology of Morphological Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(2) t\u00f4i In particular, consider how tense and person are expressed on verbs. Vietnamese in Ex. (2) puts two particles t\u00f4i and s\u1ebd before the verb, which are distinct (separate exponence), autonomous from the root (no fusion), and fixed (absence of flexivity). Turkish in Ex. (3) attaches suffixes: -acak- for tense and -\u0131m for person. These are distinct (separate exponence), joined to the roots (concatenative fusion), and (phonologically determined variants of) the same morpheme (1:1 flexivity). Italian in Ex. (4) uses affixes -er\u00f2 and -ir\u00f2: they are concatenated to the root with respect to fusion, convey both tense and person (cumulative exponence), and are dissimilar (presence of flexivity). Finally, in Ex. (5) for Hebrew the consonant pattern of the verb \u0161-m-r is interdigitated by the vowel -o- for tense, and preceded by a prefix 'e- for person. The first phenomenon alters the root itself (introflexive fusion), is distinct from the second (separate exponence), and changes its realisation based on the verb's lemma (presence of flexivity).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Typology of Morphological Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The above evidence strongly motivates us, as well as recent previous work (Vania and Lopez, 2017; Kawakami et al., 2017; Cotterell et al., 2018) , to approach LM with models that are aware of the inner structure of their input words, and to benchmark these modeling choices on a typologically diverse range of languages, as shown in \u00a74.", |
| "cite_spans": [ |
| { |
| "start": 74, |
| "end": 97, |
| "text": "(Vania and Lopez, 2017;", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 98, |
| "end": 120, |
| "text": "Kawakami et al., 2017;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 121, |
| "end": 144, |
| "text": "Cotterell et al., 2018)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Typology of Morphological Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Selection of Languages. Our selection of test languages is guided by the following goals: a) we have to ensure the coverage of typological properties from \u00a73, and b) we want to analyse a large set of languages which extends and surpasses other work in the LM literature (see \u00a72).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Since cross-lingual NLP aims at modeling extant languages rather than possible languages (including, e.g., extinct ones), creating a balanced sample is challenging. In fact, attested languages, intended as a random variable, are extremely sparse and not independent-and-identically-distributed (Cotterell and Eisner, 2017) . First, available and reliable data exist only for a fraction of the world's languages. Second, these data are biased because their features may not stem from the underlying distribution, i.e., from what is naturally possible/frequent, but rather can be inherited by genealogical relatedness or borrowed by areal proximity (Bakker, 2010) . To mitigate these biases, theoretical works resorted to stratification approaches, where each subgroup of related languages is sampled independently. maximizing their diversity (Dryer, 1989, inter alia) . We perform our selection in the same spirit.", |
| "cite_spans": [ |
| { |
| "start": 294, |
| "end": 322, |
| "text": "(Cotterell and Eisner, 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 647, |
| "end": 661, |
| "text": "(Bakker, 2010)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 841, |
| "end": 866, |
| "text": "(Dryer, 1989, inter alia)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We start from the Polyglot Wikipedia (PW) project (Al-Rfou et al., 2013) which provides cleaned and tokenised Wikipedia data in 40 languages. However, the majority of the PW languages are similar from the perspective of genealogy (26/40 are Indo-European), geography (28/40 are Western European), and typology (26/40 are fusional). Consequently, the PW set is not a representative sample of the world's languages.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 72, |
| "text": "(Al-Rfou et al., 2013)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To amend this limitation, we source additional languages with the data coming from the same domain, Wikipedia, considering candidates in descending order of corpus size cleaned and preprocessed by the Polyglot tokeniser (Al-Rfou et al., 2013) . Since fusional languages are already represented in the PW, we add new languages from other morphological types: isolating (Min Nan, Burmese, Khmer), agglutinative (Basque, Georgian, Kannada, Tamil, Mongolian, Javanese), and introflexive languages (Amharic).", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 242, |
| "text": "(Al-Rfou et al., 2013)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Partition. We construct datasets for all 50 languages by extracting the first 40K sentences for each language, and split them into train (34K), validation (3K), and test (3K). This choice has been motivated by the following observations: a) we require similarly-sized datasets from the same domain for all languages; b) the size of the datasets has to be similar to the standard English PTB dataset (Marcus et al., 1993) which has been utilised to guide LM development in English for more than 20 years. The final list of 50 languages along with their language codes (ISO 639-1), morphological type (i.e., isolating, fusional, introflexive, agglutinative), and corpus statistics is provided in Table 3 .", |
| "cite_spans": [ |
| { |
| "start": 399, |
| "end": 420, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 694, |
| "end": 701, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Benchmarked Language Models. The availability of LM evaluation sets in a large number of diverse languages, as described in \u00a74, gives an opportunity to conduct a full-fledged multilingual analysis of representative LM architectures for word-level prediction. First, we evaluate a state-of-the-art model from the n-gram family of models (Goodman, 2001 ) from the KenLM package. 4 It is based on 5-grams with extended Kneser-Ney smoothing (Kneser and Ney, 1995; Heafield et al., 2013) . We refer to this model as KN5.", |
| "cite_spans": [ |
| { |
| "start": 336, |
| "end": 350, |
| "text": "(Goodman, 2001", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 377, |
| "end": 378, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 437, |
| "end": 459, |
| "text": "(Kneser and Ney, 1995;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 460, |
| "end": 482, |
| "text": "Heafield et al., 2013)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models and Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Modern LM architectures are almost exclusively based on recurrent neural networks (RNNs), and especially on Long-Short-Term Memory networks (LSTMs). (Mikolov et al., 2010; Sundermeyer et al., 2015; Chen et al., 2016, inter alia) . They map a sequence of input words to embedding vectors using a look-up matrix and then perform word-level prediction by passing the vectors to the LSTM.", |
| "cite_spans": [ |
| { |
| "start": 149, |
| "end": 171, |
| "text": "(Mikolov et al., 2010;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 172, |
| "end": 197, |
| "text": "Sundermeyer et al., 2015;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 198, |
| "end": 228, |
| "text": "Chen et al., 2016, inter alia)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models and Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Finally, we also evaluate a character-aware variant of the neural LSTM LM architecture. We use the Char-CNN-LSTM model (Kim et al., 2016) due to its public availability and strong performance in several languages. In this model, each character is embedded and passed through a convolutional neural network with max-over-time pooling (LeCun et al., 1989) , followed by a highway network transformation (Srivastava et al., 2015) to build word representations from their constituent characters. By resorting to character-level information, the model is able to provide better parameter estimates for lower-frequency words, which is particularly important for morphologically rich languages. The CNN-based word representations are then processed in a sequence by a regular LSTM network to obtain word-level predictions.", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 137, |
| "text": "(Kim et al., 2016)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 325, |
| "end": 353, |
| "text": "pooling (LeCun et al., 1989)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models and Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Evaluation Setup. We report perplexity scores (Jurafsky and Martin, 2017, chapter 4.2.1) using the full vocabulary for each respective LM dataset. This means that we explicitly decide to retain also infrequent words in the data and analyse the difficulty of modeling such words in morphologically rich languages (see \u00a72 for the discussion).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models and Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In the full-vocabulary setup, the set V comprises all words occurring at least once in the training set. Unseen test words are mapped to one <UNK> vector, sampled from the space of trained word vectors relying on a normal distribution and the same fixed random seed for all models. On the other hand, KN5 by design has a slightly different way of handling unseen test words: they are regarded as outliers and assigned low-probability estimates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models and Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Training Setup and Parameters. For LSTM and Char-CNN-LSTM language models, we reproduce the standard LM setup of Zaremba et al. (2015) and parameter choices of Kim et al. (2016) . Batch size is 20 and a sequence length is 35, where one step corresponds to one word token. The maximum word length is chosen dynamically based on the longest word in the corpus. The corpus is processed continuously; the RNN hidden states reset at the beginning of each epoch. Parameters are optimised with SGD, and the gradient is averaged over the batch size and sequence length. We then scale the averaged gradient by the sequence length (=35) and clip to 5.0 for more stable training. The learning rate is 1.0, decayed by 0.5 after each epoch if the validation perplexity does not improve. All models are trained for 15 epochs, which is typically sufficient for model convergence. Finally, KN5 is trained relying on the suggested parameters from the KenLM package.", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 134, |
| "text": "Zaremba et al. (2015)", |
| "ref_id": "BIBREF61" |
| }, |
| { |
| "start": 160, |
| "end": 177, |
| "text": "Kim et al. (2016)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models and Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this section, we present our main empirical findings on the connection between LM performance and corpus statistics emerging from different typological profiles (see \u00a73). Before proceeding, we stress that the absolute perplexity scores across different languages are not directly comparable, but their values provide evidence on the difficulty and limitations of language modeling in each language, considering the fact that all language models were trained on similarly-sized datasets. The results for all three benchmarked language models on all 50 languages are summarised in Table 3 . Table 3 reveals that the Char-CNN-LSTM model is the best-performing model overall. We report the best results with that model for 48/50 languages and across all traditional morphological types. Gains over the simpler recurrent LM architecture (i.e., the LSTM model) are present for all 50/50 languages. In short, this means that character-level information on the input side of neural architectures, in addition to leading to fewer parameters, is universally beneficial for the final performance of word-level prediction, as also suggested by Kim et al. (2016) on a much smaller set of languages. By relying on character-level knowledge, Char-CNN-LSTM model provides better estimates for lower-frequency words.", |
| "cite_spans": [ |
| { |
| "start": 1135, |
| "end": 1152, |
| "text": "Kim et al. (2016)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 582, |
| "end": 589, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 592, |
| "end": 599, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Moreover, the results show that KN5 is a competitive baseline for several languages (e.g., Kannada, Thai, Amharic) . This further highlights the importance of testing models on a typologically diverse set of languages: despite the clear superiority of neural LM architectures such as Char-CNN-LSTM in a large number of languages, the results and the marked outliers still suggest that there is currently no \"one-size-fits-all\" model.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 114, |
| "text": "Kannada, Thai, Amharic)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "In general, large perplexity scores for certain languages (e.g., agglutinative languages such as Finnish, Korean, Tamil, or introflexive languages), especially when compared to performance on English on a similarly-sized dataset, clearly point at the limitations of all the \"language-agnostic\" LM architectures. As suggested by Jozefowicz et al. (2016) , LM performance in English can be boosted by simply collecting more data and working with large vocabularies (e.g., reducing the number of relevant OOVs). However, this solution is certainly not applicable to a majority of the world's languages (Bird, 2011; Gandhe et al., 2014; Adams et al., 2017) , see later in \u00a76: Further Discussion.", |
| "cite_spans": [ |
| { |
| "start": 328, |
| "end": 352, |
| "text": "Jozefowicz et al. (2016)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 599, |
| "end": 611, |
| "text": "(Bird, 2011;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 612, |
| "end": 632, |
| "text": "Gandhe et al., 2014;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 633, |
| "end": 652, |
| "text": "Adams et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "Frequency Analysis and Traditional Morphological Types. We now analyse all languages in our collection according to word-level frequency properties also listed in Table 3 for all 50 languages. We report: 1) the vocabulary size (i.e., the total number of vocabulary words in each training dataset); 2) the total number of test words not occurring in the corresponding training data; 3) the total number of tokens in both training and test data; and finally 4) type-to-token ratios (TTR) in training data. We also plot absolute perplexity scores of Char-CNN-LSTM (Kim et al., 2016) , the bestperforming model overall (see \u00a76), in relation to TTR ratios in Figure 1 . In isolating and some fusional languages (e.g., Vietnamese, Thai, English) the TTR tends to be small: we have a comparatively low number of infrequent words. Agglutinative languages such as Finnish, Estonian, and Korean are on the other side of the spectrum. Introflexive and fusional languages, typically over-represented in prior work (see the discussion in \u00a73), are found in the middle.", |
| "cite_spans": [ |
| { |
| "start": 561, |
| "end": 579, |
| "text": "(Kim et al., 2016)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 163, |
| "end": 170, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 654, |
| "end": 662, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "This emerges clearly in Figure 1 , grouping isolating languages to the left side of the x-axis, followed by fusional languages (Germanic and Romance first to the left, and then Balto-Slavic to the right), and placing agglutinative languages towards the far right. Crucially, TTR is an excellent predictor of LM performance. To measure the correlation between this corpus statistic and absolute LM performance, we compute their Pearson's r correlation. We find a strong positive correlation, with a value of r = 0.83 and significance p < 0.001.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 24, |
| "end": 32, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "We do observe a strong link between each language's morphological type, and the corresponding perplexity score. A transition in terms of the spectrum of morphological systems (see \u00a73) can be traced again on the y-axis of Figure 1 , roughly following the reported LM performance: from isolating, over fusional and introflexive to agglutinative languages. In fact, a correlation exists also between traditional morphological types and LM performance. We assessed its strength with the one-way ANOVA statistical test, obtaining a value of \u03b7 2 = 0.37 and a significance of p < 0.001.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 221, |
| "end": 229, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "Finally, it should be noted that the choice of TTR over other corpus statistics such as vocabulary size is motivated by the fact that the corpora are comparable, and not parallel. Because of this, the variation of V may stem from the contents rather than the intrinsic linguistic properties. As a counter-check, the correlation between V and LM performance is in fact milder, with r = 0.64. Yet, notwithstanding the stronger correlation, TTR is unable to explain the results entirely. Only through finer-grained typological features does it become possible to justify several outliers, as shown in the next subsection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "Fine-Grained Typological Analysis. Among the relevant typological features (see \u00a73 and Table 2 ), fusion and inflectional synthesis have the largest impact on word-level predictions. In fact, the former determines the word boundaries, whereas the latter regulates the amount of possible morpheme combinations. Consider their effect on the frequency distribution of words, expressed as follows (Zipf, 1949) :", |
| "cite_spans": [ |
| { |
| "start": 394, |
| "end": 406, |
| "text": "(Zipf, 1949)", |
| "ref_id": "BIBREF62" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 87, |
| "end": 95, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "f = \\frac{1/k^s}{\\sum_{n=1}^{V} 1/n^s} \\quad (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "f is the frequency, k the rank, and s \u2265 0 the exponent characteristic of the distribution. If high, both typological features enlarge V and s, assigning less probability mass to each word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "Low fusion means a preference for separate words (as in isolating languages such as Vietnamese and Chinese), leading to a smaller vocabulary with fewer (but more frequent) words. This property, additionally boosted by low inflectional synthesis, facilitates statistical language modeling in isolating languages. Vice versa, high fusion results in preference for concatenation of morphemes or introflection, and consequently sparser vocabularies. Yet, this distinction cannot justify the figures by itself, as it equates agglutinative languages and traditional fusional languages. Here, inflectional synthesis is also at play. Through the statistical test of one-way ANOVA, we found a weak effect of \u03b7 2 = 0.09 for fusion and a medium effect of \u03b7 2 = 0.21 for inflectional synthesis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "On the other hand, the fine-grained typological features of exponence and flexivity play a role in the ambiguity of the mapping between morphemes and meanings or grammatical functions. This turns out to be especially relevant for character-aware models. The intuition is that if the mapping is straightforward, injecting character information is more advantageous. To validate this claim, we evaluate the ANOVA between exponence of nouns and verbs and the difference in perplexity between LSTM and Char-CNN-LSTM. 5 We report a weak, although existent, correlation with value \u03b7 2 = 0.07 and \u03b7 2 = 0.04, respectively.", |
| "cite_spans": [ |
| { |
| "start": 513, |
| "end": 514, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "Further Discussion. Importantly, our largescale multilingual LM study strongly indicates that due to diverse typological profiles, certain languages and language groups are inherently more complex to language-model when relying on established statistical models, even when such models are constructed as widely applicable and (arguably) language-agnostic. This finding supports preliminary results from prior work (Botha and Blunsom, 2014; Adams et al., 2017; Cotterell et al., 2018) , and is also backed by insights from linguistic theory on variance of language complexity in general and variance of morphological complexity in specific (McWhorter, 2001; Evans and Levinson, 2009) . More broadly and along the same line, earlier research in statistical machine translation (SMT) has also shown that typological factors such as the amount of reordering, the morphological complexity, as well as genealogical relatedness of languages are crucial in predicting success in SMT (Birch et al., 2008; Paul et al., 2009; Daiber, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 414, |
| "end": 439, |
| "text": "(Botha and Blunsom, 2014;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 440, |
| "end": 459, |
| "text": "Adams et al., 2017;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 460, |
| "end": 483, |
| "text": "Cotterell et al., 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 639, |
| "end": 656, |
| "text": "(McWhorter, 2001;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 657, |
| "end": 682, |
| "text": "Evans and Levinson, 2009)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 975, |
| "end": 995, |
| "text": "(Birch et al., 2008;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 996, |
| "end": 1014, |
| "text": "Paul et al., 2009;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 1015, |
| "end": 1028, |
| "text": "Daiber, 2018)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "Our results indicate that the artificial fixed-vocabulary assumption from prior work produces overly optimistic perplexity scores, and its limitation is even more pronounced in morphologically rich languages, which inherently contain a large number of infrequent words due to their productive morphological systems. The typical solution to collect more data (Jozefowicz et al., 2016; Kawakami et al., 2017) mitigates this effect to a certain extent, but still suffers from the Zipfian hypothesis (1949), and it cannot be guaranteed for resource-poor languages where obtaining sufficient monolingual data is also a challenge (Adams et al., 2017) . Therefore, another solution is to resort to other sources of information which are not purely contextual/distributional. For instance, a promising line of current and future research is to (learn to) exploit subword-level patterns captured in an unsupervised manner (Pinter et al., 2017; Herbelot and Baroni, 2017) or integrate existing morphological generation and inflection tools and regularities (Cotterell et al., 2015; Bergmanis et al., 2017) into language models to reduce data sparsity, and improve language modeling for morphologically rich languages. For instance, a recent enhancement of the Char-CNN-LSTM language model that enforces similarity between parameters of morphologically related words leads to large perplexity gains across a large number of languages, with the most prominent gains reported for morphologically complex languages (Gerz et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 358, |
| "end": 383, |
| "text": "(Jozefowicz et al., 2016;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 384, |
| "end": 406, |
| "text": "Kawakami et al., 2017)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 624, |
| "end": 644, |
| "text": "(Adams et al., 2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 913, |
| "end": 934, |
| "text": "(Pinter et al., 2017;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 935, |
| "end": 961, |
| "text": "Herbelot and Baroni, 2017)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1047, |
| "end": 1071, |
| "text": "(Cotterell et al., 2015;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1072, |
| "end": 1095, |
| "text": "Bergmanis et al., 2017)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1501, |
| "end": 1520, |
| "text": "(Gerz et al., 2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "Given the recent success and improved performance with LM-based pre-training methodology (Peters et al., 2018; Howard and Ruder, 2018) across a wide variety of syntactic and semantic NLP tasks in English, improving language models for other languages might have far-reaching consequences for multilingual NLP in general. Typological information coded in typological databases offer invaluable support to language modeling (e.g., knowledge on word ordering, morphological regularities), but such typologicallyinformed LM architectures are still non-existent.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 110, |
| "text": "(Peters et al., 2018;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 111, |
| "end": 134, |
| "text": "Howard and Ruder, 2018)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of Language Models. A quick inspection of the results from", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper, we have run a large-scale study on Language Modeling (LM) across several architectures and a collection of 50 typologically diverse languages. We have demonstrated that typological properties of languages, such as their morphological systems, have an enormous impact on the performance of allegedly \"language-agnostic\" models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We have found that the corpus statistic most predictive of LM performance is the type-to-token ratio (TTR), as demonstrated by its strong Pearson's correlation. In turn, the value of TTR is motivated by fine-grained typological features that define the type of morphological system within a language. In fact, such features affect the word boundaries and the number of morphemes per word, affecting the word frequency distribution for each language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We have also observed that injecting character information into word representations is always beneficial because this mitigates the above-mentioned sparsity issues. However, the extent of the gain in perplexity partly depends on some typological properties that regulate the ambiguity of the mapping between morphemes (here modeled as character n-grams) and their meaning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We hope that NLP/LM practitioners will find the datasets for 50 languages put forth in this work along with benchmarked LMs useful for future developments in (language-agnostic as well as typologically-informed) multilingual language modeling. This study calls for next-generation solutions that will additionally leverage typological knowledge for improved language modeling. Code and data are available at: http://people.ds.cam.ac.uk/dsg40/lmmrl.html.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "For instance,Vania and Lopez (2017) report perplexity scores of \u224820 for Finnish when V is fixed to the 5k most frequent words. The same model in the full-vocabulary setup obtains perplexity scores of \u22482,000.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/kpu/kenlm", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Unfortunately no values are available in WALS for the feature of flexivity besides a limited domain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work is supported by the ERC Consolidator Grant LEXICAL (no 648909). The authors would like to thank the anonymous reviewers for their helpful suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Cross-lingual word embeddings for low-resource language modeling", |
| "authors": [ |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Adams", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Makarucha", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "937--947", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oliver Adams, Adam Makarucha, Graham Neubig, Steven Bird, and Trevor Cohn. 2017. Cross-lingual word embeddings for low-resource language model- ing. In Proceedings of EACL, pages 937-947.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Polyglot: Distributed word representations for multilingual NLP", |
| "authors": [ |
| { |
| "first": "Rami", |
| "middle": [], |
| "last": "Al-Rfou", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Perozzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Skiena", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "183--192", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rami Al-Rfou, Bryan Perozzi, and Steven Skiena. 2013. Polyglot: Distributed word representations for multilingual NLP. In Proceedings of CoNLL, pages 183-192.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Language sampling", |
| "authors": [ |
| { |
| "first": "Dik", |
| "middle": [], |
| "last": "Bakker", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "The Oxford handbook of linguistic typology", |
| "volume": "", |
| "issue": "", |
| "pages": "100--127", |
| "other_ids": { |
| "DOI": [ |
| "http://www.oxfordhandbooks.com/view/10.1093/oxfordhb/9780199281251.001.0001/oxfordhb-9780199281251-e-007?print=pdf" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dik Bakker. 2010. Language sampling. In The Oxford handbook of linguistic typology, pages 100-127. Ox- ford University Press.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Linguistic fundamentals for natural language processing: 100 essentials from morphology and syntax", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bender", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "https://www.morganclaypool.com/doi/abs/10.2200/S00493ED1V01Y201303HLT020?journalCode=hlt" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily M. Bender. 2013. Linguistic fundamentals for natural language processing: 100 essentials from morphology and syntax. Morgan & Claypool Pub- lishers.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Training data augmentation for low-resource morphological inflection", |
| "authors": [ |
| { |
| "first": "Toms", |
| "middle": [], |
| "last": "Bergmanis", |
| "suffix": "" |
| }, |
| { |
| "first": "Katharina", |
| "middle": [], |
| "last": "Kann", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "31--39", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toms Bergmanis, Katharina Kann, Hinrich Sch\u00fctze, and Sharon Goldwater. 2017. Training data augmen- tation for low-resource morphological inflection. In Proceedings of CoNLL, pages 31-39.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Inflectional Synthesis of the Verb", |
| "authors": [ |
| { |
| "first": "Balthasar", |
| "middle": [], |
| "last": "Bickel", |
| "suffix": "" |
| }, |
| { |
| "first": "Johanna", |
| "middle": [], |
| "last": "Nichols", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Balthasar Bickel and Johanna Nichols. 2013. Inflec- tional Synthesis of the Verb. Max Planck Institute for Evolutionary Anthropology, Leipzig.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Predicting success in machine translation", |
| "authors": [ |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "Miles", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "745--754", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexandra Birch, Miles Osborne, and Philipp Koehn. 2008. Predicting success in machine translation. In Proceedings of EMNLP, pages 745-754.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Bootstrapping the language archive: New prospects for natural language processing in preserving linguistic heritage", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Linguistic Issues in Language Technology", |
| "volume": "6", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bird. 2011. Bootstrapping the language archive: New prospects for natural language processing in preserving linguistic heritage. Linguistic Issues in Language Technology, 6(4).", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Findings of the 2013 Workshop on Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Buck", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 8th Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--44", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ond\u0159ej Bojar, Christian Buck, Chris Callison-Burch, Christian Federmann, Barry Haddow, Philipp Koehn, Christof Monz, Matt Post, Radu Soricut, and Lucia Specia. 2013. Findings of the 2013 Workshop on Statistical Machine Translation. In Proceedings of the 8th Workshop on Statistical Machine Translation, pages 1-44.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Compositional morphology for word representations and language modelling", |
| "authors": [ |
| { |
| "first": "Jan", |
| "middle": [ |
| "A" |
| ], |
| "last": "Botha", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "1899--1907", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jan A. Botha and Phil Blunsom. 2014. Compositional morphology for word representations and language modelling. In Proceedings of ICML, pages 1899- 1907.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "One billion word benchmark for measuring progress in statistical language modeling", |
| "authors": [ |
| { |
| "first": "Ciprian", |
| "middle": [], |
| "last": "Chelba", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "" |
| }, |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Brants", |
| "suffix": "" |
| }, |
| { |
| "first": "Phillipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "2635--2639", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ciprian Chelba, Tomas Mikolov, Mike Schuster, Qi Ge, Thorsten Brants, and Phillipp Koehn. 2013. One bil- lion word benchmark for measuring progress in sta- tistical language modeling. In Proceedings of IN- TERPSEECH, pages 2635-2639.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "CUED-RNNLM: An open-source toolkit for efficient training and evaluation of recurrent neural network language models", |
| "authors": [ |
| { |
| "first": "Xie", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xunying", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanmin", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "MJF", |
| "middle": [], |
| "last": "Gales", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip C", |
| "middle": [], |
| "last": "Woodland", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "6000--6004", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xie Chen, Xunying Liu, Yanmin Qian, MJF Gales, and Philip C Woodland. 2016. CUED-RNNLM: An open-source toolkit for efficient training and evalu- ation of recurrent neural network language models. In Proceedings of ICASSP, pages 6000 -6004.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Probabilistic typology: Deep generative models of vowel inventories", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1182--1192", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Cotterell and Jason Eisner. 2017. Probabilistic typology: Deep generative models of vowel invento- ries. In Proceedings of ACL, pages 1182-1192.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Are all languages equally hard to language-model?", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mielke", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Roark", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Cotterell, Sebastian J. Mielke, Jason Eisner, and Brian Roark. 2018. Are all languages equally hard to language-model? In Proceedings of NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Labeled morphological segmentation with semi-Markov models", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Fraser", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "164--174", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Cotterell, Thomas M\u00fcller, Alexander Fraser, and Hinrich Sch\u00fctze. 2015. Labeled morphological seg- mentation with semi-Markov models. In Proceed- ings of CoNLL, pages 164-174.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Typologically Robust Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Joachim", |
| "middle": [], |
| "last": "Daiber", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joachim Daiber. 2018. Typologically Robust Statisti- cal Machine Translation. Ph.D. thesis, University of Amsterdam.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Large linguistic areas and language sampling. Studies in Language. International Journal sponsored by the Foundation \"Foundations of Language", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "S" |
| ], |
| "last": "Dryer", |
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "", |
| "volume": "13", |
| "issue": "", |
| "pages": "257--292", |
| "other_ids": { |
| "DOI": [ |
| "10.1075/sl.13.2.03dry" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew S Dryer. 1989. Large linguistic areas and lan- guage sampling. Studies in Language. International Journal sponsored by the Foundation \"Foundations of Language\", 13(2):257-292.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "WALS Online. Max Planck Institute for Evolutionary Anthropology", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "S" |
| ], |
| "last": "Dryer", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Haspelmath", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew S. Dryer and Martin Haspelmath, editors. 2013. WALS Online. Max Planck Institute for Evo- lutionary Anthropology, Leipzig.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "The myth of language universals: Language diversity and its importance for cognitive science", |
| "authors": [ |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Evans", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [ |
| "C" |
| ], |
| "last": "Levinson", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Behavioral and Brain Sciences", |
| "volume": "32", |
| "issue": "5", |
| "pages": "429--448", |
| "other_ids": { |
| "DOI": [ |
| "10.1017/S0140525X0999094X" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nicholas Evans and Stephen C. Levinson. 2009. The myth of language universals: Language diversity and its importance for cognitive science. Behavioral and Brain Sciences, 32(5):429-448.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Sentence compression by deletion with LSTMs", |
| "authors": [ |
| { |
| "first": "Katja", |
| "middle": [], |
| "last": "Filippova", |
| "suffix": "" |
| }, |
| { |
| "first": "Enrique", |
| "middle": [], |
| "last": "Alfonseca", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [ |
| "A" |
| ], |
| "last": "Colmenares", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "360--368", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katja Filippova, Enrique Alfonseca, Carlos A. Col- menares, Lukasz Kaiser, and Oriol Vinyals. 2015. Sentence compression by deletion with LSTMs. In Proceedings of EMNLP, pages 360-368.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Neural network language models for low resource languages", |
| "authors": [ |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Gandhe", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Metze", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Lane", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "2615--2619", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankur Gandhe, Florian Metze, and Ian Lane. 2014. Neural network language models for low resource languages. In Proceedings of INTERSPEECH, pages 2615-2619.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Language modeling for morphologically rich languages: Character-aware modeling for wordlevel prediction", |
| "authors": [ |
| { |
| "first": "Daniela", |
| "middle": [], |
| "last": "Gerz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Edoardo", |
| "middle": [ |
| "Maria" |
| ], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Naradowsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the ACL", |
| "volume": "6", |
| "issue": "", |
| "pages": "451--465", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniela Gerz, Ivan Vuli\u0107, Edoardo Maria Ponti, Ja- son Naradowsky, Roi Reichart, and Anna Korho- nen. 2018. Language modeling for morphologically rich languages: Character-aware modeling for word- level prediction. Transactions of the ACL, 6:451- 465.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A bit of progress in language modeling", |
| "authors": [ |
| { |
| "first": "Joshua", |
| "middle": [ |
| "T" |
| ], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Computer Speech & Language", |
| "volume": "15", |
| "issue": "4", |
| "pages": "403--434", |
| "other_ids": { |
| "DOI": [ |
| "10.1006/csla.2001.0174" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joshua T. Goodman. 2001. A bit of progress in lan- guage modeling. Computer Speech & Language, 15(4):403-434.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Unbounded cache model for online language modeling with open vocabulary", |
| "authors": [ |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Moustapha", |
| "middle": [], |
| "last": "Ciss\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "6044--6054", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edouard Grave, Moustapha Ciss\u00e9, and Armand Joulin. 2017. Unbounded cache model for online language modeling with open vocabulary. In Proceedings of NIPS, pages 6044-6054.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Practical solutions to the problem of diagonal dominance in kernel document clustering", |
| "authors": [ |
| { |
| "first": "Derek", |
| "middle": [], |
| "last": "Greene", |
| "suffix": "" |
| }, |
| { |
| "first": "Padraig", |
| "middle": [], |
| "last": "Cunningham", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "377--384", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Derek Greene and Padraig Cunningham. 2006. Practi- cal solutions to the problem of diagonal dominance in kernel document clustering. In Proceedings of ICML, pages 377-384.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Understanding morphology", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Haspelmath", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Sims", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Haspelmath and Andrea Sims. 2013. Under- standing morphology.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Scalable modified Kneser-Ney language model estimation", |
| "authors": [ |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Pouzyrevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "H" |
| ], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "690--696", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenneth Heafield, Ivan Pouzyrevsky, Jonathan H. Clark, and Philipp Koehn. 2013. Scalable modified Kneser-Ney language model estimation. In Proceed- ings of ACL, pages 690-696.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "High-risk learning: acquiring new word vectors from tiny data", |
| "authors": [ |
| { |
| "first": "Aurelie", |
| "middle": [], |
| "last": "Herbelot", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "304--309", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aurelie Herbelot and Marco Baroni. 2017. High-risk learning: acquiring new word vectors from tiny data. In Proceedings of EMNLP, pages 304-309.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Universal language model fine-tuning for text classification", |
| "authors": [ |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Howard", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "328--339", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of ACL, pages 328-339.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Exploring the limits of language modeling", |
| "authors": [ |
| { |
| "first": "Rafal", |
| "middle": [], |
| "last": "Jozefowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rafal Jozefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, and Yonghui Wu. 2016. Exploring the lim- its of language modeling. In Proceedings of ICML.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Speech and Language Processing", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "H" |
| ], |
| "last": "Martin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "3", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Jurafsky and James H. Martin. 2017. Speech and Language Processing, volume 3. Pearson.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Learning to create and reuse words in openvocabulary neural language modeling", |
| "authors": [ |
| { |
| "first": "Kazuya", |
| "middle": [], |
| "last": "Kawakami", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1492--1502", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kazuya Kawakami, Chris Dyer, and Phil Blunsom. 2017. Learning to create and reuse words in open- vocabulary neural language modeling. In Proceed- ings of ACL, pages 1492-1502.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Character-aware neural language models", |
| "authors": [ |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Yacine", |
| "middle": [], |
| "last": "Jernite", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Sontag", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "2741--2749", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoon Kim, Yacine Jernite, David Sontag, and Alexan- der M. Rush. 2016. Character-aware neural lan- guage models. In Proceedings of AAAI, pages 2741- 2749.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Improved backing-off for M-gram language modeling", |
| "authors": [ |
| { |
| "first": "Reinhard", |
| "middle": [], |
| "last": "Kneser", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "181--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reinhard Kneser and Hermann Ney. 1995. Improved backing-off for M-gram language modeling. In Pro- ceedings of ICASSP, pages 181-184.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Europarl: A parallel corpus for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 10th Machine Translation Summit", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2005. Europarl: A parallel corpus for statistical machine translation. In Proceedings of the 10th Machine Translation Summit, pages 79-86.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Handwritten digit recognition with a back-propagation network", |
| "authors": [ |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernhard", |
| "middle": [ |
| "E" |
| ], |
| "last": "Boser", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "S" |
| ], |
| "last": "Denker", |
| "suffix": "" |
| }, |
| { |
| "first": "Donnie", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "E" |
| ], |
| "last": "Howard", |
| "suffix": "" |
| }, |
| { |
| "first": "Wayne", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hubbard", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [ |
| "D" |
| ], |
| "last": "Jackel", |
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "396--404", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yann LeCun, Bernhard E. Boser, John S. Denker, Don- nie Henderson, Richard E. Howard, Wayne E. Hub- bard, and Lawrence D. Jackel. 1989. Handwritten digit recognition with a back-propagation network. In Proceedings of NIPS, pages 396-404.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Finding function in form: Compositional character models for open vocabulary word representation", |
| "authors": [ |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiago", |
| "middle": [], |
| "last": "Lu\u00eds", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu\u00eds", |
| "middle": [], |
| "last": "Marujo", |
| "suffix": "" |
| }, |
| { |
| "first": "Ram\u00f3n", |
| "middle": [], |
| "last": "Fern\u00e1ndez Astudillo", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvio", |
| "middle": [], |
| "last": "Amir", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabel", |
| "middle": [], |
| "last": "Trancoso", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1520--1530", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang Ling, Tiago Lu\u00eds, Lu\u00eds Marujo, Ram\u00f3n Fern\u00e1n- dez Astudillo, Silvio Amir, Chris Dyer, Alan W. Black, and Isabel Trancoso. 2015. Finding function in form: Compositional character models for open vocabulary word representation. In Proceedings of EMNLP, pages 1520-1530.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Achieving open vocabulary neural machine translation with hybrid word-character models", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1054--1063", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong and Christopher D. Manning. 2016. Achieving open vocabulary neural machine transla- tion with hybrid word-character models. In Proceed- ings of ACL, pages 1054-1063.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Learning word vectors for sentiment analysis", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [ |
| "L" |
| ], |
| "last": "Maas", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "E" |
| ], |
| "last": "Daly", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "T" |
| ], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "142--150", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analysis. In Proceedings of ACL, pages 142-150.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Building a large annotated corpus of English: The Penn Treebank", |
| "authors": [ |
| { |
| "first": "Mitchell", |
| "middle": [ |
| "P" |
| ], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ann" |
| ], |
| "last": "Marcinkiewicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Beatrice", |
| "middle": [], |
| "last": "Santorini", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "2", |
| "pages": "313--330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beatrice Santorini. 1993. Building a large annotated corpus of English: The Penn Treebank. Computa- tional Linguistics, 19(2):313-330.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "The world's simplest grammars are Creole grammars", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Mcwhorter", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Linguistic Typology", |
| "volume": "5", |
| "issue": "2", |
| "pages": "125--66", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John McWhorter. 2001. The world's simplest gram- mars are Creole grammars. Linguistic Typology, 5(2):125-66.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Recurrent neural network based language model", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Karafi\u00e1t", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukas", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Cernock\u1ef3", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "1045--1048", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Martin Karafi\u00e1t, Lukas Burget, Jan Cernock\u1ef3, and Sanjeev Khudanpur. 2010. Recurrent neural network based language model. In Proceed- ings of INTERSPEECH, pages 1045-1048.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Gated word-character recurrent language model", |
| "authors": [ |
| { |
| "first": "Yasumasa", |
| "middle": [], |
| "last": "Miyamoto", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1992--1997", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yasumasa Miyamoto and Kyunghyun Cho. 2016. Gated word-character recurrent language model. In Proceedings of EMNLP, pages 1992-1997.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "A comparative investigation of morphological language modeling for the languages of the European Union", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| }, |
| { |
| "first": "Helmut", |
| "middle": [], |
| "last": "Schmid", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "386--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas M\u00fcller, Hinrich Sch\u00fctze, and Helmut Schmid. 2012. A comparative investigation of morphologi- cal language modeling for the languages of the Euro- pean Union. In Proceedings of NAACL-HLT, pages 386-395.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Survey on the use of typological information in natural language processing", |
| "authors": [ |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "O'Horan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yevgeni", |
| "middle": [], |
| "last": "Berzak", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "1297--1308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Helen O'Horan, Yevgeni Berzak, Ivan Vuli\u0107, Roi Re- ichart, and Anna Korhonen. 2016. Survey on the use of typological information in natural language pro- cessing. In Proceedings of COLING, pages 1297- 1308.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "On the importance of pivot language selection for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "Hirofumi", |
| "middle": [], |
| "last": "Yamamoto", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "221--224", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Paul, Hirofumi Yamamoto, Eiichiro Sumita, and Satoshi Nakamura. 2009. On the importance of pivot language selection for statistical machine translation. In Proceedings of NAACL-HLT, pages 221-224.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of NAACL-HLT, pages 2227-2237.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Mimicking word embeddings using subword RNNs", |
| "authors": [ |
| { |
| "first": "Yuval", |
| "middle": [], |
| "last": "Pinter", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Guthrie", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "102--112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuval Pinter, Robert Guthrie, and Jacob Eisenstein. 2017. Mimicking word embeddings using subword RNNs. In Proceedings of EMNLP, pages 102-112.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Split morphology: How agglutination and flexion mix", |
| "authors": [ |
| { |
| "first": "Frans", |
| "middle": [], |
| "last": "Plank", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Linguistic Typology", |
| "volume": "21", |
| "issue": "", |
| "pages": "1--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frans Plank. 2017. Split morphology: How agglu- tination and flexion mix. Linguistic Typology, 21(2017):1-62.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "A language modeling approach to information retrieval", |
| "authors": [ |
| { |
| "first": "Jay", |
| "middle": [ |
| "M" |
| ], |
| "last": "Ponte", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "Bruce" |
| ], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "275--281", |
| "other_ids": { |
| "DOI": [ |
| "http://doi.acm.org/10.1145/290941.291008" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jay M. Ponte and W. Bruce Croft. 1998. A language modeling approach to information retrieval. In Pro- ceedings of SIGIR, pages 275-281.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Modeling language variation and universals: A survey on typological linguistics for natural language processing", |
| "authors": [ |
| { |
| "first": "Edoardo", |
| "middle": [ |
| "Maria" |
| ], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "O'Horan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yevgeni", |
| "middle": [], |
| "last": "Berzak", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Thierry", |
| "middle": [], |
| "last": "Poibeau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edoardo Maria Ponti, Helen O'Horan, Yevgeni Berzak, Ivan Vuli\u0107, Roi Reichart, Thierry Poibeau, Ekaterina Shutova, and Anna Korhonen. 2018. Modeling lan- guage variation and universals: A survey on typo- logical linguistics for natural language processing. CoRR, abs/1807.00914.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Decoding sentiment from distributed representations of sentences", |
| "authors": [ |
| { |
| "first": "Edoardo", |
| "middle": [ |
| "Maria" |
| ], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of *SEM", |
| "volume": "", |
| "issue": "", |
| "pages": "22--32", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edoardo Maria Ponti, Ivan Vuli\u0107, and Anna Korhonen. 2017. Decoding sentiment from distributed repre- sentations of sentences. In Proceedings of *SEM, pages 22-32.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Using the output embedding to improve language models", |
| "authors": [ |
| { |
| "first": "Ofir", |
| "middle": [], |
| "last": "Press", |
| "suffix": "" |
| }, |
| { |
| "first": "Lior", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "157--163", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ofir Press and Lior Wolf. 2017. Using the output em- bedding to improve language models. In Proceed- ings of EACL, pages 157-163.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "A neural attention model for abstractive sentence summarization", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rush", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "379--389", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander M. Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sen- tence summarization. In Proceedings of EMNLP, pages 379-389.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Highway networks", |
| "authors": [ |
| { |
| "first": "Rupesh", |
| "middle": [ |
| "Kumar" |
| ], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Greff", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the ICML Deep Learning Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rupesh Kumar Srivastava, Klaus Greff, and J\u00fcrgen Schmidhuber. 2015. Highway networks. In Pro- ceedings of the ICML Deep Learning Workshop.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "From feedforward to recurrent LSTM neural networks for language modeling", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Sundermeyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralf", |
| "middle": [], |
| "last": "Schluter", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "IEEE Transactions on Audio, Speech and Language Processing", |
| "volume": "23", |
| "issue": "3", |
| "pages": "517--529", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Sundermeyer, Hermann Ney, and Ralf Schluter. 2015. From feedforward to recurrent LSTM neu- ral networks for language modeling. IEEE Trans- actions on Audio, Speech and Language Processing, 23(3):517-529.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "From characters to words to in between: Do we capture morphology?", |
| "authors": [ |
| { |
| "first": "Clara", |
| "middle": [], |
| "last": "Vania", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lopez", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "2016--2027", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Clara Vania and Adam Lopez. 2017. From characters to words to in between: Do we capture morphology? In Proceedings of ACL, pages 2016-2027.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Morph-fitting: Fine-tuning word vector spaces with simple language-specific rules", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Diarmuid", |
| "middle": [ |
| "\u00d3" |
| ], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "56--68", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107, Nikola Mrk\u0161i\u0107, Roi Reichart, Diarmuid \u00d3 S\u00e9aghdha, Steve Young, and Anna Korhonen. 2017. Morph-fitting: Fine-tuning word vector spaces with simple language-specific rules. In Proceedings of ACL, pages 56-68.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Larger-context language modelling with recurrent neural network", |
| "authors": [ |
| { |
| "first": "Tian", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1319--1329", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tian Wang and Kyunghyun Cho. 2016. Larger-context language modelling with recurrent neural network. In Proceedings of ACL, pages 1319-1329.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Embeddingbased query language models", |
| "authors": [ |
| { |
| "first": "Hamed", |
| "middle": [], |
| "last": "Zamani", |
| "suffix": "" |
| }, |
| { |
| "first": "W. Bruce", |
| "middle": [], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of IC-TIR", |
| "volume": "", |
| "issue": "", |
| "pages": "147--156", |
| "other_ids": { |
| "DOI": [ |
| "http://doi.acm.org/10.1145/2970398.2970405" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hamed Zamani and W. Bruce Croft. 2016. Embedding- based query language models. In Proceedings of IC- TIR, pages 147-156.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Recurrent neural network regularization", |
| "authors": [ |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ICLR (Conference Papers)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wojciech Zaremba, Ilya Sutskever, and Oriol Vinyals. 2015. Recurrent neural network regularization. In Proceedings of ICLR (Conference Papers).", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "Human behavior and the principle of least effort: An introduction to human ecology", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "Kingsley" |
| ], |
| "last": "Zipf", |
| "suffix": "" |
| } |
| ], |
| "year": 1949, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "https://onlinelibrary.wiley.com/doi/abs/10.1002/1097-4679%28195007%296%3A3%3C306%3A%3AAID-JCLP2270060331%3E3.0.CO%3B2-7" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Kingsley Zipf. 1949. Human behavior and the principle of least effort: An introduction to human ecology.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "open-FUT.1SG (Italian)3 All morphological glosses follow the Leipzig glossing rules, listed at https://www.eva.mpg.de/lingua/ resources/glossing-rules.php(5) 'e-\u0161mor 1SG-guard.FUT 'al on ha-d'lat-\u00f3t DEF-door-PL v'-lo and-NEG 'e-ftach 1SG-wait.FUT ot\u00e1n them (Hebrew)", |
| "num": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Perplexity scores with the Char-CNN-LSTM language model(Kim et al., 2016) on PTBsized language modeling data in 50 languages as a function of type-to-token ratios in training data.", |
| "num": null |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null, |
| "text": "Examples from Finnish and Korean LM datasets after applying the standard fixed-vocabulary assumption. MIN=5: only words with corpus frequency above 5 are retained in the final fixed vocabulary V ; 10K: V comprises the 10k most frequent words.", |
| "num": null |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null, |
| "text": "Test perplexities for 50 languages (ISO 639-1 codes sorted alphabetically) in the full-vocabulary prediction LM setup; Left: Basic statistics of LM evaluation data (see \u00a74 and \u00a75). Right: Results with all three language models in our comparison. Best absolute perplexity scores for each language are in bold, but note that the absolute scores in the KN5 column are not directly comparable to the scores obtained with neural models due to a different handling of OOVs at test time (see \u00a75).", |
| "num": null |
| } |
| } |
| } |
| } |