| { |
| "paper_id": "K19-1021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:05:55.181080Z" |
| }, |
| "title": "On the Importance of Subword Information for Morphological Tasks in Truly Low-Resource Languages", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Heinzerling", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "RIKEN AIP", |
| "institution": "", |
| "location": {} |
| }, |
| "email": "benjamin.heinzerling@riken.jp" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Strube", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "michael.strube@h-its.org" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recent work has validated the importance of subword information for word representation learning. Since subwords increase parameter sharing ability in neural models, their value should be even more pronounced in low-data regimes. In this work, we therefore provide a comprehensive analysis focused on the usefulness of subwords for word representation learning in truly low-resource scenarios and for three representative morphological tasks: fine-grained entity typing, morphological tagging, and named entity recognition. We conduct a systematic study that spans several dimensions of comparison: 1) type of data scarcity which can stem from the lack of taskspecific training data, or even from the lack of unannotated data required to train word embeddings, or both; 2) language type by working with a sample of 16 typologically diverse languages including some truly low-resource ones (e.g. Rusyn, Buryat, and Zulu); 3) the choice of the subword-informed word representation method. Our main results show that subword-informed models are universally useful across all language types, with large gains over subword-agnostic embeddings. They also suggest that the effective use of subwords largely depends on the language (type) and the task at hand, as well as on the amount of available data for training the embeddings and taskbased models, where having sufficient in-task data is a more critical requirement.", |
| "pdf_parse": { |
| "paper_id": "K19-1021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recent work has validated the importance of subword information for word representation learning. Since subwords increase parameter sharing ability in neural models, their value should be even more pronounced in low-data regimes. In this work, we therefore provide a comprehensive analysis focused on the usefulness of subwords for word representation learning in truly low-resource scenarios and for three representative morphological tasks: fine-grained entity typing, morphological tagging, and named entity recognition. We conduct a systematic study that spans several dimensions of comparison: 1) type of data scarcity which can stem from the lack of taskspecific training data, or even from the lack of unannotated data required to train word embeddings, or both; 2) language type by working with a sample of 16 typologically diverse languages including some truly low-resource ones (e.g. Rusyn, Buryat, and Zulu); 3) the choice of the subword-informed word representation method. Our main results show that subword-informed models are universally useful across all language types, with large gains over subword-agnostic embeddings. They also suggest that the effective use of subwords largely depends on the language (type) and the task at hand, as well as on the amount of available data for training the embeddings and taskbased models, where having sufficient in-task data is a more critical requirement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recent studies have confirmed the usefulness of leveraging subword-level information in learning word representations (Peters et al., 2018; Heinzerling and Strube, 2018; Grave et al., 2018; Zhu et al., 2019, inter alia) , and in a range of tasks such as sequence tagging (Lample et al., 2016 ; Akbik * Equal contribution, work partly done while at HITS. Devlin et al., 2019) , fine-grained entity typing (Zhu et al., 2019) , neural machine translation (Sennrich et al., 2016; Luong and Manning, 2016; Lample et al., 2018; Durrani et al., 2019) , or general and rare word similarity (Pilehvar et al., 2018; Zhu et al., 2019) . The subword-informed word representation architectures leverage the internal structure of words and assume that a word's meaning can be inferred from the meaning of its constituent (i.e., subword) parts. Instead of treating each word as an atomic unit, subword-informed neural architectures reduce data sparsity by relying on parameterization at the level of subwords (Bojanowski et al., 2017; Pinter et al., 2017; Chaudhary et al., 2018; Kudo, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 139, |
| "text": "(Peters et al., 2018;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 140, |
| "end": 169, |
| "text": "Heinzerling and Strube, 2018;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 170, |
| "end": 189, |
| "text": "Grave et al., 2018;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 190, |
| "end": 219, |
| "text": "Zhu et al., 2019, inter alia)", |
| "ref_id": null |
| }, |
| { |
| "start": 271, |
| "end": 291, |
| "text": "(Lample et al., 2016", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 354, |
| "end": 374, |
| "text": "Devlin et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 404, |
| "end": 422, |
| "text": "(Zhu et al., 2019)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 452, |
| "end": 475, |
| "text": "(Sennrich et al., 2016;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 476, |
| "end": 500, |
| "text": "Luong and Manning, 2016;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 501, |
| "end": 521, |
| "text": "Lample et al., 2018;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 522, |
| "end": 543, |
| "text": "Durrani et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 582, |
| "end": 605, |
| "text": "(Pilehvar et al., 2018;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 606, |
| "end": 623, |
| "text": "Zhu et al., 2019)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 994, |
| "end": 1019, |
| "text": "(Bojanowski et al., 2017;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1020, |
| "end": 1040, |
| "text": "Pinter et al., 2017;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1041, |
| "end": 1064, |
| "text": "Chaudhary et al., 2018;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1065, |
| "end": 1076, |
| "text": "Kudo, 2018)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Motivation", |
| "sec_num": "1" |
| }, |
| { |
| "text": "An increasing body of work focuses on various aspects of subword-informed representation learning such as segmentation of words into subwords and composing subword embeddings into word representations (Lazaridou et al., 2013; Sch\u00fctze, 2015, 2018; Avraham and Goldberg, 2017; Vania and Lopez, 2017; Kim et al., 2018; Zhang et al., 2018; Zhao et al., 2018, inter alia) . 1 The increased parameter sharing ability of such models is especially relevant for learning embeddings of rare and unseen words. Therefore, the importance of subword-level knowledge should be even more pronounced in low-data regimes for truly low-resource languages. Yet, a systematic study focusing exactly on the usefulness of subword information in such settings is currently missing in the literature. In this work, we fill this gap by providing a comprehensive analysis of subword-informed representation learning focused on low-resource setups.", |
| "cite_spans": [ |
| { |
| "start": 201, |
| "end": 225, |
| "text": "(Lazaridou et al., 2013;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 226, |
| "end": 246, |
| "text": "Sch\u00fctze, 2015, 2018;", |
| "ref_id": null |
| }, |
| { |
| "start": 247, |
| "end": 274, |
| "text": "Avraham and Goldberg, 2017;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 275, |
| "end": 297, |
| "text": "Vania and Lopez, 2017;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 298, |
| "end": 315, |
| "text": "Kim et al., 2018;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 316, |
| "end": 335, |
| "text": "Zhang et al., 2018;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 336, |
| "end": 366, |
| "text": "Zhao et al., 2018, inter alia)", |
| "ref_id": null |
| }, |
| { |
| "start": 369, |
| "end": 370, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Motivation", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our study centers on the following axes of comparison, focusing on three representative tasks where subword-level information can guide learn-ing, namely fine-grained entity typing (FGET), morphological tagging (MTAG), and named entity recognition (NER): 1) Since data scarcity can stem from unavailability of (i) task-specific training data or (ii) unannotated corpora to train the embeddings in the first place, or (iii) both, we analyse how different data regimes affect the final task performance. 2) We experiment with 16 languages representing 4 diverse morphological types, with a focus on truly low-resource languages such as Zulu, Rusyn, Buryat, or Bambara. 3) We experiment with a variety of subword-informed representation architectures, where the focus is on unsupervised, widely portable language-agnostic methods such as the ones based on character n-grams (Luong and Manning, 2016; Bojanowski et al., 2017) , Byte Pair Encodings (BPE) (Sennrich et al., 2016; Heinzerling and Strube, 2018) , Morfessor (Smit et al., 2014) , or BERT-style pretraining and fine-tuning (Devlin et al., 2019) which relies on WordPieces (Wu et al., 2016) . We demonstrate that by tuning subword-informed models in low-resource settings we can obtain substantial gains over subwordagnostic models such as skip-gram with negative sampling (Mikolov et al., 2013) across the board.", |
| "cite_spans": [ |
| { |
| "start": 871, |
| "end": 896, |
| "text": "(Luong and Manning, 2016;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 897, |
| "end": 921, |
| "text": "Bojanowski et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 950, |
| "end": 973, |
| "text": "(Sennrich et al., 2016;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 974, |
| "end": 1003, |
| "text": "Heinzerling and Strube, 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1016, |
| "end": 1035, |
| "text": "(Smit et al., 2014)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 1080, |
| "end": 1101, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1129, |
| "end": 1146, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 1329, |
| "end": 1351, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Motivation", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The main goal of this study is to identify viable and effective subword-informed approaches for truly low-resource languages and offer modeling guidance in relation to the target task, the language at hand, and the (un)availability of general and/or task-specific training data. As expected, our key results indicate that there is no straightforward \"one-size-fits-all\" solution, although certain approaches (e.g., BPE-based or character n-grams) emerge as more robust in general. The optimal subword-informed configurations are largely task-, language-, and resource-dependent: their performance hinges on a complex interplay of the multiple factors mentioned above. For instance, we show that fine-tuning pretrained multilingual BERT (Devlin et al., 2019; Wu and Dredze, 2019 ) is a viable strategy for \"double\" low-resource settings in the NER and MTAG tasks, but it fails for the FGET task in the same setting; furthermore, its performance can be matched or surpassed by other subwordinformed methods in NER and MTAG as soon as they obtain sufficient embedding training data.", |
| "cite_spans": [ |
| { |
| "start": 736, |
| "end": 757, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 758, |
| "end": 777, |
| "text": "Wu and Dredze, 2019", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Motivation", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In what follows, we further motivate our work by analyzing two different sources of data scarcity: embedding training data (termed WE data) and task-specific training data (termed task data). Following that, we motivate our selection of test languages and outline the subword-informed representation methods compared in our evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Types of Data Scarcity. The majority of languages in the world still lack basic language technology, and progress in natural language processing is largely hindered by the lack of annotated task data that can guide machine learning models (Agi\u0107 et al., 2016; . However, many languages face another challenge: the lack of large unannotated text corpora that can be used to induce useful general features such as word embeddings (Adams et al., 2017; Fang and Cohn, 2017; ): 2 i.e. WE data.", |
| "cite_spans": [ |
| { |
| "start": 239, |
| "end": 258, |
| "text": "(Agi\u0107 et al., 2016;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 427, |
| "end": 447, |
| "text": "(Adams et al., 2017;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 448, |
| "end": 468, |
| "text": "Fang and Cohn, 2017;", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The absence of data has over the recent years materialized the proxy fallacy. That is, methods tailored for low-resource languages are typically tested only by proxy, simulating low-data regimes exclusively on resource-rich languages (Agi\u0107 et al., 2017) . While this type of evaluation is useful for analyzing the main properties of the intended lowresource methods in controlled in vitro conditions, a complete evaluation should also provide results on true low-resource languages in vivo. In this paper we therefore conduct both types of evaluation. Note that in this work we still focus on low-resource languages that have at least some digital footprint (see the statistics later in Table 1) , while handling zero-resource languages without any available data (Kornai, 2013; is a challenge left for future work.", |
| "cite_spans": [ |
| { |
| "start": 234, |
| "end": 253, |
| "text": "(Agi\u0107 et al., 2017)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 764, |
| "end": 778, |
| "text": "(Kornai, 2013;", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 687, |
| "end": 695, |
| "text": "Table 1)", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "(Selection of) Languages. Both sources of data scarcity potentially manifest in degraded task performance for low-resource languages: our goal is to analyze the extent to which these factors affect downstream tasks across morphologically diverse language types that naturally come with varying data sizes to train their respective embeddings and task-based models. Our selection of test languages is therefore guided by the following goals: a) following recent initiatives (e.g. in language modeling) Gerz et al., 2018) , we aim to ensure coverage of different genealogical and typological properties; b) we aim to cover low-resource languages with varying amounts of available WE data and task-specific data.", |
| "cite_spans": [ |
| { |
| "start": 501, |
| "end": 519, |
| "text": "Gerz et al., 2018)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We select 16 languages in total spanning 4 broad morphological types, listed in Table 1 . Among these, we chose one (relatively) high-resource language for each type: Turkish (agglutinative), English (fusional), Hebrew (introflexive), and Chinese (isolating). We use these four languages to simulate data scarcity scenarios and run experiments where we control the degree of data scarcity related to both embedding training data and task-related data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 80, |
| "end": 87, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The remaining 12 languages are treated as test languages with varying amounts of available data (see Table 1 . For instance, relying on the Wikipedia data for embedding training, Gothic (GOT) is the language from our set that contains the fewest number of word tokens in its respective Wikipedia (18K, in terms of Wikipedia size this ranks it as 273th out of 304 Wikipedia languages); Irish Gaelic (GA) with 4.4M tokens is ranked 87/304.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 101, |
| "end": 108, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Subword-Informed Word Representations. We mainly follow the framework of Zhu et al. (2019) for the construction of subword-informed word representations; the reader is encouraged to refer to the original paper for more details. In short, to compute the representation for a given word w \u2208 V , where V is the word vocabulary, the framework is based on three main components: 1) segmentation of words into subwords, 2) interaction between subword and position embeddings, and 3) a composition function that yields the final word embedding from the constituent subwords. Zhu et al. (2019) explored a large space of possible subword-informed configurations. Based on their findings, we select a representative subset of model configurations. They can be obtained by varying the components listed in Table 2 .", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 90, |
| "text": "Zhu et al. (2019)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 795, |
| "end": 802, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Concretely, w is first segmented into an ordered subword sequence from the subword vocabulary S by a deterministic subword segmentation method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To enable automatic language-agnostic segmentation across multiple languages, we focus on unsupervised segmentation methods: we work with Morfessor (Smit et al., 2014) , character n-grams (Bojanowski et al., 2017) and BPE (Gage, 1994) . We use the default parameters for Morfessor, and the same 3 to 6 character n-gram range as Bojanowski et al. (2017) . For BPE, the number of merge operations is a tunable hyper-parameter. It controls the segmentation \"aggressiveness\": the larger the number the more conservative the BPE segmentation is. Following Heinzerling and Strube (2018), we investigate the values {1e3, 1e4, 1e5}: this allows us to test varying segmentation granularity in relation to different language types.", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 167, |
| "text": "(Smit et al., 2014)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 188, |
| "end": 213, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 222, |
| "end": 234, |
| "text": "(Gage, 1994)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 328, |
| "end": 352, |
| "text": "Bojanowski et al. (2017)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "After segmentation into subwords, each subword is represented by a vector s from the subword embedding matrix S \u2208 R |S|\u00d7d , where d is the dimensionality of subword embeddings. Optionally, the word itself can be appended to the subword sequence and embedded into the subword space in order to incorporate word-level information (Bojanowski et al., 2017) . To encode subword order, s can be further enriched by a trainable position embedding p. We use addition to combine subword and position embeddings, namely s := s + p, which has become the de-facto standard method to encode positional information (Gehring et al., 2017; Vaswani et al., 2017; Devlin et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 328, |
| "end": 353, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 602, |
| "end": 624, |
| "text": "(Gehring et al., 2017;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 625, |
| "end": 646, |
| "text": "Vaswani et al., 2017;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 647, |
| "end": 667, |
| "text": "Devlin et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Finally, the subword embedding sequence is passed to a composition function, which computes the final word representation. Li et al. (2018) and Zhu et al. (2019) Table 2 : Components for constructing subwordinformed word representations. In the bpeX label X \u2208 {1e3, 1e4, 1e5} denotes the BPE vocabulary size. (Mikolov et al., 2013) as our word-level distributional model: the target word embedding is computed by our subword-informed model, and the context word is parameterized by the (word-level) context embedding matrix W c \u2208 R |V |\u00d7dc . We compare subword-informed architectures to two well-known word representation models, also captured by the general framework of Zhu et al. (2019) : 1) the subword-agnostic skip-gram model from the word2vec package (Mikolov et al., 2013 ) (W2V), and 2) fastText (FT) (Bojanowski et al., 2017) . The comparison to W2V aims to validate the potential benefit of subword-informed word representations for truly low-resource languages, while the comparison to FT measures the gains that can be achieved by more sophisticated and fine-tuned subword-informed architectures. We also compare with pretrained multilingual BERT base (Devlin et al., 2019) on the languages supported by this model.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 139, |
| "text": "Li et al. (2018)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 144, |
| "end": 161, |
| "text": "Zhu et al. (2019)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 309, |
| "end": 331, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 672, |
| "end": 689, |
| "text": "Zhu et al. (2019)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 758, |
| "end": 779, |
| "text": "(Mikolov et al., 2013", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 810, |
| "end": 835, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 162, |
| "end": 169, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Fine-Grained Entity Typing. FGET is cast as a sequence classification problem, where an entity mention consisting of one or more tokens (e.g. Lincolnshire, Bill Clinton), is mapped to one of the 112 fine-grained entity types from the FIGER inventory (Ling and Weld, 2012; Yaghoobzadeh and Sch\u00fctze, 2015; Heinzerling and Strube, 2018) . Since entity mentions are short token sequences and not full sentences, this semi-morphological/semantic task requires a model to rely on the subword information of individual tokens in the absence of sentence context. That is, subwords can provide evidence useful for entity type classification in the absence of context. For instance, Lincolnshire is assigned the type /location/county as -shire is a suffix that strongly indicates a location. Hence, FGET is well-suited for evaluating subword-informed rep-resentations, and can benefit from the information.", |
| "cite_spans": [ |
| { |
| "start": 250, |
| "end": 271, |
| "text": "(Ling and Weld, 2012;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 272, |
| "end": 303, |
| "text": "Yaghoobzadeh and Sch\u00fctze, 2015;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 304, |
| "end": 333, |
| "text": "Heinzerling and Strube, 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Tasks", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Morphological Tagging. MTAG is the task of annotating each word in a sentence with features such as part-of-speech, gender, number, tense, and case. These features are represented as a set of key-value pairs. For example, classified is a finite (Fin) verb (V) in indicative (Ind) mood, third person, past tense, which is annotated with the morphological tag {POS=V, Mood=Ind, Person=3, Tense=Past, Verb-Form=Fin}, and the female singular third-person possessive personal pronoun her with the morphological tag {Gender=Fem, Number=Sing, Person=3, Poss=Yes, PronType=Prs}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Tasks", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Named Entity Recognition. NER is the task of annotating textual mentions of real-world entities with their semantic type, such as person, location, and organization: e.g., Barack Obama (person) was born in Hawaii (location).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Tasks", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Embedding Training: WE Data. For training word and subword embeddings, we rely on Wikipedia text for all 16 languages, with corresponding Wikipedia sizes listed in Table 1 . For training embeddings in controlled low-resource settings with our 4 \"simulation\" languages, we sample nine data points to simulate low-resource scenarios with WE data. Specifically, we sample 10K, 20K, 50K, 100K, 200K, 500K, 1M, 2M, and 5M tokens of article text for each of the 4 languages. For the other 12 languages we report results obtained by training embedding on the full Wikipedia edition.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 164, |
| "end": 171, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Task-Specific Data: Task Data. The maximum number of training instances for all languages is again provided in Table 1 . As before, for 4 languages we simulate low-resource settings by taking only a sample of the available task data: for FGET we work with 200, 2K or 20K training instances which roughly correspond to training regimes of different data availability, while we select 300, 3 1K, and 10K sentences for NER and MGET. Again, for the remaining 12 languages, we use all the available data to run the experiments. We adopt existing data splits into training, development, and test portions for MTAG (Cotterell and Heigold, 2017) , and random splits for FGET (Heinzerling and Strube, 2018; Zhu et al., 2019) and NER (Pan et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 608, |
| "end": 637, |
| "text": "(Cotterell and Heigold, 2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 667, |
| "end": 697, |
| "text": "(Heinzerling and Strube, 2018;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 698, |
| "end": 715, |
| "text": "Zhu et al., 2019)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 724, |
| "end": 742, |
| "text": "(Pan et al., 2017)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 111, |
| "end": 118, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A large number of data points for scarcity simulations allow us to trace how performance on the three tasks varies in relation to the availability of WE data versus task data, and what data source is more important for the final performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Embedding Training Setup. When training our subword-informed representations, we argue that keeping hyper-parameters fixed across different data points will possibly result in underfitting for larger data sizes or overfitting for smaller data sizes. Therefore, we split data points into three groups: [10K, 50K] (G1), (50K, 500K] (G2) and (500K, 5M ] (G3), and use the same hyperparameters for word embedding training within the same group. For G1, we train with batch size 32 for 60 epochs and set the minimum word frequency threshold to 2. For G2 the values are: 128/30/3, and 512/15/5 for G3. This way, we ensure that 1) the difference of the absolute data sizes can be compared within the same data group, and 2) for the corresponding data points in different groups (10K, 100K, 1M ) the sample efficiency can be compared, as the models trained on these data points undergo roughly the same number of updates. 4", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Task-Specific Training Setup. For FGET, we use the dataset of Heinzerling and Strube (2018) obtained by mapping entity mentions from Wikidata (Vrande\u010di\u0107 and Kr\u00f6tzsch, 2014) to their associated FIGER-based most notable entity type (Ling and Weld, 2012) . For each language, we randomly sample up to 100k pairs of entity mentions with corresponding entity type and create random 60/20/20 train/dev/test splits. Our FGET model is designed after the hierarchical architecture by Zhu et al. (2019) . For each entity token, we first use our subword-informed model to obtain word representations, and then feed the token embedding sequence into a bidirectional LSTM with 2 hidden layers of size 512, followed by a projection layer which predicts the entity type. We initialize our FGET model with the pretrained subword model, and fine-tune it during training. With BERT, we input the entire entity mention and then use the representation of the special [CLS] token for classification. We train with early stopping, using Adam (Kingma and Ba, 2015) with default parameters across all languages. As suggested by Wu and Dredze (2019) , BERT hyper-parameters are more sensitive to smaller data sizes, so we tune them on the smallest data point with 200 training instances. We follow Wu and Dredze (2019) to select hyperparameter candidates, i.e., 2e\u22125/3e\u22125/5e\u22125 for learning rate, 16/32 for batch size and triangular learning rate scheduler with first 10% of batches as warm-up. We do an exhaustive search on four high resource languages: EN, TR, HE, ZH and select the hyper-parameter combination with the best average score on the development sets.", |
| "cite_spans": [ |
| { |
| "start": 142, |
| "end": 172, |
| "text": "(Vrande\u010di\u0107 and Kr\u00f6tzsch, 2014)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 230, |
| "end": 251, |
| "text": "(Ling and Weld, 2012)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 475, |
| "end": 492, |
| "text": "Zhu et al. (2019)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 1104, |
| "end": 1124, |
| "text": "Wu and Dredze (2019)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 1273, |
| "end": 1293, |
| "text": "Wu and Dredze (2019)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For MTAG, we evaluate on the multilingual morphological annotations provided by the Universal Dependencies project (Nivre et al., 2016) and adopt the experimental protocol of Cotterell and Heigold (2017) . Specifically, we cast MTAG as a sequence labeling task by treating the concatenation of all key-value pairs for a given word as the word's label. As sequence labeling model, we train a bidirectional LSTM (Hochreiter and Schmidhuber, 1997; , with two layers of size 1024 and dropout 0.4, using early stopping on the development set. For experiments involving multilingual BERT, we fine-tune all of BERT's layers and feed the final layer into an LSTM before classification. The evaluation metric is per-label accuracy, i.e., a word's morphological tag is either predicted correctly or not, and there is no partial credit for the correct prediction of only a subset of features.", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 135, |
| "text": "(Nivre et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 175, |
| "end": 203, |
| "text": "Cotterell and Heigold (2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 410, |
| "end": 444, |
| "text": "(Hochreiter and Schmidhuber, 1997;", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We evaluate NER performance on WikiAnn (Pan et al., 2017) , a multilingual dataset which provides three-class entity type annotations which were automatically extracted from Wikipedia. We train sequence labeling models using exactly the same architectures and hyper-parameters as in MTAG, and report F1 scores. As WikiAnn does not come with predefined train/dev/test sets, we create random 60/20/20 splits.", |
| "cite_spans": [ |
| { |
| "start": 39, |
| "end": 57, |
| "text": "(Pan et al., 2017)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Results for data scarcity simulation experiments are summarized in Figures 1-3 , while the results on the remaining 12 languages for all three tasks are provided in Tables 3-4, with the best results among different configurations of subword-informed methods reported. As the first main finding, the results show that subword-informed architectures substantially outperform the subword-agnostic skip-gram W2V baseline, and the gaps are in some cases very large: e.g., see the results in Figures 1-3 for the settings with extremely scarce WE data. These scores verify the importance of subword-level knowledge for low-resource setups.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 67, |
| "end": 78, |
| "text": "Figures 1-3", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 486, |
| "end": 497, |
| "text": "Figures 1-3", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Simulating Data Scarcity. Another general finding concerns the importance of WE data versus task data. The simulation results in Figure 1 -3 suggest that both types of data are instrumental to improved task performance: This finding is universal as we observe similar behaviors across tasks and across different languages. While WE data is important, considerably larger gains are achieved by collecting more task data: e.g., see the large gains in FGET when training on 200 versus 2K entity mentions. In summary, both types of data scarcity decrease performance, but the impact of scarce task data seems more pronounced. Collecting more WE data when dealing with scarce task data leads to larger gains in the FGET task compared to MTAG or NER.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 129, |
| "end": 137, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "While subword models are generally better than the baselines across different data points, less aggressive segmentation models and token-based models close the gap very quickly when increasing WE data, which is in line with the findings of Zhu et al. (2019) , where morf eventually prevails in this task with abundant WE data. This again verifies the usefulness of subword-level knowledge for low-(WE) data regimes. Similar trends emerge in terms of task data, but the advantage of subword models seems more salient with more task data. The underlying task architectures start making use of subword features more effectively: this shows that subword-level knowledge is particularly useful for the three chosen morphological tasks.", |
| "cite_spans": [ |
| { |
| "start": 240, |
| "end": 257, |
| "text": "Zhu et al. (2019)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
"text": "Performance of BERT. An interesting analysis regarding the (relative) performance of pretrained multilingual BERT model emerges from Figures 1-3 . Fine-tuned BERT displays much stronger performance in low-resource settings for the MTAG and NER tasks than for the FGET task (e.g., compare the sub-figures in the first columns of the corresponding figures). The explanation of MTAG and NER performance is intuitive. A pretrained BERT model encodes a massive amount of background knowledge available during its (multilingual) pretraining. However, as we supplement other subword-informed representation learning methods with more data for training the embeddings, the gap gets smaller until it almost completely vanishes: other methods now also get to see some of the distributional information which BERT already consumed in its pretraining.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 133, |
| "end": 144, |
| "text": "Figures 1-3", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "BERT performance on FGET versus MTAG and NER concerns the very nature of the tasks at hand. The input data for FGET consist mostly of 2-4 word tokens (i.e., entity mentions), while MTAG and NER operate on full sentences as input. Since BERT has been pretrained on sentences, this setting is a natural fit and makes fine-tuning to these tasks easier: BERT already provides a sort of \"contextual subword composition function\". This stands in contrast with the other subword-informed approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "There, we might have good non-contextual subword embeddings and a pretrained \"non-contextual\" composition function, but we have to learn how to effectively leverage the context for the task at hand (i.e., by running an LSTM over the subwordinformed token representations) from scratch. 5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Truly Low-Resource Languages. The results on the 12 test languages in Table 3 -4 suggest that subword-informed models are better than the baselines in most cases: this validates the initial findings from the simulation experiments. That is, leveraging subword information is important for WE induction as well as for task-specific training. The Figure 3 : Test performance (F1 score) in the NER task in data scarcity simulation experiments. We do not show results for Chinese as the annotations of the Chinese NER are provided only on the character-level and thus impede experimentation with most of the subword-informed methods used in our evaluation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 70, |
| "end": 77, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 345, |
| "end": 353, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "gains with subword methods become larger for languages with fewer WE data (e.g., ZU, BM, GOT); this is again consistent with the previously reported simulation experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
"text": "Tasks, Language Types, Subword Configurations. The results further suggest that the optimal configuration indeed varies across different tasks and language types, and therefore it is required to carefully tune the configuration to reach improved performance. For instance, as agglutinative languages have different granularities of morphological complexity, it is not even possible to isolate a single optimal segmentation method within this language type. Overall, the segmentation based charn followed by BPE emerge as most robust choices across all languages and tasks. However, charn has the largest number of parameters and is slower to train compared to other segmentations, and in case of BPE its number of merge operations must be tuned to yield competitive scores. While we do not see extremely clear patterns from the results in relation to particular language types, the scores suggest that for agglutinative and fusional languages a hybrid segmentation such as charn or a moderate one (bpe1e4, bpe1e5) is a good choice. For introflexive and isolating languages, more aggressive segmentations seem to be also competitive in FGET and MTAG, with bpe1e4 being very effective for ZH, and charn (again) and bpe1e5 seem to be preferred in NER.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Apart from segmentation methods, we also analyzed the effect of word token embeddings (w+) and position embeddings (p+) in the subwordinformed learning framework (Zhu et al., 2019) (see before Table 2 in \u00a72), shown in Figure 4 . NER can clearly benefit from both w+ and p+ and w+ is also useful for MTAG. However, for other tasks, the fluctuations between configurations are minimal once the segmentation has been fixed, which suggests that the most critical component is indeed the chosen segmentation method: this is why we have mostly focused on the analyses of the segmentation method and its impact on task performance in this work. Regarding the composition functions, as demonstrated in Zhu et al. (2019) , more complex composition functions do not necessarily yield superior results in a range of downstream tasks. We therefore leave the exploration of more sophisticated composition functions for future work.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 180, |
| "text": "(Zhu et al., 2019)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 694, |
| "end": 711, |
| "text": "Zhu et al. (2019)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 193, |
| "end": 200, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 218, |
| "end": 226, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
"text": "We have presented an empirical study focused on the importance of subword-informed word repre- Figure 4 : Comparisons of configurations with and without word token (w-, w+) and the position embedding (p-, p+). The results are obtained by collecting all data points in the data scarcity simulation for four high resource languages and the other 12 languages with both full WE data and task data. sentation architectures for truly low-resource languages. Our experiments on three diverse morphological tasks with 16 typologically diverse languages of varying degrees of data scarcity have validated that subword-level knowledge is indeed crucial for improved task performance in such low-data setups. The large amount of results reported in this work has enabled comparisons of different subword-informed methods in relation to multiple aspects such as the degree of data scarcity (both in terms of embedding training data and task-specific annotated data), the task at hand, the actual language, as well as the methods' internal design (e.g. the choice of the segmentation method). Our results have demonstrated that all these aspects must be considered in order to identify an optimal subword-informed representation architecture for a particular use case, that is, for a particular language (type), task, and data availability. However, similar patterns emerge: e.g., resorting to a segmentation method based on character n-grams seems most robust across the three tasks and across languages, although there are clear outliers. In future work, we will extend our focus to other target languages, including the ones with very limited (Adams et al., 2017) or non-existent digital footprint.",
"cite_spans": [
{
"start": 1634,
"end": 1654,
| "text": "(Adams et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 95, |
| "end": 103, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
"text": "An overview of a variety of subword-informed word representation architectures and different segmentation and composition strategies is provided by Zhu et al. (2019).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For instance, as of April 2019, Wikipedia is available only in 304 out of the estimated 7,000 existing languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "With a smaller number of instances (e.g., 100), NER and MGET model training was unstable and resulted in near-zero performance across multiple runs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We train fastText and skip-gram from word2vec with the same number of epochs that is used to train our subword-informed models on the corresponding data points.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
"text": "Another factor at play is multilingual BERT's limited vocabulary size (100K WordPiece symbols), leaving on average a bit under 1K symbols per language. Due to the different sizes of Wikipedias used for pretraining BERT, some languages might even be represented with far fewer than 1K vocabulary entries, thereby limiting the effective language-specific model capacity. Therefore, it is not that surprising that monolingual subword-informed representations gradually surpass BERT as more language-specific WE data becomes available. This finding is also supported by the results reported in Table 3.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work is supported by the ERC Consolidator Grant LEXICAL: Lexical Acquisition Across Languages (no 648909) and the Klaus Tschira Foundation, Heidelberg, Germany. We thank the three anonymous reviewers for their helpful suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Cross-lingual word embeddings for low-resource language modeling", |
| "authors": [ |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Adams", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Makarucha", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "937--947", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oliver Adams, Adam Makarucha, Graham Neubig, Steven Bird, and Trevor Cohn. 2017. Cross-lingual word embeddings for low-resource language model- ing. In Proceedings of EACL, pages 937-947.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Multilingual projection for parsing truly low-resource languages", |
| "authors": [ |
| { |
| "first": "Zeljko", |
| "middle": [], |
| "last": "Agi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "Johannsen", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Plank", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalie", |
| "middle": [], |
| "last": "H\u00e9ctor Mart\u00ednez Alonso", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "Schluter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "S\u00f8gaard", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the ACL", |
| "volume": "4", |
| "issue": "", |
| "pages": "301--312", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeljko Agi\u0107, Anders Johannsen, Barbara Plank, H\u00e9ctor Mart\u00ednez Alonso, Natalie Schluter, and Anders S\u00f8gaard. 2016. Multilingual projection for parsing truly low-resource languages. Transactions of the ACL, 4:301-312.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Cross-lingual tagger evaluation without test data", |
| "authors": [ |
| { |
| "first": "Zeljko", |
| "middle": [], |
| "last": "Agi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Plank", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "S\u00f8gaard", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "248--253", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeljko Agi\u0107, Barbara Plank, and Anders S\u00f8gaard. 2017. Cross-lingual tagger evaluation without test data. In Proceedings of EACL, pages 248-253.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Contextual string embeddings for sequence labeling", |
| "authors": [ |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Akbik", |
| "suffix": "" |
| }, |
| { |
| "first": "Duncan", |
| "middle": [], |
| "last": "Blythe", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Vollgraf", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "1638--1649", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan Akbik, Duncan Blythe, and Roland Vollgraf. 2018. Contextual string embeddings for sequence labeling. In Proceedings of COLING, pages 1638- 1649.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The interplay of semantics and morphology in word embeddings", |
| "authors": [ |
| { |
| "first": "Oded", |
| "middle": [], |
| "last": "Avraham", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "422--426", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oded Avraham and Yoav Goldberg. 2017. The inter- play of semantics and morphology in word embed- dings. In Proceedings of EACL, pages 422-426.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the ACL", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the ACL, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Adapting word embeddings to new languages with morphological and phonological subword representations", |
| "authors": [ |
| { |
| "first": "Aditi", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Chunting", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Lori", |
| "middle": [], |
| "last": "Levin", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "R" |
| ], |
| "last": "Mortensen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "3285--3295", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aditi Chaudhary, Chunting Zhou, Lori Levin, Gra- ham Neubig, David R. Mortensen, and Jaime Car- bonell. 2018. Adapting word embeddings to new languages with morphological and phonological sub- word representations. In Proceedings of EMNLP, pages 3285-3295.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Crosslingual character-level neural morphological tagging", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Georg", |
| "middle": [], |
| "last": "Heigold", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "748--759", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1078" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Cotterell and Georg Heigold. 2017. Cross- lingual character-level neural morphological tag- ging. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 748-759, Copenhagen, Denmark. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Are all languages equally hard to language-model?", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mielke", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Roark", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Cotterell, Sebastian J. Mielke, Jason Eisner, and Brian Roark. 2018. Are all languages equally hard to language-model? In Proceedings of NAACL- HLT.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Morphological word-embeddings", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1287--1292", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Cotterell and Hinrich Sch\u00fctze. 2015. Morpholog- ical word-embeddings. In Proceedings of NAACL- HLT, pages 1287-1292.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Joint semantic synthesis and morphological analysis of the derived word", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the ACL", |
| "volume": "6", |
| "issue": "", |
| "pages": "33--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Cotterell and Hinrich Sch\u00fctze. 2018. Joint se- mantic synthesis and morphological analysis of the derived word. Transactions of the ACL, 6:33-48.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "One size does not fit all: Comparing NMT representations of different granularities", |
| "authors": [ |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1504--1516", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nadir Durrani, Fahim Dalvi, Hassan Sajjad, Yonatan Belinkov, and Preslav Nakov. 2019. One size does not fit all: Comparing NMT representations of dif- ferent granularities. In Proceedings of NAACL-HLT, pages 1504-1516.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Model transfer for tagging low-resource languages using a bilingual dictionary", |
| "authors": [ |
| { |
| "first": "Meng", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "587--593", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meng Fang and Trevor Cohn. 2017. Model transfer for tagging low-resource languages using a bilingual dictionary. In Proceedings of ACL, pages 587-593.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A new algorithm for data compression", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Gage", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "C Users J", |
| "volume": "12", |
| "issue": "2", |
| "pages": "23--38", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Gage. 1994. A new algorithm for data compres- sion. C Users J., 12(2):23-38.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Convolutional sequence to sequence learning", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Gehring", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Denis", |
| "middle": [], |
| "last": "Yarats", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Dauphin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "1243--1252", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Gehring, Michael Auli, David Grangier, Denis Yarats, and Yann Dauphin. 2017. Convolutional sequence to sequence learning. In Proceedings of ICML, pages 1243-1252.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "On the relation between linguistic typology and (limitations of) multilingual language modeling", |
| "authors": [ |
| { |
| "first": "Daniela", |
| "middle": [], |
| "last": "Gerz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Edoardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "316--327", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniela Gerz, Ivan Vuli\u0107, Edoardo Maria Ponti, Roi Reichart, and Anna Korhonen. 2018. On the rela- tion between linguistic typology and (limitations of) multilingual language modeling. In Proceedings of EMNLP, pages 316-327.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Learning word vectors for 157 languages", |
| "authors": [ |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Prakhar", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "3483--3487", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edouard Grave, Piotr Bojanowski, Prakhar Gupta, Ar- mand Joulin, and Tomas Mikolov. 2018. Learning word vectors for 157 languages. In Proceedings of LREC, pages 3483-3487.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "BPEmb: Tokenization-free pre-trained subword embeddings in 275 languages", |
| "authors": [ |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Heinzerling", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Strube", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benjamin Heinzerling and Michael Strube. 2018. BPEmb: Tokenization-free pre-trained subword em- beddings in 275 languages. In Proceedings of LREC.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/neco.1997.9.8.1735" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Learning to generate word representations using subword information", |
| "authors": [ |
| { |
| "first": "Yeachan", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Kang-Min", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Ji-Min", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sangkeun", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "2551--2561", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yeachan Kim, Kang-Min Kim, Ji-Min Lee, and SangKeun Lee. 2018. Learning to generate word representations using subword information. In Pro- ceedings of COLING, pages 2551-2561.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In Proceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Digital language death", |
| "authors": [ |
| { |
| "first": "Andr\u00e1s", |
| "middle": [], |
| "last": "Kornai", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "PloS One", |
| "volume": "8", |
| "issue": "10", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0077056" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andr\u00e1s Kornai. 2013. Digital language death. PloS One, 8(10):e77056.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Subword regularization: Improving neural network translation models with multiple subword candidates", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "66--75", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taku Kudo. 2018. Subword regularization: Improving neural network translation models with multiple sub- word candidates. In Proceedings of ACL, pages 66- 75.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Neural architectures for named entity recognition", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandeep", |
| "middle": [], |
| "last": "Subramanian", |
| "suffix": "" |
| }, |
| { |
| "first": "Kazuya", |
| "middle": [], |
| "last": "Kawakami", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "260--270", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Miguel Ballesteros, Sandeep Sub- ramanian, Kazuya Kawakami, and Chris Dyer. 2016. Neural architectures for named entity recognition. In Proceedings of NAACL-HLT, pages 260-270.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Phrase-based & neural unsupervised machine translation", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "5039--5049", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Lu- dovic Denoyer, and Marc'Aurelio Ranzato. 2018. Phrase-based & neural unsupervised machine trans- lation. In Proceedings of EMNLP, pages 5039- 5049.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Compositional-ly derived representations of morphologically complex words in distributional semantics", |
| "authors": [ |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Lazaridou", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Marelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Zamparelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1517--1526", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angeliki Lazaridou, Marco Marelli, Roberto Zampar- elli, and Marco Baroni. 2013. Compositional-ly derived representations of morphologically complex words in distributional semantics. In Proceedings of ACL, pages 1517-1526.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Subword-level composition functions for learning word embeddings", |
| "authors": [ |
| { |
| "first": "Bofang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Aleksandr", |
| "middle": [], |
| "last": "Drozd", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyong", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Second Workshop on Subword/Character LEvel Models", |
| "volume": "", |
| "issue": "", |
| "pages": "38--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bofang Li, Aleksandr Drozd, Tao Liu, and Xiaoyong Du. 2018. Subword-level composition functions for learning word embeddings. In Proceedings of the Second Workshop on Subword/Character LEvel Models, pages 38-48.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Fine-grained entity recognition", |
| "authors": [ |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "94--100", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiao Ling and Daniel S. Weld. 2012. Fine-grained en- tity recognition. In Proceedings of AAAI, pages 94- 100.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Achieving open vocabulary neural machine translation with hybrid word-character models", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1054--1063", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong and Christopher D. Manning. 2016. Achieving open vocabulary neural machine transla- tion with hybrid word-character models. In Proceed- ings of ACL, pages 1054-1063.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Gregory S. Corrado, and Jeffrey Dean. 2013. Distributed rep- resentations of words and phrases and their compo- sitionality. In Proceedings of NIPS, pages 3111- 3119.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Universal dependencies v1: A multilingual treebank collection", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Catherine", |
| "middle": [], |
| "last": "De Marneffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Ginter", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Hajic", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "1659--1666", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre, Marie-Catherine de Marneffe, Filip Gin- ter, Yoav Goldberg, Jan Hajic, Christopher D. Man- ning, Ryan McDonald, Slav Petrov, Sampo Pyysalo, Natalia Silveira, Reut Tsarfaty, and Daniel Zeman. 2016. Universal dependencies v1: A multilingual treebank collection. In Proceedings of LREC, pages 1659-1666.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Crosslingual name tagging and linking for 282 languages", |
| "authors": [ |
| { |
| "first": "Xiaoman", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "Boliang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "May", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Nothman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1946--1958", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaoman Pan, Boliang Zhang, Jonathan May, Joel Nothman, Kevin Knight, and Heng Ji. 2017. Cross- lingual name tagging and linking for 282 languages. In Proceedings of ACL, pages 1946-1958.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proccedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proccedings of NAACL-HLT, pages 2227-2237.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Card-660: Cambridge rare word dataset -a reliable benchmark for infrequent word representation models", |
| "authors": [ |
| { |
| "first": "Dimitri", |
| "middle": [], |
| "last": "Mohammad Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Kartsaklis", |
| "suffix": "" |
| }, |
| { |
| "first": "Nigel", |
| "middle": [], |
| "last": "Prokhorov", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Collier", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1391--1401", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Taher Pilehvar, Dimitri Kartsaklis, Vic- tor Prokhorov, and Nigel Collier. 2018. Card-660: Cambridge rare word dataset -a reliable benchmark for infrequent word representation models. In Pro- ceedings of EMNLP, pages 1391-1401.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Mimicking word embeddings using subword RNNs", |
| "authors": [ |
| { |
| "first": "Yuval", |
| "middle": [], |
| "last": "Pinter", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Guthrie", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "102--112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuval Pinter, Robert Guthrie, and Jacob Eisenstein. 2017. Mimicking word embeddings using subword RNNs. In Proceedings of EMNLP, pages 102-112.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Multilingual part-of-speech tagging with bidirectional long short-term memory models and auxiliary loss", |
| "authors": [ |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Plank", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "S\u00f8gaard", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "412--418", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barbara Plank, Anders S\u00f8gaard, and Yoav Goldberg. 2016. Multilingual part-of-speech tagging with bidi- rectional long short-term memory models and auxil- iary loss. In Proceedings of ACL, pages 412-418.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Modeling language variation and universals: A survey on typological linguistics for natural language processing", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Edoardo Maria Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "O'", |
| "middle": [], |
| "last": "Helen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yevgeni", |
| "middle": [], |
| "last": "Horan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Berzak", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Thierry", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Poibeau", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1807.00914" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edoardo Maria Ponti, Helen O'Horan, Yevgeni Berzak, Ivan Vuli\u0107, Roi Reichart, Thierry Poibeau, Ekaterina Shutova, and Anna Korhonen. 2018. Modeling lan- guage variation and universals: A survey on typo- logical linguistics for natural language processing. arXiv preprint arXiv:1807.00914.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of ACL, pages 1715- 1725.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Morfessor 2.0: Toolkit for statistical morphological segmentation", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Smit", |
| "suffix": "" |
| }, |
| { |
| "first": "Sami", |
| "middle": [], |
| "last": "Virpioja", |
| "suffix": "" |
| }, |
| { |
| "first": "Stig-Arne", |
| "middle": [], |
| "last": "Grnroos", |
| "suffix": "" |
| }, |
| { |
| "first": "Mikko", |
| "middle": [], |
| "last": "Kurimo", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "21--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Smit, Sami Virpioja, Stig-Arne Grnroos, and Mikko Kurimo. 2014. Morfessor 2.0: Toolkit for statistical morphological segmentation. In Proceed- ings of EACL, pages 21-24.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "From characters to words to in between: Do we capture morphology?", |
| "authors": [ |
| { |
| "first": "Clara", |
| "middle": [], |
| "last": "Vania", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lopez", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "2016--2027", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Clara Vania and Adam Lopez. 2017. From characters to words to in between: Do we capture morphology? In Proceedings of ACL, pages 2016-2027.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of NIPS, pages 5998- 6008.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Wikidata: A free collaborative knowledge base", |
| "authors": [ |
| { |
| "first": "Denny", |
| "middle": [], |
| "last": "Vrande\u010di\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Kr\u00f6tzsch", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Communications of the ACM", |
| "volume": "57", |
| "issue": "", |
| "pages": "78--85", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Denny Vrande\u010di\u0107 and Markus Kr\u00f6tzsch. 2014. Wiki- data: A free collaborative knowledge base. Commu- nications of the ACM, 57:78-85.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Beto, Bentz, Becas: The surprising cross-lingual effectiveness of BERT. CoRR", |
| "authors": [ |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shijie Wu and Mark Dredze. 2019. Beto, Bentz, Becas: The surprising cross-lingual effectiveness of BERT. CoRR, abs/1904.09077.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Klingner", |
| "suffix": "" |
| }, |
| { |
| "first": "Apurva", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaobing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshikiyo", |
| "middle": [], |
| "last": "Gouws", |
| "suffix": "" |
| }, |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kato", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideto", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Kazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Stevens", |
| "suffix": "" |
| }, |
| { |
| "first": "Nishant", |
| "middle": [], |
| "last": "Kurian", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Patil", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Oriol Vinyals", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, Melvin John- son, Xiaobing Liu, ukasz Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, Alex Rud- nick, Oriol Vinyals, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2016. Google's neural machine translation system: Bridging the gap between human and machine translation. CoRR, abs/1609.08144.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Corpus-level fine-grained entity typing using contextual information", |
| "authors": [ |
| { |
| "first": "Yadollah", |
| "middle": [], |
| "last": "Yaghoobzadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "715--725", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yadollah Yaghoobzadeh and Hinrich Sch\u00fctze. 2015. Corpus-level fine-grained entity typing using contex- tual information. In Proceedings of EMNLP, pages 715-725.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Subword-augmented embedding for cloze reading comprehension", |
| "authors": [ |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yafang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "1802--1814", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuosheng Zhang, Yafang Huang, and Hai Zhao. 2018. Subword-augmented embedding for cloze reading comprehension. In Proceedings of COLING, pages 1802-1814.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Generalizing word embeddings using bag of subwords", |
| "authors": [ |
| { |
| "first": "Jinman", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Sidharth", |
| "middle": [], |
| "last": "Mudgal", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingyu", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "601--606", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinman Zhao, Sidharth Mudgal, and Yingyu Liang. 2018. Generalizing word embeddings using bag of subwords. In Proceedings of EMNLP, pages 601- 606.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "A systematic study of leveraging subword information for learning word representations", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Zhu, Ivan Vuli\u0107, and Anna Korhonen. 2019. A sys- tematic study of leveraging subword information for learning word representations. In Proceedings of NAACL-HLT.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "Test performance (accuracy) in the FGET task for different segmentation methods in data scarcity simulation experiments across 4 languages representing 4 broad morphological types, averaged over 5 runs. Some data points with Chinese (ZH) are not shown as in those cases the subword model is reduced to single characters only.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "Test performance in the MTAG task in data scarcity simulation experiments.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF1": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "Overview of test languages and data availability. EMB denotes the maximum number of tokens in corresponding Wikipedias used for training embeddings. Actual Wikipedia sizes are larger than 5M for (TE, TR, EN, HE, ZH), but were limited to 5M tokens in order to ensure comparable evaluation settings for data scarcity simulation experiments across different languages. FGET, NER, and MTAG rows show the number of instances for the three evaluation tasks (see \u00a73): number of entity mentions for FGET, number of sentences for NER and MTAG. In MTAG, we omit languages for which UDv2.3 provides only a test set, but no training set. The BERT row shows the languages supported by multilingual BERT. Languages are identified by their ISO 639-1 code." |
| }, |
| "TABREF2": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Component Option</td><td>Label</td></tr><tr><td>Segmentation Morfessor</td><td>morf</td></tr><tr><td>BPE</td><td>bpeX</td></tr><tr><td>char n-gram</td><td>charn</td></tr><tr><td>Word token exclusion</td><td>w-</td></tr><tr><td>inclusion</td><td>w+</td></tr><tr><td>Position embedding exclusion</td><td>p-</td></tr><tr><td>additive</td><td>p+</td></tr><tr><td>Composition function addition</td><td>add</td></tr></table>", |
| "text": "have empirically verified that composition by simple addition, among other more complex composition functions, is a robust choice. Therefore, we use addition in all our experiments. Similar to Bojanowski et al. (2017); Zhu et al. (2019), we adopt skip-gram with negative sampling" |
| }, |
| "TABREF4": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td/><td/><td colspan=\"2\">Agglutinative</td><td/><td/><td/><td>Fusional</td><td/><td/><td colspan=\"2\">Introflexive Isolating</td></tr><tr><td>BM</td><td>BXR</td><td>MYV</td><td>TE</td><td>ZU</td><td>FO</td><td>GA</td><td>GOT</td><td>MT</td><td>RUE</td><td>AM</td><td>YO</td></tr><tr><td>bert -</td><td>-</td><td>-</td><td colspan=\"2\">49.20 -</td><td>-</td><td colspan=\"2\">47.09 -</td><td>-</td><td>-</td><td>-</td><td>81.76</td></tr><tr><td>bert -</td><td>-</td><td>-</td><td colspan=\"2\">82.31 -</td><td>-</td><td colspan=\"2\">88.45 -</td><td>-</td><td>-</td><td>-</td><td>95.53</td></tr></table>", |
| "text": "morf 52.43 52.47 79.11 57.79 53.00 54.43 50.77 29.90 49.48 50.38 41.82 83.43 charn 56.09 57.33 81.69 58.83 56.34 58.44 52.62 34.02 54.46 58.59 45.65 84.85 bpe1e3 53.61 51.30 81.13 58.73 55.41 56.04 50.74 31.55 52.79 55.57 47.99 85.22 bpe1e4 54.20 53.81 81.93 59.24 55.67 56.67 51.47 26.39 52.15 54.81 47.05 84.42 bpe1e5 -53.80 80.00 58.13 -56.31 51.52 -51.52 52.52 44.74 83.39 ft 51.91 57.96 81.05 57.79 52.62 53.74 49.67 31.96 53.95 53.64 44.80 83.71 w2v 52.28 42.19 76.86 56.99 52.95 53.07 49.07 24.53 46.61 47.36 36.81 82.56 morf 73.29 76.58 83.40 77.01 65.22 84.29 86.94 59.49 74.37 81.87 66.67 90.01 charn 83.02 81.59 93.22 88.23 74.47 91.08 88.95 84.99 83.56 88.70 72.92 94.68 bpe1e3 77.22 79.33 89.00 85.82 71.91 89.73 89.18 81.03 81.63 85.30 70.84 92.35 bpe1e4 76.43 79.73 89.00 85.44 65.22 89.25 88.48 70.59 80.26 86.39 64.07 92.47 bpe1e5 -80.65 89.36 84.02 -88.66 89.48 -81.64 86.12 68.95 93.07 ft 73.29 79.81 88.57 86.88 58.16 89.48 89.18 58.16 81.64 83.54 68.29 92.58 w2v 69.57 79.66 87.50 82.97 62.37 87.81 87.99 58.56 79.43 84.21 61.37 89.57" |
| }, |
| "TABREF5": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "Test accuracy for FGET and test F1 score for NER for the 12 low-resource test languages. The results are obtained by training on the full WE data (except for BERT) and the full task data of the corresponding languages." |
| }, |
| "TABREF7": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "Test accuracy for MTAG for low-resource languages from UD where train/dev/test sets are available." |
| } |
| } |
| } |
| } |