| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:42:58.163280Z" |
| }, |
| "title": "The LMU Munich System for the WMT 2020 Unsupervised Machine Translation Shared Task", |
| "authors": [ |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Chronopoulou", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "LMU Munich", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Stojanovski", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "LMU Munich", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "stojanovski@cis.lmu.de" |
| }, |
| { |
| "first": "Viktor", |
| "middle": [], |
| "last": "Hangya", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "LMU Munich", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "hangyav@cis.lmu.de" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Fraser", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "LMU Munich", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "fraser@cis.lmu.de" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes the submission of LMU Munich to the WMT 2020 unsupervised shared task, in two language directions, German\u2194Upper Sorbian. Our core unsupervised neural machine translation (UNMT) system follows the strategy of Chronopoulou et al. (2020), using a monolingual pretrained language generation model (on German) and finetuning it on both German and Upper Sorbian, before initializing a UNMT model, which is trained with online backtranslation. Pseudoparallel data obtained from an unsupervised statistical machine translation (USMT) system is used to fine-tune the UNMT model. We also apply BPE-Dropout to the low-resource (Upper Sorbian) data to obtain a more robust system. We additionally experiment with residual adapters and find them useful in the Upper Sorbian\u2192German direction. We explore sampling during backtranslation and curriculum learning to use SMT translations in a more principled way. Finally, we ensemble our best-performing systems and reach a BLEU score of 32.4 on German\u2192Upper Sorbian and 35.2 on Upper Sorbian\u2192German.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes the submission of LMU Munich to the WMT 2020 unsupervised shared task, in two language directions, German\u2194Upper Sorbian. Our core unsupervised neural machine translation (UNMT) system follows the strategy of Chronopoulou et al. (2020), using a monolingual pretrained language generation model (on German) and finetuning it on both German and Upper Sorbian, before initializing a UNMT model, which is trained with online backtranslation. Pseudoparallel data obtained from an unsupervised statistical machine translation (USMT) system is used to fine-tune the UNMT model. We also apply BPE-Dropout to the low-resource (Upper Sorbian) data to obtain a more robust system. We additionally experiment with residual adapters and find them useful in the Upper Sorbian\u2192German direction. We explore sampling during backtranslation and curriculum learning to use SMT translations in a more principled way. Finally, we ensemble our best-performing systems and reach a BLEU score of 32.4 on German\u2192Upper Sorbian and 35.2 on Upper Sorbian\u2192German.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Neural machine translation achieves remarkable results (Bahdanau et al., 2015; Vaswani et al., 2017) when large parallel training corpora are available. However, such corpora are only available for a limited number of languages. UNMT addresses this issue by using monolingual data only (Artetxe et al., 2018c; Lample et al., 2018) . The performance of UNMT models is further improved using transfer learning from a pretrained cross-lingual model (Lample and Conneau, 2019; Song et al., 2019) . However, pretraining also demands large monolingual corpora for both languages. Without abundant data, UNMT methods are often ineffective (Guzm\u00e1n et al., 2019) . Therefore, effectively translating between a high-resource and a low-resource language, in terms of monolingual data, which is the target of this year's unsupervised shared task, is challenging.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 78, |
| "text": "(Bahdanau et al., 2015;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 79, |
| "end": 100, |
| "text": "Vaswani et al., 2017)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 286, |
| "end": 309, |
| "text": "(Artetxe et al., 2018c;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 310, |
| "end": 330, |
| "text": "Lample et al., 2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 446, |
| "end": 472, |
| "text": "(Lample and Conneau, 2019;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 473, |
| "end": 491, |
| "text": "Song et al., 2019)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 632, |
| "end": 653, |
| "text": "(Guzm\u00e1n et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We participate in the WMT 2020 unsupervised machine translation shared task. The task includes two directions: German\u2192Upper Sorbian (De\u2192Hsb) and Upper Sorbian\u2192German (Hsb\u2192De). Our systems are constrained, using only the provided Hsb monolingual data and De NewsCrawl monolingual data released for WMT. We pretrain a monolingual encoder-decoder model on a language generation task with the Masked Sequence to Sequence model (MASS) (Song et al., 2019) and fine-tune it on both languages of interest, following Chronopoulou et al. (2020) . We then train it on UNMT, using online backtranslation. We use our USMT system to backtranslate monolingual data in both languages. This pseudo-parallel corpus serves to fine-tune our UNMT model. Iterative offline backtranslation is later leveraged, yielding a performance boost. We use BPE-Dropout (Provilkov et al., 2020) as a data augmentation technique, sampling instead of greedy decoding in online backtranslation, and curriculum learning to best include the SMT pseudo-parallel data. We also use residual adapters (Houlsby et al., 2019) to translate to the low-resource language (Hsb). Results Summary. The ensemble of our bestperforming systems yields the best performance in terms of BLEU 1 among the participants of the unsupervised machine translation shared task. We release the code and our best models 2 in order to facilitate reproduction of our work and experimentation in this field. We note that we have built upon Language generation pretraining Figure 1 : Illustration of our system. We denote with green the systems that were ensembled for the De\u2192Hsb direction and with maroon the systems that were ensembled for the Hsb\u2192De direction. Right arrows indicate transfer of weights. The numbers in gray correspond to the rows of Table 1 . Online BT refers to the backtranslation of sentences with the actual model and updating it with the generated pseudo-parallel data. \nPseudo-SMT refers to data obtained by backtranslating using the USMT baseline system while pseudo-NMT to our translations using system 5. The components of our approach are explained in Section 2.", |
| "cite_spans": [ |
| { |
| "start": 430, |
| "end": 449, |
| "text": "(Song et al., 2019)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 508, |
| "end": 534, |
| "text": "Chronopoulou et al. (2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 836, |
| "end": 860, |
| "text": "(Provilkov et al., 2020)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1058, |
| "end": 1080, |
| "text": "(Houlsby et al., 2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1502, |
| "end": 1510, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1782, |
| "end": 1789, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "the MASS codebase 3 for our experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Figure 1 presents all the different components of our system and how they are connected to each other. We train both an unsupervised SMT (#1) and NMT (#2) model. The UNMT model is based on a pretrained MASS model (#0), which is monolingual (De). The model is later fine-tuned on both Hsb and De. We additionally explore fine-tuning only on Hsb using adapters. These models are used to initialize an NMT model (#2, #4) which is trained with online backtranslation. We additionally experiment with sampling (#3) during backtranslation. The USMT model is used to backtranslate Hsb and De data. This synthetic bi-text is used to fine-tune the baseline UNMT model (#5). We use the synthetic bi-text also to fine-tune directly the adapter-augmented MASS model, while employing online backtranslation and sampling (#8). We experiment with curriculum learning (#6) to estimate the optimal way to feed the model this pseudo-parallel data. We also use our UNMT model to generate backtranslations and fine-tune existing models (#7). Further USMT-backtranslated data is used in #9. Finally, some models are fine-tuned with monolingual data which is oversampled and segmented 3 https://github.com/microsoft/MASS with BPE-Dropout (#10, #11). The details of these components are outlined in the following.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "First we describe the USMT system which we use to generate pseudo-parallel data to fine-tune our NMT system. We use monoses (Artetxe et al., 2018b) , which builds unsupervised bilingual word embeddings (BWEs) and integrates them to Moses (Koehn et al., 2006) , but apply some modifications to it. As a first step, we build unsupervised BWEs with fastText (Bojanowski et al., 2017) and VecMap (Artetxe et al., 2018a) containing representations of 1-, 2and 3-grams. Since the size of the available monolingual Hsb data is low, mapping monolingual embeddings to BWEs without any bilingual signal fails, i.e., we find no meaningful translations by manually investigating the most similar crosslingual pairs of a few words. Instead, we rely on identical words occurring in both De and Hsb corpora as the initial seed dictionary. The BWEs are then converted to phrase-tables using cosine similarity of words and a language model is trained on the available monolingual data. The shared task organizers released a validation set which we use to tune the parameters of the system with MERT, instead of running unsupervised tuning as described in Artetxe et al. (2018b) . Finally, we run 4 iterative refinement steps to further improve the system. Other than the above, all steps and parameters are unchanged.", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 147, |
| "text": "(Artetxe et al., 2018b)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 238, |
| "end": 258, |
| "text": "(Koehn et al., 2006)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 355, |
| "end": 380, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1138, |
| "end": 1160, |
| "text": "Artetxe et al. (2018b)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unsupervised SMT", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We use this system in inference mode to backtranslate 7M De and 750K Hsb sentences. We refer to this pseudo-parallel dataset as 7.7M SMT pseudo-parallel. We also backtranslate 10M more De sentences. This dataset is later used to fine-tune one of our systems. We refer to it as 10M Hsb-De SMT pseudo-parallel.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unsupervised SMT", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We initialize our UNMT systems with an encoderdecoder Transformer (Vaswani et al., 2017) , which is pretrained using the MASS (Song et al., 2019) objective. The model is pretrained by trying to reconstruct a sentence fragment given the remaining part of the sentence. The encoder takes a randomly masked fragment as input, while the decoder tries to predict the masked fragment. MASS is inspired by BERT (Devlin et al., 2019) , but is more suitable for machine translation, as it pretrains the encoder-decoder and the attention mechanism, whereas BERT is an encoder Transformer. In order to pretrain the model, instead of training MASS on both De and Hsb, we initially train it on De. After this, we fine-tune it on both De and Hsb, following RE-LM (Chronopoulou et al., 2020) . The intuition behind this is that, if we simultaneously train a cross-lingual model on unbalanced data, where X is much larger than Y , the model starts to overfit the low-resource side Y before being trained on all the high-resource language data (X). This results in poor translations. We refer to our pretrained model as FINE-TUNED MASS.", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 88, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 126, |
| "end": 145, |
| "text": "(Song et al., 2019)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 404, |
| "end": 425, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 749, |
| "end": 776, |
| "text": "(Chronopoulou et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MASS", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "To fine-tune the pretrained De MASS model on Hsb, we need to overcome the following issue: the pretrained model uses BPE segmentation and vocabulary based only on De. To this end, we again follow RE-LM. We denote these BPE tokens as BPE De and the resulting vocabulary as V De . We aim to fine-tune the monolingual MASS model to Hsb. Splitting Hsb with BPE De would result in heavy segmentation of Hsb words. To prevent this from happening, we learn BPEs on the joint De and Hsb corpus (BPE joint ). We then use BPE joint tokens to split the Hsb data, resulting in a vocabulary V Hsb . This method increases the number of shared tokens and enables cross-lingual transfer of the pretrained model. The final vocabulary is the union of the V De and V Hsb vocabularies. We extend the input and output embedding layer to account for the new vocabulary items. The new parameters are then learned during fine-tuning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Vocabulary Extension for NMT", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Besides initializing our UNMT systems with FINE-TUNED MASS, we also experiment with pretraining MASS on De and fine-tuning only on Hsb. During fine-tuning, we freeze the encoder and decoder Transformer layers and add adapters (Houlsby et al., 2019) to each of the Transformer layers. Adapters can prevent catastrophic forgetting (Goodfellow et al., 2013) and show promising results in various tasks (Bapna and Firat, 2019; Artetxe et al., 2020) . We fine-tune only the output layer, the embeddings and the decoder's attention to the encoder as well as the lightweight adapter layers.", |
| "cite_spans": [ |
| { |
| "start": 226, |
| "end": 248, |
| "text": "(Houlsby et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 399, |
| "end": 422, |
| "text": "(Bapna and Firat, 2019;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 423, |
| "end": 444, |
| "text": "Artetxe et al., 2020)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adapters", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We investigate adapters as fine-tuning in this way is considerably more computationally efficient. We also experimented with freezing the decoder's attention to the encoder as well as adding an adapter on top of it, but these architecture designs are worse in terms of perplexity during MASS fine-tuning as well as BLEU scores during UNMT.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adapters", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We use the fine-tuned model to initialize an encoder-decoder Transformer, augmented with adapters. The adapter-augmented model is then trained in an unsupervised way, using online backtranslation. All layers are trainable during unsupervised NMT training. We refer to this model as FINE-TUNED MASS + ADAPTERS.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adapters", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We initialize our UNMT models with FINE-TUNED MASS. Following Song et al. 2019, we train the systems in an unsupervised manner, using online backtranslation (Sennrich et al., 2016a) of the monolingual Hsb and De data, that were also used for pretraining. As proposed in Song et al. 2019, we do not use denoising auto-encoding (Vincent et al., 2008) . We use online backtranslation to generate pseudo bilingual data for training. We refer to the resulting model as UNMT BASELINE.", |
| "cite_spans": [ |
| { |
| "start": 157, |
| "end": 181, |
| "text": "(Sennrich et al., 2016a)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 326, |
| "end": 348, |
| "text": "(Vincent et al., 2008)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unsupervised NMT (online backtranslation)", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "We experiment with sampling instead of greedy decoding during online backtranslation. Edunov et al. (2018) show that sampling is beneficial for backtranslation compared to greedy decoding or beam search for systems trained on larger amounts of parallel data. Although we do not use any parallel data, we assumed that our initial UNMT baseline is of reasonable quality and that sampling would be beneficial. However, in order to provide a balance, we randomly use either greedy decoding or sampling during training. The frequency with which sampling is used is a hyperparameter which we set to 0.5. Sampling temperature is set to 0.95.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 106, |
| "text": "Edunov et al. (2018)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampling", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "Considering the high improvements achieved by including SMT backtranslated data, we conduct experiments to determine a more meaningful way to feed the data to the model using curriculum learning (Kocmi and Bojar, 2017; Platanios et al., 2019; Zhang et al., 2019) . We learn the curriculum using Bayesian Optimization (BO) for which we use an open source implementation 4 . Similar work has been proposed for transfer learning (Ruder and Plank, 2017) and NMT (Wang et al., 2020) . As we already have a reasonably trained NMT model, we use it to compute instance-level features for learning the curriculum. Each sentence pair from the SMT backtranslated data is represented with two features: the model scores for this pair in the original (backtranslation \u2192 monolingual sentence) and reverse direction (monolingual \u2192 backtranslation). The weights that determine the importance of these features are learned separately for De\u2192Hsb and Hsb\u2192De, so that we have 4 features in total. BO runs for 30 trials. The feature weights are constrained in the range [\u22121, 1]. Each trial runs 5.4K NMT updates. The curriculum optimizes the sum of Hsb\u2192De and De\u2192Hsb validation perplexity. For the optimization trials, we only use the SMT backtranslated data as pseudo-parallel data and do not use online backtranslation. Finally, based on the feature weights and the features for each sentence, we sort the pseudo-parallel data and fine-tune the UNMT BASELINE with SMT backtranslations and online backtranslation. It would be interesting to study if a similar approach can be used to estimate a more optimal loading of monolingual data during MASS pretraining and UNMT.", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 218, |
| "text": "(Kocmi and Bojar, 2017;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 219, |
| "end": 242, |
| "text": "Platanios et al., 2019;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 243, |
| "end": 262, |
| "text": "Zhang et al., 2019)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 426, |
| "end": 449, |
| "text": "(Ruder and Plank, 2017)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 458, |
| "end": 477, |
| "text": "(Wang et al., 2020)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Curriculum learning", |
| "sec_num": "2.6" |
| }, |
| { |
| "text": "We also experiment with creating synthetic training data using offline backtranslation with one of our UNMT systems (#5 in Table 1 ). We translate 750K De sentences to Hsb and 750K Hsb sen-4 https://ax.dev/ tences to De. The resulting pseudo-parallel system is denoted as 750K NMT pseudo-parallel corpus and is used to fine-tune the same system.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 123, |
| "end": 130, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Offline Iterative Backtranslation", |
| "sec_num": "2.7" |
| }, |
| { |
| "text": "BPE segmentation is useful in machine translation, as it efficiently addresses the open vocabulary problem. This approach keeps the most frequent words intact and splits the rare ones into multiple tokens. It builds a vocabulary of subwords and a merge table, specifying which subwords have to be merged and the priority of the merges. BPE segmentation always splits a word deterministically. Introducing stochasticity to the algorithm (Provilkov et al., 2020) , by simply removing a merge from the merges with a pre-defined probability p, results in significant BLEU improvements for various languages in low-and medium-resource datasets.", |
| "cite_spans": [ |
| { |
| "start": 436, |
| "end": 460, |
| "text": "(Provilkov et al., 2020)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BPE-Dropout", |
| "sec_num": "2.8" |
| }, |
| { |
| "text": "We use BPE-Dropout in the following way: we oversample the Hsb monolingual data by a factor of 10 and apply BPE-Dropout. In that way, we get different segmentations of the same sentences and feed this data to the model. We also oversample the 750K SMT pseudo-parallel corpus in the same manner, but only apply BPE-Dropout to the Hsb side. These monolingual and pseudo-parallel oversampled datasets are used to fine-tune our models. These systems perform better than our other single systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BPE-Dropout", |
| "sec_num": "2.8" |
| }, |
| { |
| "text": "For the final models, we perform ensemble decoding with the best training models obtained in our experiments. We evaluate several combinations of model ensembles. Based on BLEU scores on the test set provided during development, we decide on two separate ensembles for De\u2192Hsb and Hsb\u2192De for the final submission.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ensembling", |
| "sec_num": "2.9" |
| }, |
| { |
| "text": "In line with the rules of the WMT 2020 unsupervised shared task 5 , we used 327M sentences from WMT monolingual News Crawl 6 dataset for German, collected over the period of 2007 to 2019. We also used the Upper Sorbian side of the provided parallel data as well as all of the monolingual data, a total amount of 756K sentences, provided by the Table 1 : BLEU scores of UMT for De-Hsb and Hsb-De systems. The systems with the underlined results were ensembled and used in our primary submissions. #12 is our primary system submitted to the organizers in the De\u2192Hsb direction, while #13 is our primary system submitted in the Hsb\u2192De direction. 6* was trained after the shared task and is not used for the final submission.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 344, |
| "end": 351, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "organizers. We used the provided parallel data for validation/testing (2K/2K sentences). We normalized punctuation, tokenized and true-cased the data using standard scripts from the Moses toolkit (Koehn et al., 2006) . We note that we tokenized Hsb data using Czech as the language of tokenization, since these two languages are very closely related and there are no tokenization rules for Hsb in Moses. We used BPE (Sennrich et al., 2016b) segmentation for our neural system. Specifically, we learned 32K codes and computed the vocabulary using the De data. We then also learned the same amount of BPEs on the joint corpus (De, Hsb) and computed the joint vocabulary. We extended the initial vocabulary, adding to it unseen items. We used this augmented vocabulary to fine-tune the MASS model and run all the UNMT training experiments.", |
| "cite_spans": [ |
| { |
| "start": 196, |
| "end": 216, |
| "text": "(Koehn et al., 2006)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 416, |
| "end": 440, |
| "text": "(Sennrich et al., 2016b)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We fixed the quotes to be the same as in the source sentences (German-style). We also applied a recaser using Moses (Koehn et al., 2006) to convert the translations to mixed case.", |
| "cite_spans": [ |
| { |
| "start": 116, |
| "end": 136, |
| "text": "(Koehn et al., 2006)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Post-processing", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Unsupervised SMT. As mentioned before, we used fastText (Bojanowski et al., 2017) to build 300 dimensional embeddings on the available monolingual data. We build BWEs with VecMap (Artetxe et al., 2018a) using identical words as the seed dictionary and restricting the vocabulary to the most frequent 200K, 400K and 400K 1-, 2and 3-grams respectively. We used monoses (Artetxe et al., 2018b) as the USMT pipeline but used the available validation data for parameter tuning and ran 4 iterative refinement steps. MASS. We use a Transformer, which consists of 6-layer encoder and 6-layer decoder with 1024 embedding/hidden size, 4096 feed-forward network size and 8 attention heads. We pretrain MASS on De monolingual data, using Adam (Kingma and Ba, 2015) optimizer with inverse square root learning rate scheduling and a learning rate of 10 \u22124 . We used a per-GPU batch size of 32. We trained the model for approximately 2 weeks on 8 NVIDIA GTX 1080 Ti 11 GB GPUs. The rest of the hyperparameters follows the original MASS paper. We fine-tune MASS on both De and Hsb using the same setup, but on 4 GPUs of the same type. Fine-tuning was performed for 2 days. Unsupervised NMT. For unsupervised NMT, we further train the fine-tuned MASS using online backtranslation. We use 4 GPUs to train each one of our UNMT models. We report BLEU using SacreBLEU (Post, 2018) 7 on the provided test set. Unsupervised NMT + Pseudo-parallel MT. We train our UNMT systems using a pseudo-parallel supervised translation loss, in addition to the online backtranslation objective. We found out that aug-menting UNMT systems with pseudo-parallel data obtained by USMT leads to major improvements in translation quality, as previous work has showed (Artetxe et al., 2018b; Stojanovski et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 81, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 367, |
| "end": 390, |
| "text": "(Artetxe et al., 2018b)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1347, |
| "end": 1359, |
| "text": "(Post, 2018)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1725, |
| "end": 1748, |
| "text": "(Artetxe et al., 2018b;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1749, |
| "end": 1774, |
| "text": "Stojanovski et al., 2019)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The results of our systems on the test set provided during development are presented in Table 1 . Our USMT model (#1) performs competitively, but is largely outperformed by the UNMT baseline (#2). These results are interesting considering that both systems are trained using small amounts of monolingual Hsb data. We believe that the performance of the UNMT model is largely due to the MASS fine-tuning scheme which allowed us to obtain a strong pretrained model for both languages. We also observe (#3) that mixing greedy decoding and sampling during backtranslation is beneficial compared to always using greedy decoding (#2), especially for De \u2192Hsb which improved by 1.0 BLEU. However, it is likely that sampling is useful only if the model is of reasonable quality. We note that the adapter-augmented model (#4) is worse than the UNMT baseline.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 88, |
| "end": 95, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "After these initial experiments, we use the USMT model (#1) to backtranslate all Hsb monolingual data and 7M De sentences. This pseudo-parallel data is leveraged to fine-tune our UNMT models alongside online backtranslation. This approach, denoted as model #5, improves the UNMT baseline (#3) by more than 5.5 BLEU for De\u2192Hsb and 4.5 BLEU for Hsb\u2192De. The curriculum learning approach (#6) yields a small improvement of 0.6 BLEU for Hsb\u2192De. Unfortunately, the curriculum learning model ran without the use of sampling. We later train the model with sampling (#6*) and obtain slight improvements in both directions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Using NMT backtranslations in an offline manner (#7) provides for a large improvement in the Hsb\u2192De direction, obtaining 33.2 BLEU. Further training our high scoring model #7 on USMT backtranslations, depicted as model #9, degrades performance on Hsb\u2192De. This might indicate that USMT backtranslations alone are not very important for high performance, but simply adding any kind of pseudo-parallel data during training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The adapter-augmented model with USMT backtranslations (#8) manages to close the gap to the baseline model. Comparing #5 and #8, we can see that the model with adapters is worse by 0.9 BLEU on De\u2192Hsb, but better by 0.4 on Hsb\u2192De. Due to time constraints, we train #4 and #8 in parallel and #8 is not fine-tuned from #4. Overall, adapters are a promising research direction as they lead to faster MASS fine-tuning and comparable performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We observe considerable improvements using BPE-Dropout. As noted before, we oversample the parallel and Hsb monolingual data and apply BPE-Dropout only on Hsb. We use this data to fine-tune some of our already trained models, specifically #5 and #7 which results in models #10 and #11, respectively. This approach improves the Hsb\u2192De direction by up to 1.5 BLEU and up to 1.0 BLEU for De\u2192Hsb. System #11 proved to be our best single system in both translation directions. We hypothesize that using BPE-Dropout while simultaneously oversampling the data provides for a data augmentation effect. In future work, it would be interesting to decouple these two steps and measure their effect separately.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Ensembling further boosts performance. Ensemble #12 is used for De\u2192Hsb and #13 for Hsb\u2192De. We note that while computing ensemble BLEU scores during development, we did not fix the issue with German-style quotes. This resulted in ensemble #13 obtaining better scores on Hsb\u2192De. We later fix the quotes issue and find out that ensemble #12 is better on both translation directions and is the best system overall.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this paper, we present the LMU Munich system for the WMT 2020 unsupervised shared task for translation between German and Upper Sorbian. Our system is a combination of an SMT and an NMT model trained in an unsupervised way. The UNMT model is trained by fine-tuning a MASS model, according to the recently proposed RE-LM approach. The experiments show that the MASS fine-tuning technique is efficient even if little monolingual data is available for one language and results in a strong UNMT model. We also show that using pseudoparallel data from USMT and UNMT backtranslations improves performance considerably. Furthermore, we show that oversampling the low-resource Upper Sorbian and applying BPE-Dropout, which can effectively be seen as data augmentation, results in further improvements. Adapters in MASS fine-tuning provided for a balance between performance and computational efficiency. Finally, smaller but noticeable gains are obtained from us-ing curriculum learning and sampling during decoding in backtranslation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "http://matrix.statmt.org/matrix/ systems_list/1920 2 https://github.com/alexandra-chron/ umt-lmu-wmt2020", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.statmt.org/wmt20/unsup_ and_very_low_res/ 6 http://data.statmt.org/news-crawl/de/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by the European Research Council (ERC) under the European Union's Horizon research and innovation programme (grant agreement No. 640550) and by the German Research Foundation (DFG; grant FR 2829/4-1). We would like to thank Jind\u0159ich Libovick\u00fd for fruitful discussions regarding the use of BPE-Dropout as a data augmentation technique.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "A robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "789--798", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018a. A robust self-learning method for fully un- supervised cross-lingual mappings of word embed- dings. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 789-798.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Unsupervised statistical machine translation", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3632--3642", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1399" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018b. Unsupervised statistical machine transla- tion. In Proceedings of the Conference on Empiri- cal Methods in Natural Language Processing, pages 3632-3642.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Unsupervised neural machine translation", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018c. Unsupervised neural ma- chine translation. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "On the cross-lingual transferability of monolingual representations", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Dani", |
| "middle": [], |
| "last": "Yogatama", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. 2020. On the cross-lingual transferability of mono- lingual representations. In Proceedings of the An- nual Meeting of the Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In International Con- ference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Simple, scalable adaptation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Bapna", |
| "suffix": "" |
| }, |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing and the International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1538--1548", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankur Bapna and Orhan Firat. 2019. Simple, scalable adaptation for neural machine translation. In Pro- ceedings of the Conference on Empirical Methods in Natural Language Processing and the International Joint Conference on Natural Language Processing, pages 1538-1548.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Enriching Word Vectors with Subword Information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching Word Vectors with Subword Information. Transactions of the Associa- tion for Computational Linguistics, pages 135-146.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Reusing a Pretrained Language Model on Languages with Limited corpora for Unsupervised NMT", |
| "authors": [ |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Chronopoulou", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Stojanovski", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Fraser", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2009.07610" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexandra Chronopoulou, Dario Stojanovski, and Alexander Fraser. 2020. Reusing a Pretrained Language Model on Languages with Limited cor- pora for Unsupervised NMT. arXiv preprint arXiv:2009.07610.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technolo- gies, pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Understanding back-translation at scale", |
| "authors": [ |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "489--500", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1045" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sergey Edunov, Myle Ott, Michael Auli, and David Grangier. 2018. Understanding back-translation at scale. In Proceedings of the Conference on Empiri- cal Methods in Natural Language Processing, pages 489-500.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "An empirical investigation of catastrophic forgetting in gradient-based neural networks", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Ian", |
| "suffix": "" |
| }, |
| { |
| "first": "Mehdi", |
| "middle": [], |
| "last": "Goodfellow", |
| "suffix": "" |
| }, |
| { |
| "first": "Da", |
| "middle": [], |
| "last": "Mirza", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1312.6211" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian J Goodfellow, Mehdi Mirza, Da Xiao, Aaron Courville, and Yoshua Bengio. 2013. An em- pirical investigation of catastrophic forgetting in gradient-based neural networks. arXiv preprint arXiv:1312.6211.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "The FLORES evaluation datasets for low-resource machine translation: Nepali-English and Sinhala-English", |
| "authors": [ |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng-Jen", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Pino", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing and the International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "6100--6113", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francisco Guzm\u00e1n, Peng-Jen Chen, Myle Ott, Juan Pino, Guillaume Lample, Philipp Koehn, Vishrav Chaudhary, and Marc'Aurelio Ranzato. 2019. The FLORES evaluation datasets for low-resource ma- chine translation: Nepali-English and Sinhala- English. In Proceedings of the Conference on Em- pirical Methods in Natural Language Processing and the International Joint Conference on Natural Language Processing, pages 6100-6113.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Parameter-efficient transfer learning for NLP", |
| "authors": [ |
| { |
| "first": "Neil", |
| "middle": [], |
| "last": "Houlsby", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Giurgiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanislaw", |
| "middle": [], |
| "last": "Jastrzebski", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruna", |
| "middle": [], |
| "last": "Morrone", |
| "suffix": "" |
| }, |
| { |
| "first": "Quentin", |
| "middle": [], |
| "last": "De Laroussilhe", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Gesmundo", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Attariyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Gelly", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin de Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In Proceedings of the International Conference on Machine Learning.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederick", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederick P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Curriculum learning and minibatch bucketing in neural machine translation", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kocmi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "379--386", |
| "other_ids": { |
| "DOI": [ |
| "10.26615/978-954-452-049-6_050" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kocmi and Ond\u0159ej Bojar. 2017. Curriculum learn- ing and minibatch bucketing in neural machine trans- lation. In Proceedings of the International Confer- ence Recent Advances in Natural Language Process- ing, pages 379-386.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Open source toolkit for statistical machine translation: Factored translation models and confusion network decoding", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| }, |
| { |
| "first": "Wade", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Bertoldi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ondrej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Brooke", |
| "middle": [], |
| "last": "Cowan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Zens", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Marcello Federico, Wade Shen, Nicola Bertoldi, Ondrej Bojar, Chris Callison-Burch, Brooke Cowan, Chris Dyer, Hieu Hoang, Richard Zens, et al. 2006. Open source toolkit for statisti- cal machine translation: Factored translation models and confusion network decoding. In Final Report of the 2006 JHU Summer Workshop.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Crosslingual language model pretraining", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "7057--7067", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross- lingual language model pretraining. In Advances in Neural Information Processing Systems, page 7057-7067.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Phrase-based & neural unsupervised machine translation", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "5039--5049", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Lu- dovic Denoyer, and Marc'Aurelio Ranzato. 2018. Phrase-based & neural unsupervised machine trans- lation. In Proceedings of the Conference on Empiri- cal Methods in Natural Language Processing, pages 5039-5049.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Competence-based curriculum learning for neural machine translation", |
| "authors": [ |
| { |
| "first": "Otilia", |
| "middle": [], |
| "last": "Emmanouil Antonios Platanios", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Stretcu", |
| "suffix": "" |
| }, |
| { |
| "first": "Barnabas", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Poczos", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1162--1172", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1119" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emmanouil Antonios Platanios, Otilia Stretcu, Graham Neubig, Barnabas Poczos, and Tom Mitchell. 2019. Competence-based curriculum learning for neural machine translation. In Proceedings of the Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, pages 1162-1172.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A call for clarity in reporting BLEU scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "186--191", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6319" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Conference on Ma- chine Translation: Research Papers, pages 186- 191.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "BPE-Dropout: Simple and effective subword regularization", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Provilkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Dmitrii", |
| "middle": [], |
| "last": "Emelianenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Voita", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1882--1892", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Provilkov, Dmitrii Emelianenko, and Elena Voita. 2020. BPE-Dropout: Simple and effective subword regularization. In Proceedings of the Annual Meet- ing of the Association for Computational Linguistics, pages 1882-1892.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Learning to select data for transfer learning with Bayesian optimization", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Plank", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "372--382", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1038" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Ruder and Barbara Plank. 2017. Learning to select data for transfer learning with Bayesian op- timization. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, pages 372-382.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Improving neural machine translation models with monolingual data", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "86--96", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016a. Improving neural machine translation mod- els with monolingual data. In Proceedings of the An- nual Meeting of the Association for Computational Linguistics, pages 86-96.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016b. Neural machine translation of rare words with subword units. In Proceedings of the Annual Meeting of the Association for Computational Lin- guistics, pages 1715-1725.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "MASS: Masked Sequence to Sequence pre-training for language generation", |
| "authors": [ |
| { |
| "first": "Kaitao", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the International Conference on Machine learning", |
| "volume": "", |
| "issue": "", |
| "pages": "5926--5936", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, and Tie- Yan Liu. 2019. MASS: Masked Sequence to Se- quence pre-training for language generation. In Pro- ceedings of the International Conference on Ma- chine learning, pages 5926-5936.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "The LMU Munich unsupervised machine translation system for WMT19", |
| "authors": [ |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Stojanovski", |
| "suffix": "" |
| }, |
| { |
| "first": "Viktor", |
| "middle": [], |
| "last": "Hangya", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Huck", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Fraser", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "393--399", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5344" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dario Stojanovski, Viktor Hangya, Matthias Huck, and Alexander Fraser. 2019. The LMU Munich unsuper- vised machine translation system for WMT19. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 393-399.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, page 5998-6008.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Extracting and composing robust features with denoising autoencoders", |
| "authors": [ |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Larochelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre-Antoine", |
| "middle": [], |
| "last": "Manzagol", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1096--1103", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/1390156.1390294" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pascal Vincent, Hugo Larochelle, Yoshua Bengio, and Pierre-Antoine Manzagol. 2008. Extracting and composing robust features with denoising autoen- coders. In Proceedings of the International Confer- ence on Machine Learning, pages 1096-1103.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Learning a multidomain curriculum for neural machine translation", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ye", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiquan", |
| "middle": [], |
| "last": "Ngiam", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinfei", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Isaac", |
| "middle": [], |
| "last": "Caswell", |
| "suffix": "" |
| }, |
| { |
| "first": "Zarana", |
| "middle": [], |
| "last": "Parekh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.689" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Wang, Ye Tian, Jiquan Ngiam, Yinfei Yang, Isaac Caswell, and Zarana Parekh. 2020. Learning a multi- domain curriculum for neural machine translation.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "7711--7723", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "In Proceedings of the Annual Meeting of the Asso- ciation for Computational Linguistics, pages 7711- 7723.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Curriculum learning for domain adaptation in neural machine translation", |
| "authors": [ |
| { |
| "first": "Xuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Pamela", |
| "middle": [], |
| "last": "Shapiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Gaurav", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "McNamee", |
| "suffix": "" |
| }, |
| { |
| "first": "Marine", |
| "middle": [], |
| "last": "Carpuat", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1903--1915", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1189" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuan Zhang, Pamela Shapiro, Gaurav Kumar, Paul Mc- Namee, Marine Carpuat, and Kevin Duh. 2019. Cur- riculum learning for domain adaptation in neural ma- chine translation. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1903-1915.", |
| "links": null |
| } |
| }, |
| "ref_entries": {} |
| } |
| } |