| { |
| "paper_id": "P19-1022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:26:04.604937Z" |
| }, |
| "title": "Domain Adaptive Inference for Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Danielle", |
| "middle": [], |
| "last": "Saunders", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cambridge", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Stahlberg", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cambridge", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Adri\u00e0", |
| "middle": [], |
| "last": "De Gispert", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "SDL Research", |
| "location": { |
| "settlement": "Cambridge", |
| "country": "UK" |
| } |
| }, |
| "email": "agispert@sdl.com" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Byrne", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cambridge", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "bbyrne@sdl.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We investigate adaptive ensemble weighting for Neural Machine Translation, addressing the case of improving performance on a new and potentially unknown domain without sacrificing performance on the original domain. We adapt sequentially across two Spanish-English and three English-German tasks, comparing unregularized fine-tuning, L2 and Elastic Weight Consolidation. We then report a novel scheme for adaptive NMT ensemble decoding by extending Bayesian Interpolation with source information, and show strong improvements across test domains without access to the domain label.", |
| "pdf_parse": { |
| "paper_id": "P19-1022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We investigate adaptive ensemble weighting for Neural Machine Translation, addressing the case of improving performance on a new and potentially unknown domain without sacrificing performance on the original domain. We adapt sequentially across two Spanish-English and three English-German tasks, comparing unregularized fine-tuning, L2 and Elastic Weight Consolidation. We then report a novel scheme for adaptive NMT ensemble decoding by extending Bayesian Interpolation with source information, and show strong improvements across test domains without access to the domain label.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Neural Machine Translation (NMT) models are effective when trained on broad domains with large datasets, such as news translation (Bojar et al., 2017) . However, test data may be drawn from a different domain, on which general models can perform poorly (Koehn and Knowles, 2017) . We address the problem of adapting to one or more domains while maintaining good performance across all domains. Crucially, we assume the realistic scenario where the domain is unknown at inference time.", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 150, |
| "text": "(Bojar et al., 2017)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 253, |
| "end": 278, |
| "text": "(Koehn and Knowles, 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "One solution is ensembling models trained on different domains (Freitag and Al-Onaizan, 2016) . This approach has two main drawbacks. Firstly, obtaining models for each domain is challenging. Training from scratch on each new domain is impractical, while continuing training on a new domain can cause catastrophic forgetting of previous tasks (French, 1999) , even in an ensemble (Freitag and Al-Onaizan, 2016) . Secondly, ensemble weighting requires knowledge of the test domain.", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 93, |
| "text": "(Freitag and Al-Onaizan, 2016)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 343, |
| "end": 357, |
| "text": "(French, 1999)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 380, |
| "end": 410, |
| "text": "(Freitag and Al-Onaizan, 2016)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We address the model training problem with regularized fine-tuning, using an L2 regularizer and Elastic Weight Consolidation (EWC) (Kirkpatrick et al., 2017) . We finetune sequentially to translate up to three domains with the same model.", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 157, |
| "text": "(Kirkpatrick et al., 2017)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We then develop an adaptive inference scheme for NMT ensembles by extending Bayesian Interpolation (BI) (Allauzen and Riley, 2011) to sequence-to-sequence models. 1 This lets us calculate ensemble weights adaptively over time without needing the domain label, giving strong improvements over uniform ensembling for baseline and fine-tuned models.", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 130, |
| "text": "(Allauzen and Riley, 2011)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In NMT fine-tuning, a model is first trained on a task A, typically translating a large generaldomain corpus (Luong and Manning, 2015) . The optimized parameters \u03b8 *", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 134, |
| "text": "(Luong and Manning, 2015)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive training", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "A are fine-tuned on task B, a new domain. Without regularization, catastrophic forgetting can occur: performance on task A degrades as parameters adjust to the new objective. A regularized objective is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive training", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "L(\u03b8) = L B (\u03b8) + \u039b j F j (\u03b8 j \u2212 \u03b8 * A,j ) 2 (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive training", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "where L A (\u03b8) and L B (\u03b8) are the likelihood of tasks A and B. We compare three cases:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive training", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "\u2022 No-reg, where \u039b = 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive training", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "\u2022 L2, where F j = 1 for each parameter index j", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive training", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "\u2022 EWC, where F j = E \u2207 2 L A (\u03b8 j )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive training", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": ", a sample estimate of task A Fisher information. This effectively measures the importance of \u03b8 j to task A.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive training", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "For L2 and EWC we tune \u039b on the validation sets for new and old tasks to balance forgetting against new-domain performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive training", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "We extend the BI formalism to condition on a source sequence, letting us apply it to adaptive NMT ensemble weighting. We consider models p k (y|x) trained on K distinct domains, used for tasks t = 1, . . . , T . In our case a task is decoding from one domain, so T = K. We assume throughout that p(t) = 1 T , i.e. that tasks are equally likely absent any other information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "A standard, fixed-weight ensemble would translate with:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "argmax y p(y|x) = argmax y K k=1 W k p k (y|x) (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "The BI formalism assumes that we have tuned sets of ensemble weights \u03bb k,t for each task. This defines a task-conditional ensemble", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(y|x, t) = K k=1 \u03bb k,t p k (y|x)", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "which can be used as a fixed weight ensemble if the task is known. However if the task t is not known, we wish to translate with:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "argmax y p(y|x) = argmax y T t=1 p(t, y|x) (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "At step i, where h i is history y 1:i\u22121 :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(y i |h i , x) = T t=1 p(t, y i |h i , x) = T t=1 p(t|h i , x) p(y i |h i , t, x) = K k=1 p k (y i |h i , x) T t=1 p(t|h i , x)\u03bb k,t = K k=1 W k,i p k (y i |h i , x)", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "This has the form of an adaptively weighted ensemble where, by comparison with Eq. 2:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "W k,i = T t=1 p(t|h i , x)\u03bb k,t", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "In decoding, at each step i adaptation relies on a recomputed estimate of the task posterior:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(t|h i , x) = p(h i |t, x)p(t|x) T t =1 p(h i |t , x)p(t |x)", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "1.2.1 Static decoder configurations", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "In static decoding (Eq. 2), the weights W k are constant for each source sentence x. BI simplifies to a uniform ensemble when \u03bb k,t = p(t|x) = 1 T . This leads to W k,i = 1 K (see Eq. 6) as a fixed equalweight interpolation of the component models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "Static decoding can also be performed with task posteriors conditioned only on the source sentence, which reflects the assumption that the history can be disregarded and that p(t|h i , x) = p(t|x). In the most straightforward case, we assume that only domain k is useful for task t: \u03bb k,t = \u03b4 k (t) (1 for k = t, 0 otherwise). Model weighting simplifies to a fixed ensemble:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "W k = p(k|x)", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "and decoding proceeds according to Eq. 2. We refer to this as decoding with an informative source (IS). We propose using G t , a collection of n-gram language models trained on source language sentences from tasks t, to estimate p(t|x):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(t|x) = p(x|t)p(t) T t =1 p(x|t )p(t ) = G t (x) T t =1 G t (x)", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "In this way we use source language n-gram language models to estimate p(t = k|x) in Eq. 8 for static decoding with an informative source.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "For adaptive decoding with Bayesian Interpolation, as in Eq. 5, the model weights vary during decoding according to Eq. 6 and Eq. 7. We assume here that p(t|x) = p(t) = 1 T . This corresponds to the approach in Allauzen and Riley (2011) , which considers only language model combination for speech recognition. We refer to this in experiments simply as BI. A refinement is to incorporate Eq. 9 into Eq. 7, which would be Bayesian Interpolation with an informative source (BI+IS).", |
| "cite_spans": [ |
| { |
| "start": 211, |
| "end": 236, |
| "text": "Allauzen and Riley (2011)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoder configurations", |
| "sec_num": "1.2.2" |
| }, |
| { |
| "text": "We now address the choice of \u03bb k,t . A simple but restrictive approach is to take \u03bb k,t = \u03b4 k (t). We refer to this as identity-BI, and it embodies the assumption that only one domain is useful for each task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoder configurations", |
| "sec_num": "1.2.2" |
| }, |
| { |
| "text": "Alternatively, if we have validation data V t for each task t, parameter search can be done to optimize \u03bb k,t for BLEU over V t for each task. This is straightforward but relatively costly. We propose a simpler approach based on the source language n-gram language models from Eq. 9. We assume that each G t is also a language model for its corresponding domain k. With G k,t = x\u2208Vt G k (x), we take:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoder configurations", |
| "sec_num": "1.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03bb k,t = G k,t k G k ,t", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Adaptive decoder configurations", |
| "sec_num": "1.2.2" |
| }, |
| { |
| "text": "\u03bb k,t can be interpreted as the probability that task t contains sentences x drawn from domain k as estimated over the V t . Figure 1 demonstrates this adaptive decoding scheme when weighting a biomedical and a general (news) domain model to produce a biomedical sentence under BI. The model weights W k,i are even until biomedical-specific vocabulary is produced, at which point the in-domain model dominates.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 125, |
| "end": 133, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adaptive decoder configurations", |
| "sec_num": "1.2.2" |
| }, |
| { |
| "text": "We summarize our approaches to decoding in Table 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "1.2.3" |
| }, |
| { |
| "text": "Decoder p(t|x) \u03bb k,t Static Uniform 1 T 1 T IS Eq. 9 \u03b4 k (t) Adaptive Identity-BI 1 T \u03b4 k (t) BI 1 T", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "1.2.3" |
| }, |
| { |
| "text": "Eq. 10 BI+IS Eq. 9 Eq. 10 ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "1.2.3" |
| }, |
| { |
| "text": "Approaches to NMT domain adaptation include training data selection or generation (Sennrich et al., 2016a; Wang et al., 2017; Sajjad et al., 2017) and fine-tuning output distributions (Dakwale and Monz, 2017; . Vilar (2018) regularizes parameters with an importance network, while freeze subsets of the model parameters before finetuning. Both observe forgetting with the adapted model on the general domain data in the realistic scenario where the test data domain is unknown. Barone et al. 2017 ", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 106, |
| "text": "(Sennrich et al., 2016a;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 107, |
| "end": 125, |
| "text": "Wang et al., 2017;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 126, |
| "end": 146, |
| "text": "Sajjad et al., 2017)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 184, |
| "end": 208, |
| "text": "(Dakwale and Monz, 2017;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 211, |
| "end": 223, |
| "text": "Vilar (2018)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "1.3" |
| }, |
| { |
| "text": "We report on Spanish-English (es-en) and English-German (en-de). For es-en we use the Scielo corpus (Neves et al., 2016) , with Health as the general domain, adapting to Biological Sciences ('Bio'). We evaluate on the domain-labeled Health and Bio 2016 test data.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 120, |
| "text": "(Neves et al., 2016)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The en-de general domain is the WMT18 News task (Bojar et al., 2017) , with all data except ParaCrawl oversampled by 2 . We validate on newstest17 and evaluate on newstest18. We adapt first to the IWSLT 2016 TED task (Cettolo et al., 2016) , and then sequentially to the APE 2017 IT task (Turchi et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 68, |
| "text": "(Bojar et al., 2017)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 217, |
| "end": 239, |
| "text": "(Cettolo et al., 2016)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 288, |
| "end": 309, |
| "text": "(Turchi et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We filter training sentences for minimum three tokens and maximum 120 tokens, and remove sentence pairs with length ratios higher than 4.5:1 or lower than 1:4.5. Table 2 shows filtered training sentence counts. Each language pair uses a 32K-merge source-target BPE vocabulary trained on the general domain (Sennrich et al., 2016b) .", |
| "cite_spans": [ |
| { |
| "start": 306, |
| "end": 330, |
| "text": "(Sennrich et al., 2016b)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 162, |
| "end": 169, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We implement in Tensor2Tensor (Vaswani et al., 2018) and use its base Transformer model (Vaswani et al., 2017) for all NMT models. At inference time we decode with beam size 4 in SGNMT (Stahlberg et al., 2017) and evaluate with case-sensitive detokenized BLEU using Sacre-BLEU (Post, 2018) . For BI, we use 4-gram KENLM models (Heafield, 2011) . We wish to improve performance on new domains without reduced performance on the general domain, to give strong models for adaptive decoding. For es-en, the Health and Bio tasks overlap, but catastrophic forgetting still occurs under noreg (Table 3) . Regularization reduces forgetting and allows further improvements on Bio over noreg fine-tuning. We find EWC outperforms the L2 approach of Barone et al. (2017) in learning the new task and in reduced forgetting.", |
| "cite_spans": [ |
| { |
| "start": 30, |
| "end": 52, |
| "text": "(Vaswani et al., 2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 88, |
| "end": 110, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 185, |
| "end": 209, |
| "text": "(Stahlberg et al., 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 277, |
| "end": 289, |
| "text": "(Post, 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 327, |
| "end": 343, |
| "text": "(Heafield, 2011)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 586, |
| "end": 595, |
| "text": "(Table 3)", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the en-de News/TED task (Table 4) , all fine-tuning schemes give similar improvements on TED. However, EWC outperforms no-reg and L2 on News, not only reducing forgetting but giving 0.5 BLEU improvement over the baseline News model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 27, |
| "end": 36, |
| "text": "(Table 4)", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The IT task is very small: training on IT data alone results in over-fitting, with a 17 BLEU improvement under fine-tuning. However, no-reg fine-tuning rapidly forgets previous tasks. EWC reduces forgetting on two previous tasks while further improving on the target domain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "2" |
| }, |
| { |
| "text": "At inference time we may not know the test data domain to match with the best adapted model, let alone optimal weights for an ensemble on that domain. Table 5 shows improvements on data without domain labelling using our adaptive decoding schemes with unadapted models trained only on one domain (models 1+2 from Table 3 and 1+2+3 from Table 4 ). We compare with the 'oracle' model trained on each domain, which we can only use if we know the test domain.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 151, |
| "end": 158, |
| "text": "Table 5", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 313, |
| "end": 320, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 336, |
| "end": 343, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adaptive decoding results", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Uniform ensembling under-performs all oracle models except es-en Bio, especially on general domains. Identity-BI strongly improves over uniform ensembling, and BI with \u03bb as in Eq. 10 improves further for all but es-en Bio. BI and IS both individually outperform the oracle for all but IS-News, indicating these schemes do not simply learn to select a single model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding results", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The combined scheme of BI+IS outperforms either BI or IS individually, except in en-de IT. We speculate IT is a distinct enough domain that p(t|x) has little effect on adapted BI weights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding results", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In Table 6 we apply the best adaptive decoding scheme, BI+IS, to models fine-tuned with EWC. The es-en ensemble consists of models 1+6 from Table 3 and the en-de ensemble models 1+7+10 from Table 4 . As described in Section 2.1 EWC models perform well over multiple domains, so the improvement over uniform ensembling is less striking than for unadapted models. Nevertheless adaptive decoding improves over both uniform ensembling and the oracle model in most cases.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 6", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 140, |
| "end": 198, |
| "text": "Table 3 and the en-de ensemble models 1+7+10 from Table 4", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adaptive decoding results", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "With adaptive decoding, we do not need to assume whether a uniform ensemble or a single model might perform better for some potentially unknown domain. We highlight this in Table 7 by reporting results with the ensembles of Tables 5 and 6 over concatenated test sets, to mimic the realistic scenario of unlabelled test data. We additionally include the uniform no-reg ensembling approach given in Freitag and Al-Onaizan (2016) using models 1+4 from Table 3 and 1+5+8 from Table 4 .", |
| "cite_spans": [ |
| { |
| "start": 398, |
| "end": 427, |
| "text": "Freitag and Al-Onaizan (2016)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 173, |
| "end": 180, |
| "text": "Table 7", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 224, |
| "end": 239, |
| "text": "Tables 5 and 6", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 450, |
| "end": 481, |
| "text": "Table 3 and 1+5+8 from Table 4", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adaptive decoding results", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Uniform no-reg ensembling outperforms unadapted uniform ensembling, since fine-tuning gives better in-domain performance. EWC achieves similar or better in-domain results to noreg while reducing forgetting, resulting in better uniform ensemble performance than no-reg. BI+IS decoding with single-domain trained models achieves gains over both the naive uniform approach and over oracle single-domain models. BI+IS with EWC-adapted models gives a 0.9 / 3.4 BLEU gain over the strong uniform EWC ensemble, and a 2.4 / 10.2 overall BLEU gain over the approach described in Freitag and Al-Onaizan (2016) .", |
| "cite_spans": [ |
| { |
| "start": 570, |
| "end": 599, |
| "text": "Freitag and Al-Onaizan (2016)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptive decoding results", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We report on training and decoding techniques that adapt NMT to new domains while preserving performance on the original domain. We demonstrate that EWC effectively regularizes NMT finetuning, outperforming other schemes reported for NMT. We extend Bayesian Interpolation with source information and apply it to NMT decoding with unadapted and fine-tuned models, adaptively weighting ensembles to outperform the oracle case, without relying on test domain labels. We suggest our approach, reported for domain adaptation, is broadly useful for NMT ensembling.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "3" |
| }, |
| { |
| "text": "See Bayesian combination schemes at https://github.com/ucam-smt/sgnmt", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.hpc.cam.ac.uk", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by EPSRC grant EP/L027623/1 and has been performed using resources provided by the Cambridge Tier-2 system operated by the University of Cambridge Research Computing Service 2 funded by EPSRC Tier-2 capital grant EP/P020259/1. Initial work by Danielle Saunders took place during an internship at SDL Research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Bayesian Language Model Interpolation for Mobile Speech Input", |
| "authors": [ |
| { |
| "first": "Cyril", |
| "middle": [], |
| "last": "Allauzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Riley", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Twelfth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cyril Allauzen and Michael Riley. 2011. Bayesian Language Model Interpolation for Mobile Speech Input. In Proceedings of the Twelfth Annual Con- ference of the International Speech Communication Association.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Regularization techniques for fine-tuning in Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Valerio Miceli", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Barone", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulrich", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Germann", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1489--1494", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1156" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antonio Valerio Miceli Barone, Barry Haddow, Ulrich Germann, and Rico Sennrich. 2017. Regularization techniques for fine-tuning in Neural Machine Trans- lation. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Process- ing, pages 1489-1494.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Findings of the 2017 Conference on Machine Translation (WMT17). In Proceedings of the Second Conference on Machine Translation", |
| "authors": [ |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajen", |
| "middle": [], |
| "last": "Chatterjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Shujian", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Huck", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Varvara", |
| "middle": [], |
| "last": "Logacheva", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "169--214", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-4717" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ond\u0159ej Bojar, Rajen Chatterjee, Christian Federmann, Yvette Graham, Barry Haddow, Shujian Huang, Matthias Huck, Philipp Koehn, Qun Liu, Varvara Logacheva, et al. 2017. Findings of the 2017 Conference on Machine Translation (WMT17). In Proceedings of the Second Conference on Machine Translation, pages 169-214.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The IWSLT 2016 evaluation campaign", |
| "authors": [ |
| { |
| "first": "Mauro", |
| "middle": [], |
| "last": "Cettolo", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Niehues", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "St\u00fcker", |
| "suffix": "" |
| }, |
| { |
| "first": "Luisa", |
| "middle": [], |
| "last": "Bentivogli", |
| "suffix": "" |
| }, |
| { |
| "first": "Roldano", |
| "middle": [], |
| "last": "Cattoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "IWSLT 2016, International Workshop on Spoken Language Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mauro Cettolo, Jan Niehues, Sebastian St\u00fcker, Luisa Bentivogli, Roldano Cattoni, and Marcello Federico. 2016. The IWSLT 2016 evaluation campaign. In IWSLT 2016, International Workshop on Spoken Language Translation.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Finetuning for Neural Machine Translation with limited degradation across in-and out-of-domain data. Proceedings of the 16th Machine Translation Summit", |
| "authors": [ |
| { |
| "first": "Praveen", |
| "middle": [], |
| "last": "Dakwale", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "156--169", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Praveen Dakwale and Christof Monz. 2017. Fine- tuning for Neural Machine Translation with limited degradation across in-and out-of-domain data. Pro- ceedings of the 16th Machine Translation Summit (MT-Summit 2017), pages 156-169.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Fast domain adaptation for Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Freitag", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaser", |
| "middle": [], |
| "last": "Al-Onaizan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Markus Freitag and Yaser Al-Onaizan. 2016. Fast domain adaptation for Neural Machine Translation. CoRR, abs/1612.06897.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Catastrophic forgetting in connectionist networks", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Robert", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "French", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Trends in cognitive sciences", |
| "volume": "3", |
| "issue": "4", |
| "pages": "128--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert M French. 1999. Catastrophic forgetting in connectionist networks. Trends in cognitive sci- ences, 3(4):128-135.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Ensemble learning for multi-source Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Garmash", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "1409--1418", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ekaterina Garmash and Christof Monz. 2016. En- semble learning for multi-source Neural Machine Translation. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 1409-1418.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "KenLM: Faster and smaller language model queries", |
| "authors": [ |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Sixth Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "187--197", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenneth Heafield. 2011. KenLM: Faster and smaller language model queries. In Proceedings of the Sixth Workshop on Statistical Machine Translation, pages 187-197.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Regularized Training Objective for Continued Training for Domain Adaptation in Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Huda", |
| "middle": [], |
| "last": "Khayrallah", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Thompson", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2nd Workshop on Neural Machine Translation and Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "36--44", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-2705" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huda Khayrallah, Brian Thompson, Kevin Duh, and Philipp Koehn. 2018. Regularized Training Objec- tive for Continued Training for Domain Adaptation in Neural Machine Translation. In Proceedings of the 2nd Workshop on Neural Machine Translation and Generation, pages 36-44.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Overcoming catastrophic forgetting in neural networks. Proceedings of the National Academy of Sciences of the United States of America", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Kirkpatrick", |
| "suffix": "" |
| }, |
| { |
| "first": "Razvan", |
| "middle": [], |
| "last": "Pascanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Neil", |
| "middle": [], |
| "last": "Rabinowitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Veness", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Desjardins", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [ |
| "A" |
| ], |
| "last": "Rusu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kieran", |
| "middle": [], |
| "last": "Milan", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Quan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiago", |
| "middle": [], |
| "last": "Ramalho", |
| "suffix": "" |
| }, |
| { |
| "first": "Agnieszka", |
| "middle": [], |
| "last": "Grabska-Barwinska", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "114", |
| "issue": "", |
| "pages": "3521--3526", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Kirkpatrick, Razvan Pascanu, Neil Rabinowitz, Joel Veness, Guillaume Desjardins, Andrei A Rusu, Kieran Milan, John Quan, Tiago Ramalho, Ag- nieszka Grabska-Barwinska, et al. 2017. Overcom- ing catastrophic forgetting in neural networks. Pro- ceedings of the National Academy of Sciences of the United States of America, 114(13):3521-3526.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Six challenges for Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Knowles", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Neural Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "28--39", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-3204" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn and Rebecca Knowles. 2017. Six chal- lenges for Neural Machine Translation. In Pro- ceedings of the First Workshop on Neural Machine Translation, pages 28-39.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Stanford Neural Machine Translation systems for spoken language domains", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the International Workshop on Spoken Language Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "76--79", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong and Christopher D Manning. 2015. Stanford Neural Machine Translation systems for spoken language domains. In Proceedings of the In- ternational Workshop on Spoken Language Transla- tion, pages 76-79.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The ScieLO Corpus: a Parallel Corpus of Scientific Publications for Biomedicine", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Mariana", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Neves", |
| "suffix": "" |
| }, |
| { |
| "first": "Aur\u00e9lie", |
| "middle": [], |
| "last": "Jimeno-Yepes", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "N\u00e9v\u00e9ol", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mariana L Neves, Antonio Jimeno-Yepes, and Aur\u00e9lie N\u00e9v\u00e9ol. 2016. The ScieLO Corpus: a Parallel Cor- pus of Scientific Publications for Biomedicine. In LREC.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A call for clarity in reporting BLEU scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. CoRR, abs/1804.08771.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Neural Machine Translation training in a multi-domain scenario", |
| "authors": [ |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Vogel", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "IWSLT 2017, International Workshop on Spoken Language Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hassan Sajjad, Nadir Durrani, Fahim Dalvi, Yonatan Belinkov, and Stephan Vogel. 2017. Neural Ma- chine Translation training in a multi-domain sce- nario. In IWSLT 2017, International Workshop on Spoken Language Translation.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The University of Edinburgh's Neural MT Systems for WMT17", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Currey", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulrich", |
| "middle": [], |
| "last": "Germann", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Valerio Miceli", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Barone", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Second Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "389--399", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-4739" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Alexandra Birch, Anna Currey, Ulrich Germann, Barry Haddow, Kenneth Heafield, An- tonio Valerio Miceli Barone, and Philip Williams. 2017. The University of Edinburgh's Neural MT Systems for WMT17. In Proceedings of the Second Conference on Machine Translation, pages 389- 399.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Improving Neural Machine Translation Models with Monolingual Data", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "86--96", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016a. Improving Neural Machine Translation Models with Monolingual Data. In Proceedings of the 54th Annual Meeting of the Association for Com- putational Linguistics, volume 1, pages 86-96.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Neural Machine Translation of Rare Words with Subword Units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016b. Neural Machine Translation of Rare Words with Subword Units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics, volume 1, pages 1715-1725.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "SGNMT-A Flexible NMT Decoding Platform for Quick Prototyping of New Models and Search Strategies", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Stahlberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Hasler", |
| "suffix": "" |
| }, |
| { |
| "first": "Danielle", |
| "middle": [], |
| "last": "Saunders", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Byrne", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "25--30", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-2005" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Stahlberg, Eva Hasler, Danielle Saunders, and Bill Byrne. 2017. SGNMT-A Flexible NMT De- coding Platform for Quick Prototyping of New Mod- els and Search Strategies. In Proceedings of the 2017 Conference on Empirical Methods in Natu- ral Language Processing: System Demonstrations, pages 25-30.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Overcoming catastrophic forgetting during domain adaptation of neural machine translation", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Thompson", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Gwinnup", |
| "suffix": "" |
| }, |
| { |
| "first": "Huda", |
| "middle": [], |
| "last": "Khayrallah", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Thompson, Jeremy Gwinnup, Huda Khayrallah, Kevin Duh, and Philipp Koehn. 2019. Overcoming catastrophic forgetting during domain adaptation of neural machine translation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Freezing subnetworks to analyze domain adaptation in Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Thompson", |
| "suffix": "" |
| }, |
| { |
| "first": "Huda", |
| "middle": [], |
| "last": "Khayrallah", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonios", |
| "middle": [], |
| "last": "Anastasopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Arya", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Marvin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Mcnamee", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gwinnup", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "124--132", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6313" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Thompson, Huda Khayrallah, Antonios Anasta- sopoulos, Arya D McCarthy, Kevin Duh, Rebecca Marvin, Paul McNamee, Jeremy Gwinnup, Tim An- derson, and Philipp Koehn. 2018. Freezing subnet- works to analyze domain adaptation in Neural Ma- chine Translation. In Proceedings of the Third Con- ference on Machine Translation, pages 124-132.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "WMT17 en-de APE shared task data", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Turchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajen", |
| "middle": [], |
| "last": "Chatterjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Negri", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "LIN-DAT/CLARIN digital library at the Institute of Formal and Applied Linguistics (\u00daFAL), Faculty of Mathematics and Physics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Turchi, Rajen Chatterjee, and Matteo Negri. 2017. WMT17 en-de APE shared task data. LIN- DAT/CLARIN digital library at the Institute of For- mal and Applied Linguistics (\u00daFAL), Faculty of Mathematics and Physics, Charles University.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Tensor2Tensor for Neural Machine Translation. CoRR", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Brevdo", |
| "suffix": "" |
| }, |
| { |
| "first": "Francois", |
| "middle": [], |
| "last": "Chollet", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Gouws", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Nal", |
| "middle": [], |
| "last": "Kalchbrenner", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Sepassi", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Samy Bengio, Eugene Brevdo, Fran- cois Chollet, Aidan N. Gomez, Stephan Gouws, Llion Jones, \u0141ukasz Kaiser, Nal Kalchbrenner, Niki Parmar, Ryan Sepassi, Noam Shazeer, and Jakob Uszkoreit. 2018. Tensor2Tensor for Neural Machine Translation. CoRR, abs/1803.07416.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "6000--6010", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, pages 6000-6010.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Learning hidden unit contribution for adapting neural machine translation models", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Vilar", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "500--505", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-2080" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Vilar. 2018. Learning hidden unit contribution for adapting neural machine translation models. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, volume 2, pages 500-505.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Sentence embedding for Neural Machine Translation domain adaptation", |
| "authors": [ |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Finch", |
| "suffix": "" |
| }, |
| { |
| "first": "Masao", |
| "middle": [], |
| "last": "Utiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "560--566", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-2089" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rui Wang, Andrew Finch, Masao Utiyama, and Ei- ichiro Sumita. 2017. Sentence embedding for Neu- ral Machine Translation domain adaptation. In Pro- ceedings of the 55th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 2: Short Papers), volume 2, pages 560-566.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Adaptively adjusting ensemble model weights W k,i (Eq. 6) during decoding with BI", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": "fine-tune with L2 regularization to reduce forgetting. Concurrently with our work,Thompson et al. (2019) apply EWC to reduce forgetting during NMT domain adaptation.During inference, Garmash and Monz (2016) use a gating network to learn weights for a multisource NMT ensemble.Freitag and Al-Onaizan (2016) use uniform ensembles of general and noreg fine-tuned models.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "" |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td colspan=\"2\">2.1 Adaptive training results</td><td/></tr><tr><td>Training scheme</td><td colspan=\"2\">Health Bio</td></tr><tr><td>1 Health</td><td>35.9</td><td>33.1</td></tr><tr><td>2 Bio</td><td>29.6</td><td>36.1</td></tr><tr><td>3 Health and Bio</td><td>35.8</td><td>37.2</td></tr><tr><td>4 1 then Bio, No-reg</td><td>30.3</td><td>36.6</td></tr><tr><td>5 1 then Bio, L2</td><td>35.1</td><td>37.3</td></tr><tr><td>6 1 then Bio, EWC</td><td>35.2</td><td>37.8</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Corpora training sentence counts" |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>Training scheme</td><td colspan=\"2\">News TED</td><td>IT</td></tr><tr><td>1 News</td><td>37.8</td><td colspan=\"2\">25.3 35.3</td></tr><tr><td>2 TED</td><td>23.7</td><td colspan=\"2\">24.1 14.4</td></tr><tr><td>3 IT</td><td>1.6</td><td>1.8</td><td>39.6</td></tr><tr><td>4 News and TED</td><td>38.2</td><td colspan=\"2\">25.5 35.4</td></tr><tr><td>5 1 then TED, No-reg</td><td>30.6</td><td colspan=\"2\">27.0 22.1</td></tr><tr><td>6 1 then TED, L2</td><td>37.9</td><td colspan=\"2\">26.7 31.8</td></tr><tr><td>7 1 then TED, EWC</td><td>38.3</td><td colspan=\"2\">27.0 33.1</td></tr><tr><td>8 5 then IT, No-reg</td><td>8.0</td><td>6.9</td><td>56.3</td></tr><tr><td>6 then IT, L2</td><td>32.3</td><td colspan=\"2\">22.6 56.9</td></tr><tr><td>10 7 then IT, EWC</td><td>35.8</td><td colspan=\"2\">24.6 57.0</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Test BLEU for es-en adaptive training. EWC reduces forgetting compared to other fine-tuning methods, while offering the greatest improvement on the new domain." |
| }, |
| "TABREF4": { |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Test BLEU for en-de adaptive training, with sequential adaptation to a third task. EWC-tuned models give the best performance on each domain." |
| }, |
| "TABREF6": { |
| "content": "<table><tr><td>Decoder configuration</td><td>es-en</td><td/><td/><td>en-de</td></tr><tr><td/><td colspan=\"4\">Health Bio News TED</td><td>IT</td></tr><tr><td>Oracle model</td><td>35.9</td><td>37.8</td><td>37.8</td><td colspan=\"2\">27.0 57.0</td></tr><tr><td>Uniform</td><td>36.0</td><td>36.4</td><td>38.9</td><td colspan=\"2\">26.0 43.5</td></tr><tr><td>BI + IS</td><td>36.2</td><td>38.0</td><td>38.7</td><td colspan=\"2\">26.1 56.4</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Test BLEU for 2-model es-en and 3-model en-de unadapted model ensembling, compared to oracle unadapted model chosen if test domain is known. Uniform ensembling generally underperforms the oracle, while BI+IS outperforms the oracle." |
| }, |
| "TABREF7": { |
| "content": "<table><tr><td>Decoder configuration</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Test BLEU for 2-model es-en and 3-model en-de model ensembling for models adapted with EWC, compared to oracle model last trained on each domain, chosen if test domain is known. BI+IS outperforms uniform ensembling and in some cases outperforms the oracle." |
| }, |
| "TABREF8": { |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Total BLEU for test data concatenated across domains. Results from 2-model es-en and 3-model en-de ensembles, compared to oracle model chosen if test domain is known. No-reg uniform corresponds to the approach ofFreitag and Al-Onaizan (2016). BI+IS performs similarly to strong oracles with no test domain labeling." |
| } |
| } |
| } |
| } |