| { |
| "paper_id": "N19-1041", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:02:32.348733Z" |
| }, |
| "title": "ReWE: Regressing Word Embeddings for Regularization of Neural Machine Translation Systems", |
| "authors": [ |
| { |
| "first": "Inigo", |
| "middle": [ |
| "Jauregi" |
| ], |
| "last": "Unanue", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Technology Sydney", |
| "location": { |
| "settlement": "Sydney", |
| "country": "Australia" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ehsan", |
| "middle": [ |
| "Zare" |
| ], |
| "last": "Borzeshi", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "ezborzeshi@cmcrc.com" |
| }, |
| { |
| "first": "Nazanin", |
| "middle": [], |
| "last": "Esmaili", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "nesmaili@cmcrc.com" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Piccardi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Technology Sydney", |
| "location": { |
| "settlement": "Sydney", |
| "country": "Australia" |
| } |
| }, |
| "email": "massimo.piccardi@uts.edu.au" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Regularization of neural machine translation is still a significant problem, especially in low-resource settings. To mollify this problem, we propose regressing word embeddings (ReWE) as a new regularization technique in a system that is jointly trained to predict the next word in the translation (categorical value) and its word embedding (continuous value). Such a joint training allows the proposed system to learn the distributional properties represented by the word embeddings, empirically improving the generalization to unseen sentences. Experiments over three translation datasets have showed a consistent improvement over a strong baseline, ranging between 0.91 and 2.54 BLEU points, and also a marked improvement over a state-of-the-art system.", |
| "pdf_parse": { |
| "paper_id": "N19-1041", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Regularization of neural machine translation is still a significant problem, especially in low-resource settings. To mollify this problem, we propose regressing word embeddings (ReWE) as a new regularization technique in a system that is jointly trained to predict the next word in the translation (categorical value) and its word embedding (continuous value). Such a joint training allows the proposed system to learn the distributional properties represented by the word embeddings, empirically improving the generalization to unseen sentences. Experiments over three translation datasets have showed a consistent improvement over a strong baseline, ranging between 0.91 and 2.54 BLEU points, and also a marked improvement over a state-of-the-art system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The last few years have witnessed remarkable improvements in the performance of machine translation (MT) systems. These improvements are strongly linked to the development of neural machine translation (NMT): based on encoderdecoder architectures (also known as seq2seq), NMT can use recurrent neural networks (RNNs) Cho et al., 2014; Wu et al., 2016) , convolutional neural networks (CNNs) (Gehring et al., 2017) or transformers (Vaswani et al., 2017) to learn how to map a sentence from the source language to an adequate translation in the target language. In addition, attention mechanisms (Bahdanau et al., 2015; Luong et al., 2015) help soft-align the encoded source words with the predictions, further improving the translation.", |
| "cite_spans": [ |
| { |
| "start": 317, |
| "end": 334, |
| "text": "Cho et al., 2014;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 335, |
| "end": 351, |
| "text": "Wu et al., 2016)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 391, |
| "end": 413, |
| "text": "(Gehring et al., 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 430, |
| "end": 452, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 594, |
| "end": 617, |
| "text": "(Bahdanau et al., 2015;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 618, |
| "end": 637, |
| "text": "Luong et al., 2015)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "NMT systems are usually trained via maximum likelihood estimation (MLE). However, as * * The author has changed affiliation to Microsoft after the completion of this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "His new email is: Ehsan.ZareBorzeshi@microsoft.com Figure 1 : The proposed regularizer: the hidden vector in the decoder, s j , transits through two paths: 1) a linear and a softmax layers that output vector v j (vocab dim) which is used for predicting the target word as usual, and 2) a two-layer network (ReWE) that outputs a vector, e j , of word embedding size (word emb dim). During training, e j is used in a regressive loss with the ground-truth embedding.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 51, |
| "end": 59, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "pointed out by (Elbayad et al., 2018) , MLE suffers from two obvious limitations: the first is that it treats all the predictions other than the ground truth as equally incorrect. As a consequence, synonyms and semantically-similar words -which are often regarded as highly interchangeable with the ground truth -are completely ignored during training. The second limitation is that MLEtrained systems suffer from \"exposure bias\" Ranzato et al., 2015) and do not generalize well over the large output space of translations. Owing to these limitations, NMT systems still struggle to outperform other traditional MT approaches when the amount of supervised data is limited (Koehn and Knowles, 2017) . In this paper, we propose a novel regularization technique for NMT aimed to influence model learning with contextual properties. The technique -nicknamed ReWE from \"regressing word embedding\" -consists of modifying a conventional seq2seq decoder to jointly learn to a) predict the next word in the translation (categorical value), as usual, and b) regress its word embedding (numerical value). Figure 1 shows the modified decoder. Both predictions are incorporated in the training objective, combining standard MLE with a continuous loss function based on word embeddings. The rationale is to encourage the system to learn to co-predict the next word together with its context (by means of the word embedding representation), in the hope of achieving improved generalization. At inference time, the system operates as a standard NMT system, retaining the categorical prediction and ignoring the predicted embedding. We qualify our proposal as a regularization technique since, like any other regularizers, it only aims to influence the model's training, while leaving the inference unchanged. We have evaluated the proposed system over three translation datasets of different size, namely English-French (en-fr), Czech-English (cs-en), and Basque-English (eu-en). In each case, ReWE has significantly outperformed its baseline, with a marked improvement of up to 2.54 BLEU points for eu-en, and consistently outperformed a state-of-the-art system (Denkowski and Neubig, 2017) .", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 37, |
| "text": "(Elbayad et al., 2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 430, |
| "end": 451, |
| "text": "Ranzato et al., 2015)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 671, |
| "end": 696, |
| "text": "(Koehn and Knowles, 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 2146, |
| "end": 2174, |
| "text": "(Denkowski and Neubig, 2017)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1093, |
| "end": 1101, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A substantial literature has been devoted to improving the generalization of NMT systems. Fadaee et al. (2017) have proposed a data augmentation approach for low-resource settings that generates synthetic sentence pairs by replacing words in the original training sentences with rare words. Kudo (2018) has trained an NMT model with different subword segmentations to enhance its robustness, achieving consistent improvements over low-resource and out-of-domain settings. Zhang et al. (2018) have presented a novel regularization method that encourages target-bidirectional agreement. Other work has proposed improvements over the use of a single ground truth for training: Ma et al. (2018) have augmented the conventional seq2seq model with a bag-of-words loss under the assumption that the space of correct translations share similar bag-of-words vectors, achieving promising results on a Chinese-English translation dataset; Elbayad et al. (2018) have used sentence-level and token-level reward distributions to \"smooth\" the single ground truth. Chousa et al. (2018) have similarly leveraged a token-level smoother.", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 110, |
| "text": "Fadaee et al. (2017)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 291, |
| "end": 302, |
| "text": "Kudo (2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 472, |
| "end": 491, |
| "text": "Zhang et al. (2018)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 674, |
| "end": 690, |
| "text": "Ma et al. (2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 928, |
| "end": 949, |
| "text": "Elbayad et al. (2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1049, |
| "end": 1069, |
| "text": "Chousa et al. (2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In a recent paper, Denkowski and Neubig (2017) have achieved state-of-the-art translation accuracy by leveraging a variety of techniques which include: dropout (Srivastava et al., 2014) , lexicon bias (Arthur et al., 2016) , pre-translation (Niehues et al., 2016) , data bootstrapping , byte-pair encoding (Sennrich et al., 2016) and ensembles of independent models (Rokach, 2010) . However, to our knowledge none of the mentioned approaches have explicitly attempted to leverage the embeddings of the ground-truth tokens as targets. For this reason, in this paper we explore regressing toward pre-trained word embeddings as an attempt to capture contextual properties and achieve improved model regularization.", |
| "cite_spans": [ |
| { |
| "start": 19, |
| "end": 46, |
| "text": "Denkowski and Neubig (2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 160, |
| "end": 185, |
| "text": "(Srivastava et al., 2014)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 201, |
| "end": 222, |
| "text": "(Arthur et al., 2016)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 241, |
| "end": 263, |
| "text": "(Niehues et al., 2016)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 306, |
| "end": 329, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 366, |
| "end": 380, |
| "text": "(Rokach, 2010)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The model is a standard NMT model with attention in which we use RNNs for the encoder and decoder. Following the notation of (Bahdanau et al., 2015) , the RNN in the decoder generates a sequence of hidden vectors, {s 1 , . . . , s m }, given the context vector, the previous hidden state s j\u22121 and the previous predicted word y j\u22121 :", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 148, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model 3.1 Seq2seq baseline", |
| "sec_num": "3" |
| }, |
| { |
| "text": "s j = dec rnn (s j\u22121 , y j\u22121 , c j ) j = 1, . . . , m (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model 3.1 Seq2seq baseline", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where y 0 and s 0 are initializations for the state and label chains. Each hidden vector s j (of parameter size S) is then linearly transformed into a vector of vocabulary size, V , and a softmax layer converts it into a vector of probabilities (Eq. 2), where W (a matrix of size V \u00d7 S) and b (a vector of size V \u00d7 1) are learnable parameters. The predicted conditional probability distribution over the words in the target vocabulary, p j , is given as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model 3.1 Seq2seq baseline", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p j = sof tmax(Ws j + b)", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Model 3.1 Seq2seq baseline", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As usual, training attempts to minimize the negative log-likelihood (NLL), defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model 3.1 Seq2seq baseline", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "N LL loss = \u2212 m j=1 log(p j (y j ))", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Model 3.1 Seq2seq baseline", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where p j (y j ) notes the probability of ground-truth word y j . The NLL loss is minimized when the probability of the ground truth is one and that of all other words is zero, treating all predictions different from the ground truth as equally incorrect.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model 3.1 Seq2seq baseline", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Pre-trained word embeddings (Pennington et al., 2014; Bojanowski et al., 2017; Mikolov et al., 2013) capture the contextual similarities of words, typically by maximizing the probability of word w t+k to occur in the context of center word w t . This probability can be expressed as:", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 53, |
| "text": "(Pennington et al., 2014;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 54, |
| "end": 78, |
| "text": "Bojanowski et al., 2017;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 79, |
| "end": 100, |
| "text": "Mikolov et al., 2013)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ReWE", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(w t+k |w t ), \u2212 c \u2264 k \u2264 c, k = 0 t = 1, . . . , T", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "ReWE", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where c is the size of the context and T is the total number of words in the training set. Traditionally, word embeddings have only been used as input representations. In this paper, we instead propose using them in output as part of the training objective, in the hope of achieving regularization and improving prediction accuracy. Building upon the baseline model presented in Section 3.1, we have designed a new \"joint learning\" setting: our decoder still predicts the probability distribution over the vocabulary, p j (Eq. 2), while simultaneously regressing the same shared s j to the ground-truth word embedding, e(y j ). The ReWE module consists of two linear layers with a Rectified Linear Unit (ReLU) in between, outputting a vector e j of word embedding size (Eq. 5). Please note that adding this extra module adds negligible computational costs and training time. Full details of this module are given in the supplementary material.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ReWE", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e j = ReW E(s j ) = W 2 (ReLU (W 1 s j + b 1 )) + b 2", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "ReWE", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The training objective is a numerical loss, l (Eq. 6), computed between the output vector, e j , and the ground-truth embedding, e(y j ):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ReWE", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "ReW E loss = l(e j , e(y j ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ReWE", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the experiment, we have explored two cases for the ReW E loss : the minimum square error (MSE) 1 and the cosine embedding loss (CEL) 2 . Finally, the N LL loss and the ReW E loss are combined to form the training objective using a positive trade-off coefficient, \u03bb:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ReWE", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Loss = N LL loss + \u03bbReW E loss", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "ReWE", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As mentioned in the Introduction, at inference time we ignore the ReWE output, e j , and the model operates as a standard NMT system. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ReWE", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We have developed our models building upon the OpenNMT toolkit (Klein et al., 2017) 3 . For training, we have used the same settings as (Denkowski and Neubig, 2017) . We have also explored the use of sub-word units learned with byte pair encoding (BPE) (Sennrich et al., 2016) . All the preprocessing steps, hyperparameter values and training parameters are described in detail in the supplementary material to ease reproducibility of our results.", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 83, |
| "text": "(Klein et al., 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 136, |
| "end": 164, |
| "text": "(Denkowski and Neubig, 2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 253, |
| "end": 276, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We have evaluated these systems over three publicly-available datasets from the 2016 ACL Conference on Machine Translation (WMT16) 4 and the 2016 International Workshop on Spoken Language Translation (IWSLT16) 5 . Table 1 lists the datasets and their main features. Despite having nearly 90,000 parallel sentences, the eu-en dataset only contains 2,000 human-translated sentences; the others are translations of Wikipedia page titles and localization files. Therefore, we regard the eu-en dataset as very low-resource.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 214, |
| "end": 221, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In addition to the seq2seq baseline, we have compared our results with those recently reported by Denkowski and Neubig for non-ensemble models (2017). For all models, we report the BLEU scores (Papineni et al., 2002) , with the addition of selected comparative examples. Two contrastive experiments are also added in supplementary notes.", |
| "cite_spans": [ |
| { |
| "start": 193, |
| "end": 216, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "As a preliminary experiment, we have carried out a sensitivity analysis to determine the optimal value of the trade-off coefficient, \u03bb (Eq. 6), using the en-fr validation set. The results are shown in Figure 2 , where each point is the average of three runs trained with different seeds. The figure shows that 3 Our code can be found at: https://github.com/ijauregiCMCRC/ReWE NMT 4 WMT16: http://www.statmt.org/wmt16/ 5 IWSLT16: https://workshop2016.iwslt.org/ Models en-fr cs-en eu-en Word BPE Word BPE Word BPE (Denkowski and Neubig, 2017) 33.60 34.50 21.00 22.60 (Denkowski and Neubig, 2017) the MSE loss has outperformed slightly the baseline for small values of \u03bb (< 1), but the BLEU score has dropped drastically for larger values. Conversely, the CEL loss has increased steadily with \u03bb, reaching 38.23 BLEU points for \u03bb = 20, with a marked improvement of 1.53 points over the baseline. This result has been encouraging and therefore for the rest of the experiments we have used CEL as the ReW E loss and kept the value of \u03bb to 20. In Section 4.3, we further discuss the behavior of CEL and MSE. Table 2 reports the results of the main experiment for all datasets. The values of our experiments are for blind runs over the test sets, averaged over 10 independent runs with different seeds. The results show that adding ReWE has significantly improved the baseline in all cases, with an average of 1.46 BLEU points. In the case of the eu-en dataset, the improvement has reached 2.54 BLEU points. We have also run unpaired t-tests between our baseline and ReWE, and the differences have proved statistically significant (p-values < 0.05) in all cases. Using BPE has proved beneficial for the cs-en and eu-en pairs, but not for the en-fr", |
| "cite_spans": [ |
| { |
| "start": 310, |
| "end": 311, |
| "text": "3", |
| "ref_id": null |
| }, |
| { |
| "start": 513, |
| "end": 541, |
| "text": "(Denkowski and Neubig, 2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 566, |
| "end": 594, |
| "text": "(Denkowski and Neubig, 2017)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 201, |
| "end": 209, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1102, |
| "end": 1109, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Hautatu Kontrol panela \u2192 Programa lehenetsiak , eta aldatu bertan .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Src:", |
| "sec_num": null |
| }, |
| { |
| "text": "Go to Control Panel \u2192 Default programs , and change it there . Baseline:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ref:", |
| "sec_num": null |
| }, |
| { |
| "text": "Select the Control Panel \u2192 program , and change . Baseline + ReWE: Select the Control Panel \u2192 Default Program , and change it . pair. We speculate that English and French may be closer to each other at word level and, therefore, less likely to benefit from the use of sub-word units. Conversely, Czech and Basque are morphologically very rich, justifying the improvements with BPE. Table 2 also shows that our model has outperformed almost all the state-of-the-art results reported in (Denkowski and Neubig, 2017 ) (dropout, lexicon bias, pre-translation, and bootstrapping), with the only exception of the pre-translation case for the cs-en pair with BPE. This shows that the proposed model is competitive with contemporary NMT techniques.", |
| "cite_spans": [ |
| { |
| "start": 485, |
| "end": 512, |
| "text": "(Denkowski and Neubig, 2017", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 382, |
| "end": 389, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ref:", |
| "sec_num": null |
| }, |
| { |
| "text": "To further explore the improvements obtained with ReWE, we have qualitatively compared several translations provided by the baseline and the baseline + ReWE (CEL), trained with identical seeds. Overall, we have noted a number of instances where ReWE has provided translations with more information from the source (higher adequacy). For reasons of space, we report only one example in Table 3 , but more examples are available in the supplementary material. In the example, the baseline has chosen a generic word, \"program\", while ReWE has been capable of correctly predicting \"Default Program\" and being specific about the object, \"it\". ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 385, |
| "end": 392, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Qualitative comparison", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To further explore the behaviour of the ReWE loss, Figure 3 plots the values of the NLL and ReWE (CEL) losses during training of our model over the en-fr training set. The natural values of the ReWE (CEL) loss (blue, dashed) are much lower than those of the NLL loss (red, +), and thus its contribution to the gradient is likely to be limited. However, when scaled up by a factor of \u03bb = 20 (magenta, \u00d7), its influence on the gradient becomes more marked. Empirically, both the NLL and ReWE (CEL) losses decrease as the training progresses and the total loss (green, \u2022) decreases. As shown in the results, this combined training objective has been able to lead to improved translation results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 51, |
| "end": 59, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Conversely, the MSE loss has not exhibited a similarly smooth behaviour (supplementary material). Even when brought to scale with the NLL loss, it shows much larger fluctuations as the training progresses. In particular, it shows major increases at the re-starts of the optimizer for the simulated annealing that are not compensated for by the rest of the training. It is easy to speculate that the MSE loss is much more sensitive than the cosine distance to the changes in the weights caused by dropout and the re-starts. As such, it seems less suited for use as training objective.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In this paper, we have proposed a new regularization technique for NMT (ReWE) based on a joint learning setting in which a seq2seq model simultaneously learns to a) predict the next word in the translation and b) regress toward its word embedding. The results over three parallel corpora have shown that ReWE has consistently improved over both its baseline and recent state-of-the-art results from the literature. As future work, we plan to extend our experiments to better understand the potential of the proposed regularizer, in particular for unsupervised NMT (Artetxe et al., 2018; Lample et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 564, |
| "end": 586, |
| "text": "(Artetxe et al., 2018;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 587, |
| "end": 607, |
| "text": "Lample et al., 2018)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to acknowledge the financial support received from the Capital Markets Cooperative Research Centre (CMCRC), an industry-led research initiative of the Australian Government. We would also like to thank Ben Hachey, Michael Nolan and Nadia Shnier for their careful reading of our paper and their insightful comments. Finally, we are grateful to the anonymous reviewers for all their comments and suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Unsupervised neural machine translation", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018. Unsupervised neural ma- chine translation. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Incorporating discrete translation lexicons into neural machine translation", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Arthur", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1557--1567", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Arthur, Graham Neubig, and Satoshi Nakamura. 2016. Incorporating discrete translation lexicons into neural machine translation. In Empirical Meth- ods in Natural Language Processing, pages 1557- 1567.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In International Con- ference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Scheduled sampling for sequence prediction with recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Navdeep", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1171--1179", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samy Bengio, Oriol Vinyals, Navdeep Jaitly, and Noam Shazeer. 2015. Scheduled sampling for se- quence prediction with recurrent neural networks. In Advances in Neural Information Processing Sys- tems, pages 1171-1179.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, pages 135-146.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Guided alignment training for topic-aware neural machine translation", |
| "authors": [ |
| { |
| "first": "Wenhu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Evgeny", |
| "middle": [], |
| "last": "Matusov", |
| "suffix": "" |
| }, |
| { |
| "first": "Shahram", |
| "middle": [], |
| "last": "Khadivi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan-Thorsten", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1607.01628" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenhu Chen, Evgeny Matusov, Shahram Khadivi, and Jan-Thorsten Peter. 2016. Guided alignment training for topic-aware neural machine translation. arXiv preprint arXiv:1607.01628.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Learning phrase representations using rnn encoder-decoder for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merri\u00ebnboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Fethi", |
| "middle": [], |
| "last": "Bougares", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1724--1734", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart Van Merri\u00ebnboer, Caglar Gul- cehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using rnn encoder-decoder for statistical machine translation. In Empirical Methods in Natural Language Processing, pages 1724-1734.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Training neural machine translation using word embedding-based loss", |
| "authors": [ |
| { |
| "first": "Katsuki", |
| "middle": [], |
| "last": "Chousa", |
| "suffix": "" |
| }, |
| { |
| "first": "Katsuhito", |
| "middle": [], |
| "last": "Sudoh", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1807.11219" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katsuki Chousa, Katsuhito Sudoh, and Satoshi Naka- mura. 2018. Training neural machine translation using word embedding-based loss. arXiv preprint arXiv:1807.11219.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Stronger baselines for trustable results in neural machine translation", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Denkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Neural Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "18--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Denkowski and Graham Neubig. 2017. Stronger baselines for trustable results in neural ma- chine translation. In Proceedings of the First Work- shop on Neural Machine Translation, pages 18-27. Empirical Methods in Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Token-level and sequence-level loss smoothing for rnn language models", |
| "authors": [ |
| { |
| "first": "Maha", |
| "middle": [], |
| "last": "Elbayad", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Besacier", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Verbeek", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2094--2103", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maha Elbayad, Laurent Besacier, and Jakob Verbeek. 2018. Token-level and sequence-level loss smooth- ing for rnn language models. In Proceedings of the 56th Annual Meeting of the Association for Compu- tational Linguistics, pages 2094-2103.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Data augmentation for low-resource neural machine translation", |
| "authors": [ |
| { |
| "first": "Marzieh", |
| "middle": [], |
| "last": "Fadaee", |
| "suffix": "" |
| }, |
| { |
| "first": "Arianna", |
| "middle": [], |
| "last": "Bisazza", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "567--573", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marzieh Fadaee, Arianna Bisazza, and Christof Monz. 2017. Data augmentation for low-resource neural machine translation. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics, pages 567-573.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A convolutional encoder model for neural machine translation", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Gehring", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann N", |
| "middle": [], |
| "last": "Dauphin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "123--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Gehring, Michael Auli, David Grangier, and Yann N Dauphin. 2017. A convolutional encoder model for neural machine translation. In Proceed- ings of the 55th Annual Meeting of the Association for Computational Linguistics, pages 123-135.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Opennmt: Open-source toolkit for neural machine translation", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuntian", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Senellart", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander M", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1701.02810" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Klein, Yoon Kim, Yuntian Deng, Jean Senellart, and Alexander M Rush. 2017. Opennmt: Open-source toolkit for neural machine translation. arXiv preprint arXiv:1701.02810.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Six challenges for neural machine translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Knowles", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Neural Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "28--39", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn and Rebecca Knowles. 2017. Six chal- lenges for neural machine translation. In Pro- ceedings of the First Workshop on Neural Machine Translation, pages 28-39. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Subword regularization: Improving neural network translation models with multiple subword candidates", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "66--75", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taku Kudo. 2018. Subword regularization: Improv- ing neural network translation models with multiple subword candidates. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics, pages 66-75.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Phrase-based & neural unsupervised machine translation", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'Aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "5039--5049", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Lu- dovic Denoyer, and Marc'Aurelio Ranzato. 2018. Phrase-based & neural unsupervised machine trans- lation. Empirical Methods in Natural Language Processing, pages 5039-5049.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Effective approaches to attention-based neural machine translation", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1412--1421", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong, Hieu Pham, and Christopher D Manning. 2015. Effective approaches to attention- based neural machine translation. In Empirical Methods in Natural Language Processing, pages 1412-1421.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Bag-of-words as target for neural machine translation", |
| "authors": [ |
| { |
| "first": "Shuming", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yizhong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Junyang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "332--338", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuming Ma, Xu Sun, Yizhong Wang, and Junyang Lin. 2018. Bag-of-words as target for neural ma- chine translation. In Proceedings of the 56th Annual Meeting of the Association for Computational Lin- guistics, pages 332-338.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in Neural Information Processing Systems, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Pre-translation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Niehues", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunah", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Thanh-Le", |
| "middle": [], |
| "last": "Ha", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Waibel", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1610.05243" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jan Niehues, Eunah Cho, Thanh-Le Ha, and Alex Waibel. 2016. Pre-translation for neural machine translation. arXiv preprint arXiv:1610.05243.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th annual meeting on association for computational linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting on association for compu- tational linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Empirical Methods in Natural Language Processing, pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Sequence level training with recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Marc'Aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc'Aurelio Ranzato, Sumit Chopra, Michael Auli, and Wojciech Zaremba. 2015. Sequence level train- ing with recurrent neural networks. In Advances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Ensemble-based classifiers", |
| "authors": [ |
| { |
| "first": "Lior", |
| "middle": [], |
| "last": "Rokach", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Artificial Intelligence Review", |
| "volume": "33", |
| "issue": "1-2", |
| "pages": "1--39", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lior Rokach. 2010. Ensemble-based classifiers. Artifi- cial Intelligence Review, 33(1-2):1-39.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Lin- guistics, pages 1715-1725.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Dropout: a simple way to prevent neural networks from overfitting", |
| "authors": [ |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "15", |
| "issue": "1", |
| "pages": "1929--1958", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: a simple way to prevent neural networks from overfitting. The Journal of Machine Learning Research, 15(1):1929-1958.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural net- works. In Advances in neural information process- ing systems, pages 3104-3112.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.08144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google's neural ma- chine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Regularizing neural machine translation by target-bidirectional agreement", |
| "authors": [ |
| { |
| "first": "Zhirui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuangzhi", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shujie", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Enhong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1808.04064" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhirui Zhang, Shuangzhi Wu, Shujie Liu, Mu Li, Ming Zhou, and Enhong Chen. 2018. Regularizing neu- ral machine translation by target-bidirectional agree- ment. arXiv preprint arXiv:1808.04064.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "text": "BLEU scores of three models over the enfr validation set for different \u03bb values: baseline (red, dashed), baseline + ReWE (MSE) (green, \u2022), baseline + ReWE (CEL) (blue, \u00d7). Each point in the graph is an average of 3 independently trained models.", |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "num": null, |
| "text": "Plot of the values of various loss functions during training of our model over the en-fr training set: green, \u2022: training loss (NLL + (\u03bb = 20) ReWE (CEL); Eq.7); red, +: NLL loss; blue, dashed: ReWE (CEL) loss; magenta, \u00d7: ReWE (CEL) loss scaled by \u03bb = 20. Each point in the graph is an average value of the corresponding loss over 25,000 sentences.", |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "text": "Top: parallel training data. Bottom: validation and test sets.", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "text": "BLEU scores over the test sets. Average of 10 models independently trained with different seeds.", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "content": "<table/>", |
| "text": "Translation example from the eu-en test set.", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |