| { |
| "paper_id": "K19-1027", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:06:24.808295Z" |
| }, |
| "title": "Unsupervised Neural Machine Translation with Future Rewarding", |
| "authors": [ |
| { |
| "first": "Xiangpeng", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "weixiangpeng@iie.ac.cn" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "huyue@iie.ac.cn" |
| }, |
| { |
| "first": "Luxi", |
| "middle": [], |
| "last": "Xing", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "xingluxi@iie.ac.cn" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper, we alleviate the local optimality of back-translation by learning a policy (takes the form of an encoder-decoder and is defined by its parameters) with future rewarding under the reinforcement learning framework, which aims to optimize the global word predictions for unsupervised neural machine translation. To this end, we design a novel reward function to characterize high-quality translations from two aspects: n-gram matching and semantic adequacy. The n-gram matching is defined as an alternative for the discrete BLEU metric, and the semantic adequacy is used to measure the adequacy of conveying the meaning of the source sentence to the target. During training, our model strives for earning higher rewards by learning to produce grammatically more accurate and semantically more adequate translations. Besides, a variational inference network (VIN) is proposed to constrain the corresponding sentences in two languages have the same or similar latent semantic code. On the widely used WMT'14 English-French, WMT'16 English-German and NIST Chineseto-English benchmarks, our models respectively obtain 27.59/27.15, 19.65/23.42 and 22.40 BLEU points without using any labeled data, demonstrating consistent improvements over previous unsupervised NMT models.", |
| "pdf_parse": { |
| "paper_id": "K19-1027", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper, we alleviate the local optimality of back-translation by learning a policy (takes the form of an encoder-decoder and is defined by its parameters) with future rewarding under the reinforcement learning framework, which aims to optimize the global word predictions for unsupervised neural machine translation. To this end, we design a novel reward function to characterize high-quality translations from two aspects: n-gram matching and semantic adequacy. The n-gram matching is defined as an alternative for the discrete BLEU metric, and the semantic adequacy is used to measure the adequacy of conveying the meaning of the source sentence to the target. During training, our model strives for earning higher rewards by learning to produce grammatically more accurate and semantically more adequate translations. Besides, a variational inference network (VIN) is proposed to constrain the corresponding sentences in two languages have the same or similar latent semantic code. On the widely used WMT'14 English-French, WMT'16 English-German and NIST Chineseto-English benchmarks, our models respectively obtain 27.59/27.15, 19.65/23.42 and 22.40 BLEU points without using any labeled data, demonstrating consistent improvements over previous unsupervised NMT models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Neural Machine Translation (Sutskever et al., 2014; Bahdanau et al., 2015) directly models the entire translation process through training an encoder-decoder model that has achieved remarkable performance Gehring et al., 2017; Vaswani et al., 2017) when provided with massive amounts of parallel corpora. However, the lack of large-scale parallel data is a serious problem for the vast majority of language pairs.", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 51, |
| "text": "(Sutskever et al., 2014;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 52, |
| "end": 74, |
| "text": "Bahdanau et al., 2015)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 205, |
| "end": 226, |
| "text": "Gehring et al., 2017;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 227, |
| "end": 248, |
| "text": "Vaswani et al., 2017)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As a result, several works have recently tried to get rid of the dependence on parallel corpora using unsupervised setting, in which the NMT model only has access to two independent monolingual corpora with one for each language (Lample et al., 2018a; Artetxe et al., 2018b; Yang et al., 2018) . Among these works, the encoder and decoder act as a standard auto-encoder (AE) that are trained to reconstruct the inputs from their noised versions. Due to the lack of cross-language signals, unsupervised NMT usually requires pseudo parallel data generated with the back-translation method for achieving the final goal of translating between source and target languages.", |
| "cite_spans": [ |
| { |
| "start": 229, |
| "end": 251, |
| "text": "(Lample et al., 2018a;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 252, |
| "end": 274, |
| "text": "Artetxe et al., 2018b;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 275, |
| "end": 293, |
| "text": "Yang et al., 2018)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Back-translation typically uses beam search (Sennrich et al., 2016a) or just greedy search (Lample et al., 2018a,b) to generate synthetic sentences. Both are approximate algorithms to identify the maximum a posteriori (MAP) output, i.e. the sentence with the highest estimated probability given an input. Although back-translation with MAP prediction has been proved to be successful, it suffers from several apparent issues when trained with maximum likelihood estimation (MLE) only, including exposure bias and loss-evaluation mismatch. Thus, this method often fails to produce the optimal synthetic sentences for the subsequent training.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 68, |
| "text": "(Sennrich et al., 2016a)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 91, |
| "end": 115, |
| "text": "(Lample et al., 2018a,b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we address the problem mentioned above with future rewarding for unsupervised NMT. The basic idea is to model the future direction of a translation and optimize the global word predictions under the policy gradient reinforcement learning framework. More concretely, we sample N translations via the policy for each input sentence and build a new objective function by combining the cross-entropy loss used in prior works with sequence-level rewards from policy gradient reinforcement learning. We consider the sequence-level reward from two aspects: 1) n-gram matching, which is the precision or recall of all sub-sequences of 1, 2, 3 and 4 tokens in generated sequence and is responsible for measuring the accuracy of surface word predictions; 2) semantic adequacy, which is the similarity between the underlying semantic representations of the generated translation and the input sentence. These two aspects of rewards are inspired by the general criteria of what properties a high-quality translation should have and are complementary to each other. Additionally, a variational inference network (VIN) is proposed to model the underlying semantics of monolingual sentences explicitly. It is used to map the source and target languages into a shared semantic space during autoencoding, as well as constrain the sentences and their translated counterparts have the same or similar semantic code during cross-language training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The major contributions of this paper can be summarized as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose a novel learning paradigm for unsupervised NMT that models future rewards to optimize the global word predictions via policy gradient reinforcement learning. To enforce the underlying semantic space, we introduce a VIN into our model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We introduce an effective reward function that jointly accounts for the n-gram matching and the semantic adequacy of generated translations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We conduct extensive experiments on English-French, English-German and NIST Chinese-to-English translation tasks. Experimental results show that the proposed approach achieves significant improvements across different language pairs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we first describe the composition of the introduced model and then give details of the newly proposed unsupervised training method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unsupervised Neural Machine Translation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The introduced translation model consists of six components: including two encoders with sharing last few layers, two completely independent decoders with one for each language, and two newly introduced VINs with one for each language. For the encoders and decoders, we follow the recently emerged Transformer (Vaswani et al., 2017) . Specifically, each encoder is composed of a stack of four identical layers, and each layer consists of a multi-head self-attention sub-layer and a fully connected feed-forward sub-layer. The encoders of the source and target languages are respectively parameterized as \u0398 enc src and \u0398 enc tgt , and the encoding operation is denoted as e(x l ; \u0398 enc l ), x l is the input sequence of word embeddings, l \u2208 {src, tgt}. The decoders are also composed of four identical layers. In addition to the two sublayers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head attention over the output of the encoder stack, the details we refer the reader to (Vaswani et al., 2017) . Similar to encoders, we denote source decoder as \u0398 dec src , target decoder as \u0398 dec tgt , and decoding operation as d(x l ; \u0398 dec l ), l \u2208 {src, tgt}. For VINs, each of them is composed of a standard Gaussian distribution N (0, 1) as the prior, and a neural posterior that is implemented as feed-forward neural network and parameterized by \u03c8 l , l \u2208 {src, tgt}.", |
| "cite_spans": [ |
| { |
| "start": 310, |
| "end": 332, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1013, |
| "end": 1035, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Composition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In this work, the entire model is trained in an unsupervised manner by optimizing two objectives: 1) variational denoising auto-encoding; 2) crosslanguage training with future rewarding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Composition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Firstly, two auto-encoders are respectively trained to learn to reconstruct their inputs. In this form, each encoder should learn to compose the input sentence of its corresponding language, and each decoder is expected to learn to recover the original input sentence from this composition. However, without any constraint, the auto-encoder would make very literal word-by-word copies, without capturing any internal structure of the input sentence involved. To address this issue, prior works often adapt the same strategy as Denosing Auto-Encoding (DAE) (Vincent et al., 2008) , and add some noise to the input sentences (Hill et al., 2016) . As shown in Figure 1 , we augment the DAE with a variational inference network (VIN) to model underlying semantics of monolingual sentences explicitly, which assumes that there exists a latent variable z from this semantic space. And this variable, together with the noised input sentence, guides the decoding process. With this assumption, we define the objective function of reconstruction as follow:", |
| "cite_spans": [ |
| { |
| "start": 556, |
| "end": 578, |
| "text": "(Vincent et al., 2008)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 623, |
| "end": 642, |
| "text": "(Hill et al., 2016)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 657, |
| "end": 665, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Variational Denoising Auto-Encoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L l rec = logP \u0398 l\u2192l (x l |z, C(x l ))", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Variational Denoising Auto-Encoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Variational Denoising Auto-Encoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u0398 l\u2192l = \u0398 enc l \u2022\u0398 dec l \u2022\u03c8 l represents the com- bination of \u0398 enc l , \u0398 dec l", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Variational Denoising Auto-Encoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "and \u03c8 l , l \u2208 {src, tgt}. C denotes a stochastic noise model, in which we apply the same method as in (Lample et al., 2018a) .", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 124, |
| "text": "(Lample et al., 2018a)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Variational Denoising Auto-Encoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The continuous latent variable z, acts as the underlying semantics here, is approximated by a neural posterior inference network q \u03c8 l (z|x l ). Following , the posterior approximation is regarded as a diagonal Gaussian N (\u00b5, diag(\u03c3 2 )), and its mean \u00b5 and variance \u03c3 2 are parameterized with deep neural networks. We also reparameterize z as a function of \u00b5 and \u03c3 (i.e., z = \u00b5 + \u03c3 \u03b5, \u03b5 is a standard Gaussian variable that plays a role of introducing noises) rather than using the standard sampling method. We aim to map source and target languages into a shared semantic space and use the following objective function for VINs:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Variational Denoising Auto-Encoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L l z = \u2212KL(q \u03c8 l (z|x l )||N (0, 1))", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Variational Denoising Auto-Encoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where l \u2208 {src, tgt}. KL(Q||P ) is the Kullback-Leibler divergence between Q and P . We finally incorporate the auto-encoder and the VIN into an end-to-end neural network, and the overall training objective of auto-encoding is to minimize the following loss function:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Variational Denoising Auto-Encoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L l ae = \u2212(L l z + L l rec )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Variational Denoising Auto-Encoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In spite of the auto-encoding, the second objective of unsupervised NMT is to constrain the model to be able to map an input sentence from the source (target) language to the target (source) language. Due to the lack of alignment information between two independent monolingual corpora, the back-translation (Sennrich et al., 2016a) method is used to synthetise a pseudo parallel corpus for cross-language training. More concretely, given an input sentence in one language, which can be firstly translated into the other language (i.e. use the corresponding encoder and the decoder of the other language) by applying the model in inference mode with greedy decoding. And then, the model is trained to reconstruct the original sentence from this translation. The most widely used method in previous works to train the model for sequence generation, called maximum likelihood estimation (MLE for short), it assumes that the ground-truth is provided at each step during training. The objective of MLE is defined as the maximization of the following log-likelihood:", |
| "cite_spans": [ |
| { |
| "start": 308, |
| "end": 332, |
| "text": "(Sennrich et al., 2016a)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-language Training with Future Rewarding", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L l 1 mle = logP \u0398 l 2 \u2192l 1 (x l 1 |z p ,x l 2 )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Cross-language Training with Future Rewarding", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-language Training with Future Rewarding", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "\u0398 l 2 \u2192l 1 = \u0398 enc l 2 \u2022 \u0398 dec l 1 \u2022 \u03c8 l 2 represents the combination of \u0398 enc l 2 , \u0398 dec l 1 and \u03c8 l 2 . z p is ap- proximated by the introduced VIN (i.e., reparam- eterized from the Gaussian q \u03c8 l 2 (z p |x l 2 )).x l 2 = d(e(x l 1 ; \u0398 enc l 1 ); \u0398 dec l 2 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-language Training with Future Rewarding", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "is obtained by greedy decoding in inference mode (l 1 = src, l 2 = tgt or l 1 = tgt, l 2 = src).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-language Training with Future Rewarding", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Unfortunately, maximizing L l 1 mle does not always produce the best results on discrete evaluation metrics such as BLEU (Papineni et al., 2002) , as the accumulation of errors caused by exposure bias as well as the inconsistency between training and testing measurements lead to the models tend to be short-sighted. We bridge the discrepancy between training and testing modes caused by MLE through learning a policy to model future rewards, which can directly optimize the global word predictions and is made possible with reinforcement learning, as illustrated in Figure 2 . To reduce the variance of the model, we use the selfcritical policy gradient learning algorithm (Rennie et al., 2017).", |
| "cite_spans": [ |
| { |
| "start": 121, |
| "end": 144, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 567, |
| "end": 575, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Future Rewarding", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "For self-critical policy gradient learning, we produce two separate output sequences at each training iteration:x, the sampled translation, which is obtained by sampling from the final output probability distribution, andx g , the baseline output, obtained by performing a greedy search. Thus, the objective function of cross-language training can be redefined as the expected advantages of the sampled sequence over the baseline Figure 2 : Illustration of the proposed method for cross-language training with future rewarding. Three aspects of losses are respectively abbreviated as L l1 z , L l1 mle and L l1 rl . And L l1 z is an auxiliary function that constrains the sentences and their translated counterparts in other language have the same or similar semantic codes. sequence:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 430, |
| "end": 438, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Future Rewarding", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L l 1 rl =E P \u0398 l 2 \u2192l 1 (x l 1 |zp,x l 2 ) [r(x l 1 ) \u2212 r(x g l 1 )] =logP \u0398 l 2 \u2192l 1 (x l 1 |z p ,x l 2 ) \u00d7 [r(x l 1 ) \u2212 r(x g l 1 )]", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Future Rewarding", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "where a terminal reward r is observed after the generation reaches the end of each sentence. It is worth noting that considering a baseline reward into training objective can reduce the variance of the model. And we can see that maximizing L rl is equivalent to maximizing the conditional likelihood of the sampled sequencex if it obtains a higher reward than the baselinex g , thus increasing the expected reward of our model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Future Rewarding", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "r in Equation 5 denotes the sequence-level reward that evaluates the quality of generated translations. In this subsection, we discuss two major factors that contribute to the success of a translation, that is, n-gram matching and semantic adequacy, and describe how to approximate these factors through computable reward functions. N-gram matching For a translation generated by a NMT model, we need to measure the accuracy of surface word predictions. For that purpose, the BLEU (Papineni et al., 2002) score is often utilized in previous works. However, the BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. Thus, we apply the smoothed version of GLEU as the reward for measuring n-gram precision or recall. More concretely, given a generated translationx l 1 in one language and the ground-truth reference x l 1 , we record all sub-sequences of 1, 2, 3 and 4 tokens in x l 1 and x l 1 , and start all n-gram counts from 1 instead of 0. Then we compute a recall R gleu , which is the ratio of the number of matching n-grams to the number of total n-grams in x l 1 (ground-truth), and a precision P gleu , which is the ratio of the number of matching n-grams to the number of total n-grams inx l 1 (generated output). Finally, the reward of the generated translationx l 1 on n-gram matching is defined as:", |
| "cite_spans": [ |
| { |
| "start": 481, |
| "end": 504, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "r 1 (x l 1 ) = min{R gleu , P gleu }", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "where r 1 ranges from zero to one and it is symmetrical when switchingx and x. Semantic adequacy We want the model can adequately convey the meaning of the source sentence to the target as much as possible. Thus, we introduce another crucial reward function that is used to measure the semantic adequacy of the generated translations. More concretely, for a generated translationx l 1 in one language, we compute the representation ofx l 1 as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e i = TFIDF(w i ), w i \u2208x l 1 w i = e i /Sum(e 1 , e 2 , ..., e Tx l 1 ) cx l 1 = Tx l 1 i=1 w ix i l 1", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "Identically, for the corresponding input sentence in another language, its representation cx l 2 can be extracted from the embedding matrixx l 2 . As the source and target word embeddings are often mapped to a shared-latent space in unsupervised NMT, we therefore can directly use the following cosine similarity as the reward for semantic adequacy:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "r 2 (x l 1 ) = (cx l 1 , cx l 2 ) cx l 1 \u2022 cx l 2", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "where (, ) indicates the dot product operation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "The final reward for a translationx l 1 is a linear combination of the rewards discussed above:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "r(x l 1 ) = r 1 (x l 1 ) + r 2 (x l 1 )", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "where r 1 (x l 1 ) and r 2 (x l 1 ) complement to each other and work jointly to guide the learning of our model. Note that the combination of these two aspects of rewards helps because it can prevent the cases that the generated translation with high ngram matching but low semantic adequacy to have relatively high rewards, and vice versa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reward", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "In addition to the aforementioned MLE objective function (Eq. 4) and the RL objective function (Eq. 5), there is an auxiliary function that constrains the sentences and their translated counterparts have the same or similar semantic code and is defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Objective Function", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "L l 1 z = \u2212KL(q \u03c8 l 1 (z q |x l 1 )||q \u03c8 l 2 (z p |x l 2 )) (10)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Objective Function", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "Finally, the overall training objective of crosslanguage training is to minimize the following loss function with hyperparameters \u03b7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Objective Function", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "L l 1 cl = \u2212((1 \u2212 \u03b7)(L l 1 mle + L l 1 z ) + \u03b7L l 1 rl ) (11)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Objective Function", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "where \u03b7 is a scaling factor. In the beginning of the training \u03b7 = 0, while as we move on with the training we can increase the \u03b7 to slowly reduce the effect of MLE loss. And \u03b7 is updated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Objective Function", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b7 = min(0.8, max(0.0, steps \u2212 n s n e \u2212 n s ))", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "Overall Objective Function", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "where steps is the global steps that the model has been updated, n s and n e are the start and end steps for increasing \u03b7 respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Objective Function", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "There are two stages in the proposed unsupervised training. In the first stage, we pre-train the proposed model with denoising auto-encoding and cross-language training, until no improvement is achieved on the development set. This ensures that the model starts with a much better policy than random because now the model can focus on the good part of the search space. In the second state, we use an annealing schedule to teach the model to produce stable sequences gradually. That is, after the initial pre-training steps, we continue training the model with future rewarding. During each iteration, we perform one batch of denoising auto-encoding and cross-language training for the source as well as target languages alternately. For model selection, we randomly extract 3000 source and target sentences to form a development set. Following (Lample et al., 2018a) , we translate the source sentences to the target language and then convert the resulting sentences back to the source language. The quality of the model is then evaluated by computing the BLEU score over the original inputs and their reconstructions via this two-step translation process. The performance is finally averaged over two directions, and the selected model is the one with the highest score.", |
| "cite_spans": [ |
| { |
| "start": 845, |
| "end": 867, |
| "text": "(Lample et al., 2018a)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "We mainly evaluate the proposed approach on the widely used English-German, English-French and NIST Chinese-to-English 1 translation tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For English-French and English-German, we use 30M sentences from the WMT monolingual News Crawl datasets from years 2007 through 2017. We use the publicly available implementation of Moses 2 scripts for tokenization. Besides, we use a shared vocabulary for source and target languages with 60K subword tokens based on byte-pair encoding (Sennrich et al., 2016b) . We remove sentences longer than 50 subword-tokens. Experimental results are reported on newstest2014 for English-French translation and newstest2016 for English-German translation. We adopt the same method as in (Lample et al., 2018b) to obtain cross-lingual embeddings.", |
| "cite_spans": [ |
| { |
| "start": 337, |
| "end": 361, |
| "text": "(Sennrich et al., 2016b)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 576, |
| "end": 598, |
| "text": "(Lample et al., 2018b)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For NIST Chinese-to-English translation, our training data consists of 1.6M sentence pairs randomly extracted from LDC corpora 3 , which has been widely utilized by previous works. Similar to (Yang et al., 2018) , we build the monolingual dataset by randomly shuffling the Chinese and English sentences respectively since the data set is not big enough. We set the vocabulary size to 30K for both Chinese and English. The average BLEU score over NIST02\u223c06 is reported 1 The reason that we do not conduct experiments on English-to-Chinese translation is that we do not get public test sets for English-to-Chinese.", |
| "cite_spans": [ |
| { |
| "start": 192, |
| "end": 211, |
| "text": "(Yang et al., 2018)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 468, |
| "end": 469, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "2 http://www.statmt.org/moses/ 3 LDC2002E18, LDC2003E07, LDC2003E14, the Hansards portion of LDC2004T07, LDC2004T08, and LDC2005T06 en\u2192fr fr\u2192en en\u2192de de\u2192en zh\u2192en Existing Unsupervised NMT Artetxe et al. (2018b) 15 in this paper. To pre-train cross-lingual embeddings, we utilize the monolingual corpora to train the embeddings for each language independently by using word2vec (Mikolov et al., 2013) . Then we apply the public implementation 4 proposed by Artetxe et al. (2017) to map these embeddings into a shared latent space and keep the mapped embeddings fixed during training. For NIST Chinese-to-English, we apply case-insensitive NIST BLEU computed by the script mteval-v13a.pl to evaluate the translation performance. For English-German and English-French, we evaluate the translation performance with the script multi-bleu.pl.", |
| "cite_spans": [ |
| { |
| "start": 184, |
| "end": 210, |
| "text": "NMT Artetxe et al. (2018b)", |
| "ref_id": null |
| }, |
| { |
| "start": 377, |
| "end": 399, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 456, |
| "end": 477, |
| "text": "Artetxe et al. (2017)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We set the following hyper-parameters: word embedding dimension as 512, hidden size of self-attention as 512, hidden size of fully connected layers as 1024 and the head number as 8. We share the last layer of encoders in both languages. The dropout rate is set as 0.1, 0.3 and 0.2 during the training for En-Fr, En-De and Zh-to-En, respectively. We perform a fixed number of iterations (500K) to train each model, and set n s = 300K, n e = 400K, for gradually increasing the effect of future rewarding. We use the Adam optimizer with a simple learning rate schedule: we start with a learning rate of 10 \u22124 , after 300K updates, we begin to halve the learning rate every 100K steps. We set the mini-batch size as 64. At decoding time, we use greedy search.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyper-parameters", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our method is compared with several previous unsupervised NMT systems (Artetxe et al., 2018b; 4 https://github.com/artetxem/vecmap Lample et al., 2018a,b; Yang et al., 2018; Wu et al., 2019; Song et al., 2019) . Although, Song et al. (2019) have achieved comparable results with supervised NMT systems with larger monolingual data (Wikipedia data) and bigger model 5 , we still list the results that obtained with the same data and model as ours for fair comparison. We also consider a \"Baseline\" model, with the same architecture as described in Section 2.1 except for the variational inference network and is trained using MLE only. We directly copy the experimental results of previous models reported in their papers and report the BLEU scores on English-French, English-German and NIST Chinese-to-English test sets in Table 1 .", |
| "cite_spans": [ |
| { |
| "start": 70, |
| "end": 93, |
| "text": "(Artetxe et al., 2018b;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 94, |
| "end": 95, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 131, |
| "end": 154, |
| "text": "Lample et al., 2018a,b;", |
| "ref_id": null |
| }, |
| { |
| "start": 155, |
| "end": 173, |
| "text": "Yang et al., 2018;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 174, |
| "end": 190, |
| "text": "Wu et al., 2019;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 191, |
| "end": 209, |
| "text": "Song et al., 2019)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 222, |
| "end": 240, |
| "text": "Song et al. (2019)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 823, |
| "end": 830, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Overall Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "As shown in Table 1 , our approach achieves BLEU score of 27.59 and 27.15 on En\u2192Fr and Fr\u2192En translations respectively, which outperforms Lample et al. (2018b) by more than 2 BLEU points on both En\u2192Fr and Fr\u2192En. For the En-De, we achieve 19.65 and 23.42 BLEU scores on En\u2192De and De\u2192En respectively, with up to +10.09 BLEU points improvement over previous unsupervised NMT models. For the Chinese-to-English translation, the proposed method leads to a substantial improvement (up to 54%) over the previous system listed in Yang et al. (2018) . Compared to baseline, our approach demonstrates significant improvements by more than 2 BLEU points over three benchmarks. These results indicate that the newly proposed training method that models future rewards to optimize global word predictions for unsupervised NMT is promising and enables the model to generate quality translations.", |
| "cite_spans": [ |
| { |
| "start": 522, |
| "end": 540, |
| "text": "Yang et al. (2018)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Overall Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this section, we conduct some analysis over the proposed method by taking English-French translation as an example.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "To understand the importance of different components of the proposed system, we perform an ablation study by training multiple versions of our model with some missing components: the variational inference network and the future rewarding method. Results are reported in Table 2 . From the table, we can see that removing the future rewarding makes the accuracy drop by 0.98/1.02 BLEU points. Without the variational inference networks, the accuracy decreases with 0.62/0.69 BLEU points. These findings demonstrate that both the future rewarding and the VIN are important, and both contribute to the improvement of translation accuracy. The more critical component is the future rewarding technology, which is vital to optimize the global word predictions. Table 2 : Ablation study of our method on English-French translation task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 270, |
| "end": 277, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 756, |
| "end": 763, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "We perform qualitative evaluation on the pseudo parallel data generated with the back-translation method. To this end, we conduct a \"round-trip\" translation (e.g., src \u2192 t\u0303gt \u2192 \u015drc), where src and t\u0303gt form a pseudo parallel corpus, and \u015drc is the reconstruction from t\u0303gt. We explore three settings for qualitative evaluation: 1) UNKs, the ratio of the number of unknown words to the number of total words in t\u0303gt; 2) the average over all sentences in t\u0303gt with respect to their semantic adequacy, denoted as SA; 3) the BLEU scores over the original inputs and their reconstructions, denoted as r-BLEU. All settings are finally averaged over two directions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Qualitative Comparison of Back-translating", |
| "sec_num": "3.4.2" |
| }, |
| { |
| "text": "Results are shown in Table 3 . The proposed training method introduces significant boosts in all of the three settings, with reducing 1.34% of unknown words, increasing the semantic adequacy UNKs SA r-BLEU Baseline 3.51% 0.794 54.23 +Future Rewarding 2.17% 0.882 60.08 Table 3 : Qualitative comparison of the generated pseudo parallel sentences from the models trained with MLE only and with the proposed training method on English-French test set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 21, |
| "end": 28, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 269, |
| "end": 276, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Qualitative Comparison of Back-translating", |
| "sec_num": "3.4.2" |
| }, |
| { |
| "text": "Better than Baseline S: He put together a real feast for his fans to mark the occasion. R: Pour l'occasion, il a concoct\u00e9 un vrai festin pour ses fans. B: Il a mis en sc\u00e8ne un vrai festin pour son public pour marquer le souvenir. O: Il a mis un vrai festin pour ses fans pour marquer la circonstance. S: Des scientifiques viennent de mettre en lumi\u00e8re la fa\u00e7on dont les mouvements de la queue d'un chien sont li\u00e9s\u00e0 son humeur. R: Scientists have shed more light on how the movements of a dog's tail are linked to its mood. B: Scientists come out of light the way the movements of the tail of a dog are linked to his spirits. O: Scientists come to light the way of the movements of a dog's tail are related to its mood. Worse than Baseline S: The recalled models were built between August 1 and September 10. R: Les mod\u00e8les rappel\u00e9s ont\u00e9t\u00e9 construits entre le 1er ao\u00fbt et le 10 septembre. B: Les mod\u00e8les rappel\u00e9s ont\u00e9t\u00e9 construits entre le 1er ao\u00fbt et le 10 septembre. O: Les mod\u00e8les de racont\u00e9 ont\u00e9t\u00e9 construits entre le 1er ao\u00fbt et le 10 septembre. S: Elles connaissent leur entreprise mieux que personne. R: They know their business better than anyone else. B: They know their business better than anyone else. O: They know their company better than anyone. Table 4 : Translation examples from English-French test set (English-to-French is above the dotted line and French-to-English is below the dotted line). B: the baseline model; O: our proposed model. by 0.088 and improving r-BLEU points by 5.85. This is in line with our expectations, as the proposed future rewarding method is not optimized to predict the next token, but rather to increase longterm reward. Table 4 shows four example translations. The first part shows examples for which the proposed model reached a higher BLEU score than the baseline model. 
We find that the translation produced by the baseline model doesn't adequately convey the meaning of the source sentence to the target. By contrast, the proposed future rewarding method enables the model to generate translations that are more diverse while ensuring the meaning of the source sentences, such as \"circonstance\" and \"come to light\". The possible reason is that we apply the semantic adequacy to reward translations that have different syntax structures and expressions but share the same meaning as the ground-truth sentence. The second part contains examples where the baseline achieved better BLEU score than our model, that is, in a few cases, our model chooses inappropriate words that are under the same topic as reference words.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1260, |
| "end": 1267, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 1668, |
| "end": 1675, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Qualitative Comparison of Back-translating", |
| "sec_num": "3.4.2" |
| }, |
| { |
| "text": "In order to reduce the exposure bias and optimize the metrics used to evaluate sequence modeling tasks (like BLEU, ROUGE or METEOR) directly, reinforcement learning (RL) has been widely used in many of recent works on machine translation (Ranzato et al., 2016; Shen et al., 2016; He et al., 2017; Bahdanau et al., 2017; Li et al., 2017 ), text summarization (Paulus et al., 2018; Wu and Hu, 2018; Li et al., 2018; , dialogue generation (Li et al., 2016) , and question answering . However, our proposed method is the first to combine reinforcement learning with unsupervised NMT to explicitly enhance back-translation.", |
| "cite_spans": [ |
| { |
| "start": 238, |
| "end": 260, |
| "text": "(Ranzato et al., 2016;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 261, |
| "end": 279, |
| "text": "Shen et al., 2016;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 280, |
| "end": 296, |
| "text": "He et al., 2017;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 297, |
| "end": 319, |
| "text": "Bahdanau et al., 2017;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 320, |
| "end": 335, |
| "text": "Li et al., 2017", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 358, |
| "end": 379, |
| "text": "(Paulus et al., 2018;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 380, |
| "end": 396, |
| "text": "Wu and Hu, 2018;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 397, |
| "end": 413, |
| "text": "Li et al., 2018;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 436, |
| "end": 453, |
| "text": "(Li et al., 2016)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Recently, motivated by the success of crosslingual embeddings (Artetxe et al., 2016; Zhang et al., 2017; Conneau et al., 2017) , several works have tried to train NMT or SMT models using unsupervised setting, in which the model only has access to unlabeled data. For example, Lample et al. (2018a) propose a model that consists of a single encoder and a single decoder for both languages, respectively responsible for encoding source and target sentences to a shared latent space and to decode from that latent space to the source or target domain. Different from (Lample et al., 2018a) , Artetxe et al. (2018b) introduce a shared encoder but two independent decoders with one for each language. Both of these two works mentioned above utilize denoising auto-encoding to reconstruct their noisy inputs and incorporate back-translation into cross-language training procedure. Further, Yang et al. (2018) extend the single encoder by using two independent encoders but sharing some partial weights, which are responsible for alleviating the weakness in keeping language-specific characteristics of the shared encoder. And the entire system is fine-tuned by introducing two global GANs with one for each language. More recently, Artetxe et al. (2018a) and Lample et al. (2018b) propose an alternative approach based on phrase-based statistical machine translation, which profits from the modular architecture of SMT. In addition, Lample et al. (2018b) also introduce a novel cross-lingual embedding training method which is particularly suitable for related languages (e.g., English-French and English-German). Ren et al. 2019introduce SMT models as posterior regularization, in which SMT and NMT models boost each other through iterative back-translation in a unified EM training algorithm. Wu et al. (2019) propose an alternative for back-translation, , extract-edit, to extract and then edit real sentences from the target monolingual corpora. Lample and Conneau (2019) and Song et al. 
(2019) propose to pretrain cross-lingual language models for the initialization stage of unsupervised neural machine translation, which is critical to the performance of their proposed model. In contrast to theirs, we propose an effective training method for unsupervised NMT that models future rewards to optimize the global word predictions via neural policy reinforcement learning, which can be applied to arbitrary architectures and language pairs easily.", |
| "cite_spans": [ |
| { |
| "start": 62, |
| "end": 84, |
| "text": "(Artetxe et al., 2016;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 85, |
| "end": 104, |
| "text": "Zhang et al., 2017;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 105, |
| "end": 126, |
| "text": "Conneau et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 276, |
| "end": 297, |
| "text": "Lample et al. (2018a)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 564, |
| "end": 586, |
| "text": "(Lample et al., 2018a)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 589, |
| "end": 611, |
| "text": "Artetxe et al. (2018b)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 884, |
| "end": 902, |
| "text": "Yang et al. (2018)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 1226, |
| "end": 1248, |
| "text": "Artetxe et al. (2018a)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1253, |
| "end": 1274, |
| "text": "Lample et al. (2018b)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1427, |
| "end": 1448, |
| "text": "Lample et al. (2018b)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1789, |
| "end": 1805, |
| "text": "Wu et al. (2019)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 1944, |
| "end": 1969, |
| "text": "Lample and Conneau (2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1974, |
| "end": 1992, |
| "text": "Song et al. (2019)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this paper, we have proposed a novel learning paradigm for unsupervised NMT that models future rewards to optimize the global word predictions via reinforcement learning, in which we design an effective reward function that jointly accounts for the n-gram matching and the semantic adequacy of generated translations. To constrain the corresponding sentences in two languages to have the same or similar semantic code, we also introduce a variational inference network into the proposed model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We test the proposed model on WMT'14 English-French, WMT'16 English-German and NIST Chinese-to-English translation tasks. Experiment results show that our approach leads to significant improvements over various language pairs, especially on distantly-related languages such as Chinese and English.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our model can also adopt such an advanced pre-training technique; we leave this for future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the anonymous reviewers for their valuable comments and suggestions. This work was supported by the National Key Research and Development Program of China (No. 2017YFB0803301). Yue Hu is the corresponding author.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": "6" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Learning principled bilingual mappings of word embeddings while preserving monolingual invariance", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2289--2294", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2016. Learning principled bilingual mappings of word em- beddings while preserving monolingual invariance. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, EMNLP 2016, pages 2289-2294.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning bilingual word embeddings with (almost) no bilingual data", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "451--462", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1042" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2017. Learning bilingual word embeddings with (almost) no bilingual data. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics, ACL 2017, pages 451-462.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Unsupervised statistical machine translation", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3632--3642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018a. Unsupervised statistical machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, EMNLP 2018, pages 3632-3642.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Unsupervised neural machine translation", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018b. Unsupervised neural ma- chine translation. In ICLR 2018.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "An actor-critic algorithm for sequence prediction", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Philemon", |
| "middle": [], |
| "last": "Brakel", |
| "suffix": "" |
| }, |
| { |
| "first": "Kelvin", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Anirudh", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Philemon Brakel, Kelvin Xu, Anirudh Goyal, Ryan Lowe, Joelle Pineau, Aaron Courville, and Yoshua Bengio. 2017. An actor-critic algorithm for sequence prediction. In ICLR 2017.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In ICLR 2015.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Word translation without parallel data", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Herv\u00e9", |
| "middle": [], |
| "last": "J\u00e9gou", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1710.04087" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herv\u00e9 J\u00e9gou. 2017. Word translation without parallel data. In arXiv:1710.04087.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Convolutional sequence to sequence learning", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Gehring", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Denis", |
| "middle": [], |
| "last": "Yarats", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann N", |
| "middle": [], |
| "last": "Dauphin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1705.03122" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Gehring, Michael Auli, David Grangier, De- nis Yarats, and Yann N Dauphin. 2017. Con- volutional sequence to sequence learning. In arXiv:1705.03122.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Decoding with value networks for neural machine translation", |
| "authors": [ |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanqing", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingce", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Liwei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tieyan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "178--187", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Di He, Hanqing Lu, Yingce Xia, Tao Qin, Liwei Wang, and Tieyan Liu. 2017. Decoding with value net- works for neural machine translation. In Advances in Neural Information Processing Systems, NIPS 2017, pages 178-187.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Learning distributed representations of sentences from unlabelled data", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "1367--1377", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Hill, Kyunghyun Cho, and Anna Korhonen. 2016. Learning distributed representations of sentences from unlabelled data. In Proceedings of NAACL- HLT 2016, pages 1367-1377.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Reinforced mnemonic reader for machine reading comprehension", |
| "authors": [ |
| { |
| "first": "Minghao", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxing", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "4099--4106", |
| "other_ids": { |
| "DOI": [ |
| "10.24963/ijcai.2018/570" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minghao Hu, Yuxing Peng, Zhen Huang, Xipeng Qiu, Furu Wei, and Ming Zhou. 2018. Reinforced mnemonic reader for machine reading comprehen- sion. In Proceedings of the Twenty-Seventh Inter- national Joint Conference on Artificial Intelligence, IJCAI 2018, pages 4099-4106.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Semi-supervised learning with deep generative models", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Shakir", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Danilo", |
| "middle": [ |
| "Jimenez" |
| ], |
| "last": "Rezende", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems, NIPS 2014", |
| "volume": "", |
| "issue": "", |
| "pages": "3581--3589", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma, Shakir Mohamed, Danilo Jimenez Rezende, and Max Welling. 2014. Semi-supervised learning with deep generative models. In Advances in Neural Information Processing Systems, NIPS 2014, pages 3581-3589.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Auto-encoding variational bayes", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Max Welling. 2014. Auto- encoding variational bayes. In ICLR 2014.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Cross-lingual language model pretraining", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1901.07291" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross-lingual language model pretraining. In arXiv:1901.07291.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Unsupervised machine translation using monolingual corpora only", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Alexis Conneau, Ludovic De- noyer, and Marc'Aurelio Ranzato. 2018a. Unsu- pervised machine translation using monolingual cor- pora only. In ICLR 2018.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Phrase-based & neural unsupervised machine translation", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "5039--5049", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Lu- dovic Denoyer, and Marc'Aurelio Ranzato. 2018b. Phrase-based & neural unsupervised machine trans- lation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, EMNLP 2018, pages 5039-5049.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Learning to decode for future success", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1701.06549" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Will Monroe, and Dan Jurafsky. 2017. Learning to decode for future success. In arXiv:1701.06549.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Deep reinforcement learning for dialogue generation", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1192--1202", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Will Monroe, Alan Ritter, Michel Galley, Jianfeng Gao, and Dan Jurafsky. 2016. Deep rein- forcement learning for dialogue generation. In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing, ENMLP 2016, page 1192-1202.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Actorcritic based training framework for abstractive summarization", |
| "authors": [ |
| { |
| "first": "Piji", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Lidong", |
| "middle": [], |
| "last": "Bing", |
| "suffix": "" |
| }, |
| { |
| "first": "Wai", |
| "middle": [], |
| "last": "Lam", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.11070" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piji Li, Lidong Bing, and Wai Lam. 2018. Actor- critic based training framework for abstractive sum- marization. In arXiv:1803.11070.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems, NIPS 2013", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Cor- rado, and Jeffrey Dean. 2013. Distributed represen- tations of words and phrases and their composition- ality. In Advances in Neural Information Processing Systems, NIPS 2013, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th annual meeting on association for computational linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting on association for compu- tational linguistics, ACL 2002, pages 311-318.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "A deep reinforced model for abstractive summarization", |
| "authors": [ |
| { |
| "first": "Romain", |
| "middle": [], |
| "last": "Paulus", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Romain Paulus, Caiming Xiong, and Richard Socher. 2018. A deep reinforced model for abstractive sum- marization. In ICLR 2018.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Sequence level training with recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc'Aurelio Ranzato, Sumit Chopra, Michael Auli, and Wojciech Zaremba. 2016. Sequence level train- ing with recurrent neural networks. In ICLR 2016.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Unsupervised neural machine translation with SMT as posterior regularization", |
| "authors": [ |
| { |
| "first": "Shuo", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhirui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shujie", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuai", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1901.04112" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuo Ren, Zhirui Zhang, Shujie Liu, Ming Zhou, and Shuai Ma. 2019. Unsupervised neural machine translation with SMT as posterior regularization. In arXiv:1901.04112.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Self-critical sequence training for image captioning", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [ |
| "J" |
| ], |
| "last": "Rennie", |
| "suffix": "" |
| }, |
| { |
| "first": "Etienne", |
| "middle": [], |
| "last": "Marcheret", |
| "suffix": "" |
| }, |
| { |
| "first": "Youssef", |
| "middle": [], |
| "last": "Mroueh", |
| "suffix": "" |
| }, |
| { |
| "first": "Jarret", |
| "middle": [], |
| "last": "Ross", |
| "suffix": "" |
| }, |
| { |
| "first": "Vaibhava", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "1179--1195", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/CVPR.2017.131" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven J Rennie, Etienne Marcheret, Youssef Mroueh, Jarret Ross, and Vaibhava Goel. 2017. Self-critical sequence training for image captioning. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, pages 1179-1195.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Improving neural machine translation models with monolingual data", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "86--96", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016a. Improving neural machine translation mod- els with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics, ACL 2016, pages 86-96.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016b. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics, ACL 2016, pages 1715-1725.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Minimum risk training for neural machine translation", |
| "authors": [ |
| { |
| "first": "Shiqi", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongjun", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "1683--1692", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiqi Shen, Yong Cheng, Zhongjun He, Wei He, Hua Wu, Maosong Sun, and Yang Liu. 2016. Minimum risk training for neural machine translation. In Pro- ceedings of the 54th Annual Meeting of the Asso- ciation for Computational Linguistics, ACL 2016, pages 1683-1692.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "MASS: masked sequence to sequence pre-training for language generation", |
| "authors": [ |
| { |
| "first": "Kaitao", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1905.02450" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, and Tie- Yan Liu. 2019. MASS: masked sequence to se- quence pre-training for language generation. In arXiv:1905.02450.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems, NIPS 2014", |
| "volume": "", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. In Advances in Neural Information Process- ing Systems, NIPS 2014, pages 3104-3112.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.03762" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In arXiv:1706.03762.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Extracting and composing robust features with denoising autoencoders", |
| "authors": [ |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Larochelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre", |
| "middle": [ |
| "Antoine" |
| ], |
| "last": "Manzagol", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Machine Learning, Proceedings of the Twenty-Fifth International Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "1096--1103", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/1390156.1390294" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pascal Vincent, Hugo Larochelle, Yoshua Bengio, and Pierre Antoine Manzagol. 2008. Extracting and composing robust features with denoising autoen- coders. In Machine Learning, Proceedings of the Twenty-Fifth International Conference, ICML 2008, page 1096-1103.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "A reinforced topic-aware convolutional sequence-to-sequence model for abstractive text summarization", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Junlin", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yunzhe", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1805.03616" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Wang, Junlin Yao, Yunzhe Tao, Li Zhong, Wei Liu, and Qiang Du. 2018. A reinforced topic-aware con- volutional sequence-to-sequence model for abstrac- tive text summarization. In arXiv:1805.03616.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Extract and edit: An alternative to back-translation for unsupervised neural machine translation", |
| "authors": [ |
| { |
| "first": "Jiawei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "Yang" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.02331" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiawei Wu, Xin Wang, and William Yang Wang. 2019. Extract and edit: An alternative to back-translation for unsupervised neural machine translation. In arXiv:1904.02331.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.08144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, and Klaus Macherey. 2016. Google's neural machine transla- tion system: Bridging the gap between human and machine translation. In arXiv:1609.08144.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Learning to extract coherent summary via deep reinforcement learning", |
| "authors": [ |
| { |
| "first": "Yuxiang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Baotian", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, AAAI 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "5602--5609", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuxiang Wu and Baotian Hu. 2018. Learning to extract coherent summary via deep reinforcement learning. In Proceedings of the Thirty-Second AAAI Confer- ence on Artificial Intelligence, AAAI 2018, pages 5602-5609.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Unsupervised neural machine translation with weight sharing", |
| "authors": [ |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Feng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.09057" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhen Yang, Wei Chen, Feng Wang, and Bo Xu. 2018. Unsupervised neural machine translation with weight sharing. In arXiv:1804.09057.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Adversarial training for unsupervised bilingual lexicon induction", |
| "authors": [ |
| { |
| "first": "Meng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Huanbo", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1959--1970", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1179" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meng Zhang, Yang Liu, Huanbo Luan, and Maosong Sun. 2017. Adversarial training for unsupervised bilingual lexicon induction. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics, ACL 2017, pages 1959-1970.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Illustration of Variational Denoising Auto-Encoding. The newly introduced VIN is highlighted in red. Two aspects of losses are respectively abbreviated as L l z and L l rec .", |
| "uris": null |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td>Artetxe et al. (2018)</td><td>15.13</td><td>15.56</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Lample et al. (2018a)</td><td>15.05</td><td>14.31</td><td>9.64</td><td>13.33</td><td>-</td></tr><tr><td>Yang et al. (2018)</td><td>16.97</td><td>15.58</td><td>10.86</td><td>14.62</td><td>14.52</td></tr><tr><td colspan=\"2\">Lample et al. (2018b), NMT 25.14</td><td>24.18</td><td>17.16</td><td>21.00</td><td>-</td></tr><tr><td>Wu et al. (2019)</td><td>27.56</td><td>26.90</td><td>19.55</td><td>23.29</td><td>-</td></tr><tr><td>Song et al. (2019)</td><td>27.41</td><td>27.09</td><td>18.21</td><td>23.37</td><td>-</td></tr><tr><td/><td colspan=\"2\">This work</td><td/><td/><td/></tr><tr><td>MLE</td><td>25.47</td><td>24.51</td><td>17.04</td><td>21.13</td><td>18.26</td></tr><tr><td>(+Future Rewarding)</td><td colspan=\"3\">27.59 27.15 19.65</td><td>23.42</td><td>22.40</td></tr></table>", |
| "num": null, |
| "text": "Results of the proposed method in comparison to existing unsupervised NMT systems (BLEU).", |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |