| { |
| "paper_id": "D18-1049", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:45:53.482821Z" |
| }, |
| "title": "Improving the Transformer Translation Model with Document-Level Context", |
| "authors": [ |
| { |
| "first": "Jiacheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Institute for Artificial Intelligence State Key Laboratory of Intelligent Technology and Systems", |
| "institution": "Tsinghua University", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Huanbo", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Institute for Artificial Intelligence State Key Laboratory of Intelligent Technology and Systems", |
| "institution": "Tsinghua University", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Institute for Artificial Intelligence State Key Laboratory of Intelligent Technology and Systems", |
| "institution": "Tsinghua University", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Feifei", |
| "middle": [], |
| "last": "Zhai #", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jingfang", |
| "middle": [], |
| "last": "Xu #", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Soochow University", |
| "location": { |
| "settlement": "Suzhou", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Institute for Artificial Intelligence State Key Laboratory of Intelligent Technology and Systems", |
| "institution": "Tsinghua University", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Although the Transformer translation model (Vaswani et al., 2017) has achieved state-ofthe-art performance in a variety of translation tasks, how to use document-level context to deal with discourse phenomena problematic for Transformer still remains a challenge. In this work, we extend the Transformer model with a new context encoder to represent document-level context, which is then incorporated into the original encoder and decoder. As large-scale document-level parallel corpora are usually not available, we introduce a two-step training method to take full advantage of abundant sentence-level parallel corpora and limited document-level parallel corpora. Experiments on the NIST Chinese-English datasets and the IWSLT French-English datasets show that our approach improves over Transformer significantly. 1", |
| "pdf_parse": { |
| "paper_id": "D18-1049", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Although the Transformer translation model (Vaswani et al., 2017) has achieved state-ofthe-art performance in a variety of translation tasks, how to use document-level context to deal with discourse phenomena problematic for Transformer still remains a challenge. In this work, we extend the Transformer model with a new context encoder to represent document-level context, which is then incorporated into the original encoder and decoder. As large-scale document-level parallel corpora are usually not available, we introduce a two-step training method to take full advantage of abundant sentence-level parallel corpora and limited document-level parallel corpora. Experiments on the NIST Chinese-English datasets and the IWSLT French-English datasets show that our approach improves over Transformer significantly. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The past several years have witnessed the rapid development of neural machine translation (NMT) (Sutskever et al., 2014; Bahdanau et al., 2015) , which investigates the use of neural networks to model the translation process. Showing remarkable superiority over conventional statistical machine translation (SMT), NMT has been recognized as the new de facto method and is widely used in commercial MT systems . A variety of NMT models have been proposed to map between natural languages such as RNNencdec (Sutskever et al., 2014) , RNNsearch (Bahdanau et al., 2015) , ConvS2S (Gehring et al., 2017) , and Transformer (Vaswani et al., 2017) . Among them, the Transformer model has achieved state-of-the-art translation performance. The ca-pability to minimize the path length between longdistance dependencies in neural networks contributes to its exceptional performance.", |
| "cite_spans": [ |
| { |
| "start": 96, |
| "end": 120, |
| "text": "(Sutskever et al., 2014;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 121, |
| "end": 143, |
| "text": "Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 505, |
| "end": 529, |
| "text": "(Sutskever et al., 2014)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 542, |
| "end": 565, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 576, |
| "end": 598, |
| "text": "(Gehring et al., 2017)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 617, |
| "end": 639, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "However, the Transformer model still suffers from a major drawback: it performs translation only at the sentence level and ignores documentlevel context. Document-level context has proven to be beneficial for improving translation performance, not only for conventional SMT (Gong et al., 2011; Hardmeier et al., 2012) , but also for NMT (Wang et al., 2017; Tu et al., 2018) . Bawden et al. (2018) indicate that it is important to exploit document-level context to deal with contextdependent phenomena which are problematic for machine translation such as coreference, lexical cohesion, and lexical disambiguation.", |
| "cite_spans": [ |
| { |
| "start": 274, |
| "end": 293, |
| "text": "(Gong et al., 2011;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 294, |
| "end": 317, |
| "text": "Hardmeier et al., 2012)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 337, |
| "end": 356, |
| "text": "(Wang et al., 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 357, |
| "end": 373, |
| "text": "Tu et al., 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 376, |
| "end": 396, |
| "text": "Bawden et al. (2018)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While document-level NMT has attracted increasing attention from the community in the past two years (Jean et al., 2017; Kuang et al., 2017; Tiedemann and Scherrer, 2017; Wang et al., 2017; Maruf and Haffari, 2018; Bawden et al., 2018; Tu et al., 2018; Voita et al., 2018) , to the best of our knowledge, only one existing work has endeavored to model document-level context for the Transformer model (Voita et al., 2018) . Previous approaches to document-level NMT have concentrated on the RNNsearch model (Bahdanau et al., 2015) . It is challenging to adapt these approaches to Transformer because they are designed specifically for RNNsearch.", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 120, |
| "text": "(Jean et al., 2017;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 121, |
| "end": 140, |
| "text": "Kuang et al., 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 141, |
| "end": 170, |
| "text": "Tiedemann and Scherrer, 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 171, |
| "end": 189, |
| "text": "Wang et al., 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 190, |
| "end": 214, |
| "text": "Maruf and Haffari, 2018;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 215, |
| "end": 235, |
| "text": "Bawden et al., 2018;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 236, |
| "end": 252, |
| "text": "Tu et al., 2018;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 253, |
| "end": 272, |
| "text": "Voita et al., 2018)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 401, |
| "end": 421, |
| "text": "(Voita et al., 2018)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 507, |
| "end": 530, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we propose to extend the Transformer model to take advantage of documentlevel context. The basic idea is to use multihead self-attention (Vaswani et al., 2017) to compute the representation of document-level context, which is then incorporated into the encoder and decoder using multi-head attention. Since largescale document-level parallel corpora are usually hard to acquire, we propose to train sentencelevel model parameters on sentence-level paral- (Vaswani et al., 2017) and (b) the extended Transformer translation model that exploits document-level context. The newly introduced modules are highlighted in red.", |
| "cite_spans": [ |
| { |
| "start": 151, |
| "end": 173, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 469, |
| "end": 491, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "lel corpora first and then estimate document-level model parameters on document-level parallel corpora while keeping the learned original sentencelevel Transformer model parameters fixed. Our approach has the following advantages:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1. Increased capability to capture context: the use of multi-head attention, which significantly reduces the path length between longrange dependencies, helps to improve the capability to capture document-level context;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2. Small computational overhead: as all newly introduced modules are based on highly parallelizable multi-head attention, there is no significant slowdown in both training and decoding;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "3. Better use of limited labeled data: our approach is capable of maintaining the superiority over the sentence-level counterpart even when only small-scale document-level parallel corpora are available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Experiments show that our approach achieves an improvement of 1.96 and 0.89 BLEU points over Transformer on Chinese-English and French-English translation respectively by exploiting document-level context. It also outperforms a state-of-the-art cache-based method (Kuang et al., 2017) adapted for Transformer.", |
| "cite_spans": [ |
| { |
| "start": 264, |
| "end": 284, |
| "text": "(Kuang et al., 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our goal is to enable the Transformer translation model (Vaswani et al., 2017) as shown in Figure 1 (a) to exploit document-level context.", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 78, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 91, |
| "end": 100, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Formally, let X = x (1) , . . . , x (k) , . . . , x (K) be a source-language document composed of K source sentences. We use", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "x (k) = x (k) 1 , . . . , x (k) i , . . . , x (k) I", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "to denote the k-th source sentence containing I words. x (k) i denotes the i-th word in the k-th source sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Likewise, the corresponding target-language document is denoted by Y = y (1) , . . . , y (k) , . . . , y (K) and y (k) ", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 108, |
| "text": "(K)", |
| "ref_id": null |
| }, |
| { |
| "start": 115, |
| "end": 118, |
| "text": "(k)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "= y (k) 1 , . . . , y (k) j , . . . , y (k) J represents the k-th target sentence containing J words. y (k)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "j denotes the j-th word in the k-th target sentence. We assume that X, Y constitutes a parallel document and each x (k) , y (k) forms a parallel sentence. Therefore, the document-level translation probability is given by", |
| "cite_spans": [ |
| { |
| "start": 116, |
| "end": 119, |
| "text": "(k)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (Y|X; \u03b8) = K k=1 P (y (k) |X, Y <k ; \u03b8),", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Y <k = y (1) , . . . , y (k\u22121)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "is a partial translation. For generating y (k) , the source document X can be divided into three parts: (1) the k-th source sentence X =k = x (k) , (2) the source-side document-level context on the left X <k = x (1) , . . . , x (k\u22121) , and (3) the source-side document-level context on the right X >k = x (k+1) , . . . , x (K) . As the languages used in our experiments (i.e., Chinese and English) are written left to right, we omit X >k for simplicity.", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 46, |
| "text": "(k)", |
| "ref_id": null |
| }, |
| { |
| "start": 323, |
| "end": 326, |
| "text": "(K)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We also omit the target-side document-level context Y <k due to the translation error propagation problem (Wang et al., 2017) : errors made in translating one sentence will be propagated to the translation process of subsequent sentences. Interestingly, we find that using source-side documentlevel context X <k , which conveys the same information with Y <k , helps to compute better representations on the target side (see Table 8 ).", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 125, |
| "text": "(Wang et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 425, |
| "end": 432, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "As a result, the document-level translation probability can be approximated as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (Y|X; \u03b8) \u2248 K k=1 P (y (k) |X <k , x (k) ; \u03b8),", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "= K k=1 J j=1 P (y (k) j |X <k , x (k) , y (k) <j ; \u03b8),", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where y", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "(k) <j = y (k) 1 , . . . , y", |
| "eq_num": "(k)" |
| } |
| ], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "j\u22121 is a partial translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In this way, the document-level translation model can still be defined at the sentence level without sacrificing efficiency except that the source-side document-level context X <k (or context for short) is taken into account.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In the following, we will introduce how to represent the context (Section 2.2), how to integrate the context (Section 2.3), and how to train the model especially when only limited training data is available (Section 2.4).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "As document-level context often includes several sentences, it is important to capture long-range dependencies and identify relevant information. We use multi-head self-attention (Vaswani et al., 2017) to compute the representation of documentlevel context because it is capable of reducing the maximum path length between long-range dependencies to O(1) (Vaswani et al., 2017) and determining the relative importance of different locations in the context (Bahdanau et al., 2015) . Because of this property, multi-head self-attention has proven to be effective in other NLP tasks such as constituency parsing (Kitaev and Klein, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 179, |
| "end": 201, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 355, |
| "end": 377, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 456, |
| "end": 479, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 609, |
| "end": 633, |
| "text": "(Kitaev and Klein, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "As shown in Figure 1 (b), we use a self-attentive encoder to compute the representation of X <k . The input to the self-attentive encoder is a sequence of context word embeddings, represented as a matrix. Suppose X <k is composed of M source words:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "X <k = x 1 , . . . , x m , . . . , x M .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We use x m \u2208 R D\u00d71 to denote the vector representation of x m that is the sum of word embedding and positional encoding (Vaswani et al., 2017) . Therefore, the matrix representation of X <k is given by", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 142, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "X c = [x 1 ; . . . ; x M ],", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where X c \u2208 R D\u00d7M is the concatenation of all vector representations of all source contextual words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The self-attentive encoder is composed of a stack of N c identical layers. Each layer has two sub-layers. The first sub-layer is a multi-head selfattention:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "A (1) = MultiHead(X c , X c , X c ),", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where A (1) \u2208 R D\u00d7M is the hidden state calculated by the multi-head self-attention at the first layer, MultiHead(Q, K, V) is a multi-head selfattention function that takes a query matrix Q, a key matrix K, and a value matrix V as inputs. In this case, Q = K = V = X c . This is why it is called self-attention. Please refer to (Vaswani et al., 2017) for more details. Note that we follow Vaswani et al. (2017) to use residual connection and layer normalization in each sub-layer, which are omitted in the presentation for simplicity. For example, the actual output of the first sub-layer is:", |
| "cite_spans": [ |
| { |
| "start": 328, |
| "end": 350, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 389, |
| "end": 410, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "LayerNorm(A (1) + X c ).", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The second sub-layer is a simple, position-wise fully connected feed-forward network:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "C (1) = FNN(A (1) \u2022,1 ); . . . ; FNN(A (1) \u2022,M )", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where C (1) \u2208 R D\u00d7M is the annotation of X <k after the first layer, A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022,m \u2208 R D\u00d71 is the column vector for the m-th contextual word, and FNN(\u2022) is a position-wise fully connected feed-forward network (Vaswani et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 152, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "This process iterates N c times as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "A (n) = MultiHead C (n\u22121) , C (n\u22121) , C (n\u22121) ,", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "C (n) = FNN(A (n) \u2022,1 ); . . . ; FNN(A (n) \u2022,M ) ,", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where A (n) and C (n) (n = 1, . . . , N c ) are the hidden state and annotation at the n-th layer, respectively. Note that C (0) = X c .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We use multi-head attention to integrate C (Nc) , which is the representation of X <k , into both the encoder and the decoder.", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 47, |
| "text": "(Nc)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document-level Context Integration", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Given the k-th source sentence x (k) , we use x", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "i \u2208 R D\u00d71 to denote the vector representation of the ith source word x", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "(k)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "i , which is a sum of word embedding and positional encoding. Therefore, the initial matrix representation of", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "x (k) is X = [x (k) 1 ; . . . ; x (k) I ],", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "where X \u2208 R D\u00d7I is the concatenation of all vector representations of source words. As shown in Figure 1 (b), we follow (Vaswani et al., 2017) to use a stack of N s identical layers to encode x (k) . Each layer consists of three sub-layers. The first sub-layer is a multi-head selfattention:", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 142, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 194, |
| "end": 197, |
| "text": "(k)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 96, |
| "end": 104, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "B (n) = MultiHead S (n\u22121) , S (n\u22121) , S (n\u22121) , (11)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "where S (0) = X. The second sub-layer is context attention that integrates document-level context into the encoder: Nc) . 12The third sub-layer is a position-wise fully connected feed-forward neural network:", |
| "cite_spans": [ |
| { |
| "start": 116, |
| "end": 119, |
| "text": "Nc)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "D (n) = MultiHead B (n) , C (Nc) , C (", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "S (n) = FNN(D (n) \u2022,1 ); . . . ; FNN(D (n) \u2022,I ) , (13)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "where S (n) \u2208 R D\u00d7I is the representation of the source sentence x (k) at the n-th layer (n = 1, . . . , N s ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Encoder", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "When generating the j-th target word y ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Y = [y (k) 0 , . . . , y (k) j\u22121 ],", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "where y (k) 0 \u2208 R D\u00d71 is the vector representation of a begin-of-sentence token and Y \u2208 R D\u00d7j is the concatenation of all vectors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "As shown in Figure 1 (b), we follow (Vaswani et al., 2017) to use a stack of N t identical layers to compute target-side representations. Each layer is composed of four sub-layers. The first sub-layer is a multi-head self-attention:", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 58, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "E (n) = MultiHead T (n\u22121) , T (n\u22121) , T (n\u22121) , (15)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "where $T^{(0)} = Y$. The second sub-layer is context attention that integrates document-level context into the decoder (Eq. 16). The third sub-layer is encoder-decoder attention that integrates the representation of the corresponding source sentence (Eq. 17). The fourth sub-layer is a position-wise fully connected feed-forward neural network:",
| "cite_spans": [],
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "F^{(n)} = \\mathrm{MultiHead}(E^{(n)}, C^{(N_c)}, C^{(N_c)}), \\quad (16)",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "G^{(n)} = \\mathrm{MultiHead}(F^{(n)}, S^{(N_s)}, S^{(N_s)}), \\quad (17)",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "T^{(n)} = [\\mathrm{FNN}(G^{(n)}_{\\cdot,1}); \\ldots; \\mathrm{FNN}(G^{(n)}_{\\cdot,j})],",
| "eq_num": "(18)" |
| } |
| ], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "where T (n) \u2208 R D\u00d7j is the representation at the n-th layer (n = 1, . . . , N t ). Note that T (0) = Y .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "Finally, the probability distribution of generating the next target word y (k) j is defined using a softmax layer:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| { |
| "text": "P(y^{(k)}_j | X_{<k}, x^{(k)}, y^{(k)}_{<j}; \\theta) \\propto \\exp(W_o T^{(N_t)}_{\\cdot,j}) \\quad (19)",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration into the Decoder", |
| "sec_num": "2.3.2" |
| }, |
| {
| "text": "where $W_o \\in \\mathbb{R}^{|V_y| \\times D}$ is a model parameter, $V_y$ is the target vocabulary, and $T^{(N_t)}_{\\cdot,j} \\in \\mathbb{R}^{D \\times 1}$ is a column vector for predicting the j-th target word.",
| "cite_spans": [],
| "ref_spans": [],
| "eq_spans": [],
| "section": "Integration into the Decoder",
| "sec_num": "2.3.2"
| },
| { |
| "text": "In our model, we follow Vaswani et al. (2017) to use residual connections around each sub-layer to shortcut its input to its output:", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 45, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context Gating", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Residual(H) = H + SubLayer(H),", |
| "eq_num": "(20)" |
| } |
| ], |
| "section": "Context Gating", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "where H is the input of the sub-layer. While residual connections prove to be effective for building deep architectures, there is one potential problem for our model: the residual connections after the context attention sub-layer might increase the influence of document-level context X <k in an uncontrolled way. This is undesirable because the source sentence x (k) usually plays a more important role in target word generation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context Gating", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "To address this problem, we replace the residual connections after the context attention sub-layer with a position-wise context gating sub-layer:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context Gating", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "Gating(H) = \u03bbH + (1 \u2212 \u03bb)SubLayer(H). (21) The gating weight \u03bb is given by",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context Gating", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\\lambda = \\sigma(W_i H + W_s \\mathrm{SubLayer}(H)),",
| "eq_num": "(22)" |
| } |
| ], |
| "section": "Context Gating", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "where \u03c3(\u2022) is a sigmoid function, W i and W s are model parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context Gating", |
| "sec_num": "2.3.3" |
| }, |
| { |
| "text": "Given a document-level parallel corpus D d , the standard training objective is to maximize the loglikelihood of the training data:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "\\hat{\\theta} = \\mathop{\\mathrm{argmax}}_{\\theta} \\sum_{\\langle X, Y \\rangle \\in D_d} \\log P(Y|X; \\theta). \\quad (23)",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Unfortunately, large-scale document-level parallel corpora are usually unavailable, even for resource-rich languages such as English and Chinese.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Under small-data training conditions, document-level NMT is prone to underperform sentence-level NMT because of poor estimates of low-frequency events.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "To address this problem, we adopt the idea of freezing some parameters while tuning the remaining part of the model (Jean et al., 2015; Zoph et al., 2016) . We propose a two-step training strategy that uses an additional sentence-level parallel corpus D s , which can be larger than D d . We divide model parameters into two subsets: \u03b8 = \u03b8 s \u222a \u03b8 d , where \u03b8 s is a set of original sentencelevel model parameters (highlighted in blue in Figure 1(b) ) and \u03b8 d is a set of newly-introduced document-level model parameters (highlighted in red in Figure 1(b) ).", |
| "cite_spans": [ |
| { |
| "start": 116, |
| "end": 135, |
| "text": "(Jean et al., 2015;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 136, |
| "end": 154, |
| "text": "Zoph et al., 2016)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 436, |
| "end": 447, |
| "text": "Figure 1(b)", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 542, |
| "end": 553, |
| "text": "Figure 1(b)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "In the first step, sentence-level parameters \u03b8 s are estimated on the combined sentence-level parallel corpus", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "D_s \\cup D_d: \\hat{\\theta}_s = \\mathop{\\mathrm{argmax}}_{\\theta_s} \\sum_{\\langle x, y \\rangle \\in D_s \\cup D_d} \\log P(y|x; \\theta_s).",
| "eq_num": "(24)" |
| } |
| ], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Note that the newly introduced modules (highlighted in red in Figure 1(b) ) are inactivated in this step. P (y|x; \u03b8 s ) is identical to the original Transformer model, which is a special case of our model. In the second step, document-level parameters \u03b8 d are estimated on the document-level parallel corpus D d only:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 62, |
| "end": 73, |
| "text": "Figure 1(b)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "\\hat{\\theta}_d = \\mathop{\\mathrm{argmax}}_{\\theta_d} \\sum_{\\langle X, Y \\rangle \\in D_d} \\log P(Y|X; \\hat{\\theta}_s, \\theta_d). \\quad (25)",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Our approach is also similar to pre-training which has been widely used in NMT (Shen et al., 2016; Tu et al., 2018) . The major difference is that our approach keeps\u03b8 s fixed when estimating \u03b8 d to prevent the model from overfitting on the relatively smaller document-level parallel corpora.", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 98, |
| "text": "(Shen et al., 2016;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 99, |
| "end": 115, |
| "text": "Tu et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "We evaluate our approach on Chinese-English and French-English translation tasks. In Chinese-English translation task, the training set contains 2M Chinese-English sentence pairs with 54.8M Chinese words and 60.8M English words. 3 The document-level parallel corpus is a subset of the full training set, including 41K documents with 940K sentence pairs. On average, each document in the training set contains 22.9 sentences. We use the NIST 2006 dataset as the development set and the NIST 2002 NIST , 2003 NIST , 2004 NIST , 2005 NIST , 2008 datasets as test sets. The development and test sets contain 588 documents with 5,833 sentences. On average, each document contains 9.9 sentences.", |
| "cite_spans": [ |
| { |
| "start": 229, |
| "end": 230, |
| "text": "3", |
| "ref_id": null |
| }, |
| { |
| "start": 485, |
| "end": 494, |
| "text": "NIST 2002", |
| "ref_id": null |
| }, |
| { |
| "start": 495, |
| "end": 506, |
| "text": "NIST , 2003", |
| "ref_id": null |
| }, |
| { |
| "start": 507, |
| "end": 518, |
| "text": "NIST , 2004", |
| "ref_id": null |
| }, |
| { |
| "start": 519, |
| "end": 530, |
| "text": "NIST , 2005", |
| "ref_id": null |
| }, |
| { |
| "start": 531, |
| "end": 542, |
| "text": "NIST , 2008", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments 3.1 Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In French-English translation task, we use the IWSLT bilingual training data (Mauro et al., 2012) which contains 1,824 documents with 220K sentence pairs as training set. For development and testing, we use the IWSLT 2010 development and test sets, which contain 8 documents with 887 sentence pairs and 11 documents with 1,664 sentence pairs respectively. The evaluation metric for both tasks is case-insensitive BLEU score as calculated by the multi-bleu.perl script.",
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 97, |
| "text": "(Mauro et al., 2012)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments 3.1 Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In preprocessing, we use byte pair encoding (Sennrich et al., 2016) with 32K merges to segment words into sub-word units for all languages. For the original Transformer model and our extended model, the hidden size is set to 512 and the # sent. 1 2 3 MT06 49.38 49.69 49.49 filter size is set to 2,048. The multi-head attention has 8 individual attention heads. We set N = N s = N t = 6. In training, we use Adam (Kingma and Ba, 2015) for optimization. Each mini-batch contains approximately 24K words. We use the learning rate decay policy described by Vaswani et al. (2017) . In decoding, the beam size is set to 4. We use the length penalty and set the hyper-parameter \u03b1 to 0.6. We use four Tesla P40 GPUs for training and one Tesla P40 GPU for decoding. We implement our approach on top of the open-source toolkit THUMT (Zhang et al., 2017 ). 4", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 67, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 554, |
| "end": 575, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 824, |
| "end": 843, |
| "text": "(Zhang et al., 2017", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments 3.1 Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We first investigate the effect of context length (i.e., the number of preceding sentences) on our approach. As shown in Table 1 , using two preceding source sentences as document-level context achieves the best translation performance on the development set. Using more preceding sentences does not bring any improvement and increases computational cost. This confirms the finding of Tu et al. (2018) that long-distance context only has limited influence. Therefore, we set the number of preceding sentences to 2 in the following experiments. 5 Table 2 shows the effect of self-attention layer number for computing representations of document-level context (see Section 2.2) on translation quality. Surprisingly, using only one selfattention layer suffices to achieve good performance. Increasing the number of self-attention layers does not lead to any improvements. Therefore, we set N c to 1 for efficiency.", |
| "cite_spans": [ |
| { |
| "start": 385, |
| "end": 401, |
| "text": "Tu et al. (2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 121, |
| "end": 128, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 546, |
| "end": 553, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Context Length", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In Chinese-English translation task, we compare our approach with the following previous methods:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison with Previous Work", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "1. (Wang et al., 2017) : using a hierarchical RNN to integrate document-level context into the RNNsearch model. They use a documentlevel parallel corpus containing 1M sentence pairs. Table 3 gives the BLEU scores reported in their paper.", |
| "cite_spans": [ |
| { |
| "start": 3, |
| "end": 22, |
| "text": "(Wang et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 183, |
| "end": 190, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison with Previous Work", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "2. (Kuang et al., 2017) : using a cache which stores previous translated words and topical words to incorporate document-level context into the RNNsearch model. They use a document-level parallel corpus containing 2.8M sentence pairs. Table 3 gives the BLEU scores reported in their paper.", |
| "cite_spans": [ |
| { |
| "start": 3, |
| "end": 23, |
| "text": "(Kuang et al., 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 235, |
| "end": 242, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison with Previous Work", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "3. (Vaswani et al., 2017) : the state-of-the-art NMT model that does not exploit documentlevel context. We use the open-source toolkit THUMT (Zhang et al., 2017) to train and evaluate the model. The training dataset is our sentence-level parallel corpus containing 2M sentence pairs. 4. (Kuang et al., 2017 )*: adapting the cachebased method to the Transformer model. We implement it on top of the open-source toolkit THUMT. We also use the same training data (i.e., 2M sentence pairs) and the same twostep training strategy to estimate sentenceand document-level parameters separately.", |
| "cite_spans": [ |
| { |
| "start": 3, |
| "end": 25, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 141, |
| "end": 161, |
| "text": "(Zhang et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 287, |
| "end": 306, |
| "text": "(Kuang et al., 2017", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison with Previous Work", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "As shown in Table 3 , using the same data, our approach achieves significant improvements over the original Transformer model (Vaswani et al., 2017) (p < 0.01). The gain on the concatenated test set (i.e., \"All\") is 1.96 BLEU points. It also outperforms the cache-based method (Kuang et al., 2017) adapted for Transformer significantly (p < 0.01), which also uses the two-step training strategy. Table 4 shows that our model also outperforms Transformer by 0.89 BLEU points on French-English translation task.", |
| "cite_spans": [ |
| { |
| "start": 126, |
| "end": 148, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 277, |
| "end": 297, |
| "text": "(Kuang et al., 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 396, |
| "end": 403, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison with Previous Work", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Model MT06 MT02 MT03 MT04 MT05 MT08 All (Wang et al., 2017) RNNsearch 37.76 ---36.89 27.57 - (Kuang et al., 2017) RNNsearch -34.41 -38.40 32.90 31.86 - (Vaswani et al., 2017 ) Transformer 48.09 48.63 47.54 47.79 48.34 38.31 45.97 (Kuang et al., 2017 Table 3 : Comparison with previous works on Chinese-English translation task. The evaluation metric is caseinsensitive BLEU score. (Wang et al., 2017 ) use a hierarchical RNN to incorporate document-level context into RNNsearch. (Kuang et al., 2017) use a cache to exploit document-level context for RNNsearch. (Kuang et al., 2017) * is an adapted version of the cache-based method for Transformer. Note that \"MT06\" is not included in \"All\". > = < Human 1 24% 45% 31% Human 2 20% 55% 25% Human 3 12% 52% 36% Overall 19% 51% 31% Table 5 : Subjective evaluation of the comparison between the original Transformer model and our model. \">\" means that Transformer is better than our model, \"=\" means equal, and \"<\" means worse.", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 59, |
| "text": "(Wang et al., 2017)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 93, |
| "end": 113, |
| "text": "(Kuang et al., 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 152, |
| "end": 173, |
| "text": "(Vaswani et al., 2017", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 174, |
| "end": 249, |
| "text": ") Transformer 48.09 48.63 47.54 47.79 48.34 38.31 45.97 (Kuang et al., 2017", |
| "ref_id": null |
| }, |
| { |
| "start": 381, |
| "end": 399, |
| "text": "(Wang et al., 2017", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 479, |
| "end": 499, |
| "text": "(Kuang et al., 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 561, |
| "end": 581, |
| "text": "(Kuang et al., 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 250, |
| "end": 257, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 778, |
| "end": 785, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
| "text": "We also conducted a subjective evaluation to validate the benefit of exploiting document-level context. All three human evaluators were asked to compare the outputs of the original Transformer model and our model of 20 documents containing 198 sentences, which were randomly sampled from the test sets. Table 5 shows the results of subjective evaluation. Three human evaluators generally made consistent judgements. On average, around 19% of Transformer's translations are better than that of our model, 51% are equal, and 31% are worse. This evaluation confirms that exploiting document-level context helps to improve translation quality.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 303, |
| "end": 310, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Subjective Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "We evaluated the efficiency of our approach. It takes the original Transformer model about 6.7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation of Efficiency", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "Training Decoding Transformer 41K 872 this work 31K 364 Table 6 : Evaluation of training and decoding speed. The speed is measured in terms of word/second (wps).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 56, |
| "end": 63, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
| "text": "hours to converge during training and the training speed is 41K words/second. The decoding speed is 872 words/second. In contrast, it takes our model about 7.8 hours to converge in the second step of training. The training speed is 31K words/second. The decoding speed is 364 words/second. Therefore, the training speed is only reduced by 25% thanks to the high parallelism of multi-head attention used to incorporate document-level context. The gap is larger in decoding because target words are generated in an autoregressive way in Transformer. Table 7 shows the effect of the proposed twostep training strategy. The first two rows only use sentence-level parallel corpus to train the original Transformer model (see Eq. 24) and achieve BLEU scores of 39.53 and 45.97. The third row only uses the document-level parallel corpus to directly train our model (see Eq. 23) and achieves a BLEU score of 36.52. The fourth and fifth rows use the two-step strategy to take advantage of both sentence-and document-level parallel corpora and achieve BLEU scores of 40.22 and 47.93, respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 548, |
| "end": 555, |
| "text": "Table 7", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
| "text": "We find that document-level NMT achieves much worse results than sentence-level NMT (i.e., 36.52 vs. 39.53) when only small-scale documentlevel parallel corpora are available. Our two-step training method is capable of addressing this problem by exploiting sentence-level corpora, which Table 8 : Effect of context integration. \"none\" means that no document-level context is integrated, \"encoder\" means that the document-level context is integrated only into the encoder, \"decoder\" means that the documentlevel context is integrated only into the decoder, and \"both\" means that the context is integrated into both the encoder and the decoder. leads to significant improvements across all test sets. Table 8 shows the effect of integrating documentlevel context to the encoder and decoder (see Section 2.3). It is clear that integrating document-level context into the encoder (Eq. 12) brings significant improvements (i.e., 45.97 vs. 47.51) . Similarly, it is also beneficial to integrate document-level context into the decoder (Eq. 16). Combining both leads to further improvements. This observation suggests that documentlevel context does help to improve Transformer.", |
| "cite_spans": [ |
| { |
| "start": 917, |
| "end": 940, |
| "text": "(i.e., 45.97 vs. 47.51)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 287, |
| "end": 294, |
| "text": "Table 8", |
| "ref_id": null |
| }, |
| { |
| "start": 699, |
| "end": 706, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Two-Step Training", |
| "sec_num": "3.7" |
| }, |
| { |
| "text": "As shown in Table 9 , we also validated the effectiveness of context gating (see Section 2.3.3). We find that replacing residual connections with context gating leads to an overall improvement of 0.38 BLEU point.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 9", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Context Gating", |
| "sec_num": "3.9" |
| }, |
| { |
| "text": "We use an example to illustrate how documentlevel context helps translation (Table 10) . In order to translate the source sentence, NMT has to disambiguate the multi-sense word \"yundong\", which is actually impossible without the document-level context. The exact meaning of \"rezhong\" is also highly context dependent. Fortunately, the sense of \"yundong\" can be inferred from the word \"saiche\" (car racing) in the document-level context and \"rezhong\" is the antonym of \"yanjuan\" (tired of). This example shows that our model learns to resolve word sense ambiguity and lexical cohesion problems by integrating document-level context.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 76, |
| "end": 86, |
| "text": "(Table 10)", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "3.10" |
| }, |
| { |
| "text": "Developing document-level models for machine translation has been an important research direction, both for conventional SMT (Gong et al., 2011; Hardmeier et al., 2012; Xiong et al., 2013a,b; Garcia et al., 2014) and NMT (Jean et al., 2017; Kuang et al., 2017; Tiedemann and Scherrer, 2017; Wang et al., 2017; Maruf and Haffari, 2018; Bawden et al., 2018; Tu et al., 2018; Voita et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 144, |
| "text": "(Gong et al., 2011;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 145, |
| "end": 168, |
| "text": "Hardmeier et al., 2012;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 169, |
| "end": 191, |
| "text": "Xiong et al., 2013a,b;", |
| "ref_id": null |
| }, |
| { |
| "start": 192, |
| "end": 212, |
| "text": "Garcia et al., 2014)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 221, |
| "end": 240, |
| "text": "(Jean et al., 2017;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 241, |
| "end": 260, |
| "text": "Kuang et al., 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 261, |
| "end": 290, |
| "text": "Tiedemann and Scherrer, 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 291, |
| "end": 309, |
| "text": "Wang et al., 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 310, |
| "end": 334, |
| "text": "Maruf and Haffari, 2018;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 335, |
| "end": 355, |
| "text": "Bawden et al., 2018;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 356, |
| "end": 372, |
| "text": "Tu et al., 2018;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 373, |
| "end": 392, |
| "text": "Voita et al., 2018)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Most existing work on document-level NMT has focused on integrating document-level context into the RNNsearch model (Bahdanau et al., Context \u2022 \u2022 \u2022ziji ye yinwei queshao jingzheng duishou er dui saiche youxie yanjuan shi\u2022 \u2022 \u2022 Source wo rengran feichang rezhong yu zhexiang yundong. Reference I'm still very fond of the sport. Transformer I am still very enthusiastic about this movement. Our work I am still very keen on this sport. Table 10 : An example of Chinese-English translation. In the source sentence, \"yundong\" (sport or political movement) is a multi-sense word and \"rezhong\" (fond of) is an emotional word whose meaning is dependent on its context. Our model takes advantage of the words \"saiche\" (car racing) and \"yanjuan\" (tired of) in the documentlevel context to translate the source words correctly.", |
| "cite_spans": [ |
| { |
| "start": 116, |
| "end": 141, |
| "text": "(Bahdanau et al., Context", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 433, |
| "end": 441, |
| "text": "Table 10", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "2015). These approaches can be roughly divided into two broad categories: computing the representation of the full document-level context (Jean et al., 2017; Tiedemann and Scherrer, 2017; Wang et al., 2017; Maruf and Haffari, 2018; Voita et al., 2018) and using a cache to memorize most relevant information in the document-level context (Kuang et al., 2017; Tu et al., 2018) . Our approach falls into the first category. We use multi-head attention to represent and integrate document-level context. Voita et al. (2018) also extended Transformer to model document-level context, but our work is different in modeling and training strategies. The experimental part is also different. While Voita et al. (2018) focus on anaphora resolution, our model is able to improve the overall translation quality by integrating document-level context.", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 157, |
| "text": "(Jean et al., 2017;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 158, |
| "end": 187, |
| "text": "Tiedemann and Scherrer, 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 188, |
| "end": 206, |
| "text": "Wang et al., 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 207, |
| "end": 231, |
| "text": "Maruf and Haffari, 2018;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 232, |
| "end": 251, |
| "text": "Voita et al., 2018)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 338, |
| "end": 358, |
| "text": "(Kuang et al., 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 359, |
| "end": 375, |
| "text": "Tu et al., 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 501, |
| "end": 520, |
| "text": "Voita et al. (2018)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 690, |
| "end": 709, |
| "text": "Voita et al. (2018)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We have presented a method for exploiting document-level context inside the state-of-the-art neural translation model Transformer. Experiments on Chinese-English and French-English translation tasks show that our method is able to improve over Transformer significantly. In the future, we plan to further validate the effectiveness of our approach on more language pairs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "It is easy to create a sentence-level parallel corpus from D d .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The training set consists of sentence-level parallel corpora LDC2002E18, LDC2003E07, LDC2003E14, news part of LDC2004T08 and document-level parallel corpora LDC2002T01, LDC2004T07, LDC2005T06, LDC2005T10, LDC2009T02, LDC2009T15, LDC2010T03.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/thumt/THUMT 5 If there is no preceding sentence, we simply use a single begin-of-sentence token.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, KyungHyun Cho, and Yoshua Bengio. 2015. Sequence to sequence learning with neural networks. In Proceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Evaluating discourse phenomena in neural machine translation", |
| "authors": [ |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Bawden", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rachel Bawden, Rico Sennrich, Alexandra Birch, and Barry Haddow. 2018. Evaluating discourse phe- nomena in neural machine translation. In Proceed- ings of NAACL.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Document-level machine translation with word vector models", |
| "authors": [ |
| { |
| "first": "Eva Mart\u00ednez", |
| "middle": [], |
| "last": "Garcia", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [ |
"Espa\u00f1a"
| ], |
| "last": "Bonet", |
| "suffix": "" |
| }, |
| { |
"first": "Llu\u00eds",
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eva Mart\u00ednez Garcia, Cristina Esp\u00e3na Bonet, and Llu\u00edz M\u00e0rquez. 2014. Document-level machine transla- tion with word vector models. In Proceedings of EACL.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Convolutional sequence to sequence learning", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Gehring", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Denis", |
| "middle": [], |
| "last": "Yarats", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann N", |
| "middle": [], |
| "last": "Dauphin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Gehring, Michael Auli, David Grangier, De- nis Yarats, and Yann N Dauphin. 2017. Convo- lutional sequence to sequence learning. CoRR, abs/1705.03122.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Cache-based document-level statistical machine translation", |
| "authors": [ |
| { |
| "first": "Zhengxian", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengxian Gong, Min Zhang, and Guodong Zhou. 2011. Cache-based document-level statistical ma- chine translation. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Document-wide decoding for phrasebased statistical machine translation", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Hardmeier", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Hardmeier, Joakim Nivre, and J\u00f6rg Tiede- mann. 2012. Document-wide decoding for phrase- based statistical machine translation. In Proceed- ings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Deep residual learning for image recognition", |
| "authors": [ |
| { |
| "first": "Kaiming", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaoqing", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recog- nition. In Proceedings of CVPR.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "On using very large target vocabulary for neural machine translation", |
| "authors": [ |
| { |
| "first": "S\u00e9bastien", |
| "middle": [], |
| "last": "Jean", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Memisevic", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S\u00e9bastien Jean, Kyunghyun Cho, Roland Memisevic, and Yoshua Bengio. 2015. On using very large tar- get vocabulary for neural machine translation. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Does neural machine translation benefit from larger context? CoRR", |
| "authors": [ |
| { |
| "first": "Sebastien", |
| "middle": [], |
| "last": "Jean", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanislas", |
| "middle": [], |
| "last": "Lauly", |
| "suffix": "" |
| }, |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastien Jean, Stanislas Lauly, Orhan Firat, and Kyunghyun Cho. 2017. Does neural machine translation benefit from larger context? CoRR, abs/1704.05135.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Constituency parsing with a self-attentive encoder", |
| "authors": [ |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Kitaev", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikita Kitaev and Dan Klein. 2018. Constituency pars- ing with a self-attentive encoder.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Cache-based document-level neural machine translation", |
| "authors": [ |
| { |
| "first": "Shaohui", |
| "middle": [], |
| "last": "Kuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Weihua", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shaohui Kuang, Deyi Xiong, Weihua Luo, and Guodong Zhou. 2017. Cache-based document-level neural machine translation. CoRR, abs/1711.11221.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Document context neural machine translation with memory networks", |
| "authors": [ |
| { |
| "first": "Sameen", |
| "middle": [], |
| "last": "Maruf", |
| "suffix": "" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameen Maruf and Gholamreza Haffari. 2018. Docu- ment context neural machine translation with mem- ory networks. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Wit3: Web inventory of transcribed and translated talks", |
| "authors": [ |
| { |
| "first": "Cettolo", |
| "middle": [], |
| "last": "Mauro", |
| "suffix": "" |
| }, |
| { |
| "first": "Girardi", |
| "middle": [], |
| "last": "Christian", |
| "suffix": "" |
| }, |
| { |
| "first": "Federico", |
| "middle": [], |
"last": "Marcello",
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of EAMT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cettolo Mauro, Girardi Christian, and Federico Mar- cello. 2012. Wit3: Web inventory of transcribed and translated talks. In Proceedings of EAMT.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Minimum risk training for neural machine translation", |
| "authors": [ |
| { |
| "first": "Shiqi", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongjun", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiqi Shen, Yong Cheng, Zhongjun He, Wei He, Hua Wu, Maosong Sun, and Yang Liu. 2016. Minimum risk training for neural machine translation. In Pro- ceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. In Proceedings of NIPS.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Neural machine translation with extended context", |
| "authors": [ |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Scherrer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Third Workshop on Discourse in Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J\u00f6rg Tiedemann and Yves Scherrer. 2017. Neural ma- chine translation with extended context. In Proceed- ings of the Third Workshop on Discourse in Machine Translation.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Learning to remember translation history with a continuous cache", |
| "authors": [ |
| { |
| "first": "Zhaopeng", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuming", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhaopeng Tu, Yang Liu, Shuming Shi, and Tong Zhang. 2018. Learning to remember translation his- tory with a continuous cache. Transactions of the Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of NIPS.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Context-aware neural machine translation learns anaphora resolution", |
| "authors": [ |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Voita", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Serdyukov", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elena Voita, Pavel Serdyukov, Rico Sennrich, and Ivan Titov. 2018. Context-aware neural machine transla- tion learns anaphora resolution. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Exploiting cross-sentence context for neural machine translation", |
| "authors": [ |
| { |
| "first": "Longyue", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaopeng", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Way", |
| "suffix": "" |
| }, |
| { |
| "first": "Liu", |
| "middle": [], |
| "last": "Qun", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Longyue Wang, Zhaopeng Tu, Andy Way, and Liu Qun. 2017. Exploiting cross-sentence context for neural machine translation. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.08144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, et al. 2016. Google's neural ma- chine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Modeling lexical cohesion for document-level machine translation", |
| "authors": [ |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Guosheng", |
| "middle": [], |
| "last": "Ben", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yajuan", |
| "middle": [], |
| "last": "Lv", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deyi Xiong, Guosheng Ben, Min Zhang, Yajuan Lv, and Qun Liu. 2013a. Modeling lexical cohesion for document-level machine translation. In Proceedings of IJCAI.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Lexical chain based cohesion models for document-level statistical machine translation", |
| "authors": [ |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chew Lim", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deyi Xiong, Yang Ding, Min Zhang, and Chew Lim Tan. 2013b. Lexical chain based cohesion models for document-level statistical machine translation. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Thumt: An open source toolkit for neural machine translation", |
| "authors": [ |
| { |
| "first": "Jiacheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanzhuo", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiqi", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Huanbo", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.06415" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiacheng Zhang, Yanzhuo Ding, Shiqi Shen, Yong Cheng, Maosong Sun, Huanbo Luan, and Yang Liu. 2017. Thumt: An open source toolkit for neural ma- chine translation. arXiv preprint arXiv:1706.06415.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Transfer learning for low-resource neural machine translation", |
| "authors": [ |
| { |
| "first": "Barret", |
| "middle": [], |
| "last": "Zoph", |
| "suffix": "" |
| }, |
| { |
| "first": "Deniz", |
| "middle": [], |
| "last": "Yuret", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "May", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barret Zoph, Deniz Yuret, Jonathan May, and Kevin Knight. 2016. Transfer learning for low-resource neural machine translation. In Proceedings of EMNLP.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "(a) The original Transformer translation model", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": ". We followVaswani et al. (2017) to offset the target word embeddings by one position, resulting in the following matrix representation of y (k) <j :", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "text": "Effect of context length on translation quality. The BLEU scores are calculated on the development set.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\"># Layer MT06</td></tr><tr><td>1</td><td>49.69</td></tr><tr><td>2</td><td>49.38</td></tr><tr><td>3</td><td>49.54</td></tr><tr><td>4</td><td>49.59</td></tr><tr><td>5</td><td>49.31</td></tr><tr><td>6</td><td>49.43</td></tr></table>", |
| "html": null |
| }, |
| "TABREF1": { |
| "text": "Effect of self-attention layer number (i.e., N c ) on translation quality. The BLEU scores are calculated on the development set.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF2": { |
| "text": "* Transformer 48.14 48.97 48.05 47.91 48.53 38.38 46.37 this work Transformer 49.69 50.96 50.21 49.73 49.46 39.69 47.93", |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF4": { |
| "text": "", |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>: Comparison with Transformer on French-</td></tr><tr><td>English translation task. The evaluation metric is case-</td></tr><tr><td>insensitive BLEU score.</td></tr></table>", |
| "html": null |
| }, |
| "TABREF5": { |
| "text": "42.41 43.12 41.02 40.93 31.49 39.53 2M -48.09 48.63 47.54 47.79 48.34 38.31 45.97 -940K 34.00 38.83 40.51 38.30 36.69 29.38 36.52 940K 940K 37.12 43.29 43.70 41.42 41.84 32.36 40.22 2M 940K 49.69 50.96 50.21 49.73 49.46 39.69 47.93", |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>sent.</td><td>doc.</td><td>MT06 MT02 MT03 MT04 MT05 MT08</td><td>All</td></tr><tr><td>940K</td><td>-</td><td>36.20</td><td/></tr></table>", |
| "html": null |
| }, |
| "TABREF6": { |
| "text": "Effect of two-step training. \"sent.\" denotes sentence-level parallel corpus and \"doc.\" denotes documentlevel parallel corpus.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">Integration MT06 MT02 MT03 MT04 MT05 MT08</td><td>All</td></tr><tr><td>none</td><td colspan=\"2\">48.09 48.63 47.54 47.79 48.34 38.31 45.97</td></tr><tr><td>encoder</td><td colspan=\"2\">48.88 50.30 49.34 48.81 49.75 39.55 47.51</td></tr><tr><td>decoder</td><td colspan=\"2\">49.10 50.31 49.83 49.35 49.29 39.07 47.48</td></tr><tr><td>both</td><td colspan=\"2\">49.69 50.96 50.21 49.73 49.46 39.69 47.93</td></tr></table>", |
| "html": null |
| }, |
| "TABREF8": { |
| "text": "Effect of context gating.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |