| { |
| "paper_id": "U16-1001", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:10:48.276027Z" |
| }, |
| "title": "Improving Neural Translation Models with Linguistic Factors", |
| "authors": [ |
| { |
| "first": "Cong", |
| "middle": [], |
| "last": "Duy", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Melbourne", |
| "location": { |
| "settlement": "VIC", |
| "country": "Australia" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Vu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Melbourne", |
| "location": { |
| "settlement": "VIC", |
| "country": "Australia" |
| } |
| }, |
| "email": "vhoang2@student.unimelb.edu.au" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Monash University", |
| "location": { |
| "addrLine": "Clayton, VIC", |
| "country": "Australia" |
| } |
| }, |
| "email": "gholamreza.haffari@monash.edu" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Melbourne", |
| "location": { |
| "country": "Australia" |
| } |
| }, |
| "email": "t.cohn@unimelb.edu.au" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper presents an extension of neural machine translation (NMT) model to incorporate additional word-level linguistic factors. Adding such linguistic factors may be of great benefits to learning of NMT models, potentially reducing language ambiguity or alleviating data sparseness problem (Koehn and Hoang, 2007). We explore different linguistic annotations at the word level, including: lemmatization, word clusters, Part-of-Speech tags, and labeled dependency relations. We then propose different neural attention architectures to integrate these additional factors into the NMT framework. Evaluating on translating between English and German in two directions with a low resource setting in the domain of TED talks, we obtain promising results in terms of both perplexity reductions and improved BLEU scores over baseline methods.", |
| "pdf_parse": { |
| "paper_id": "U16-1001", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper presents an extension of neural machine translation (NMT) model to incorporate additional word-level linguistic factors. Adding such linguistic factors may be of great benefits to learning of NMT models, potentially reducing language ambiguity or alleviating data sparseness problem (Koehn and Hoang, 2007). We explore different linguistic annotations at the word level, including: lemmatization, word clusters, Part-of-Speech tags, and labeled dependency relations. We then propose different neural attention architectures to integrate these additional factors into the NMT framework. Evaluating on translating between English and German in two directions with a low resource setting in the domain of TED talks, we obtain promising results in terms of both perplexity reductions and improved BLEU scores over baseline methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Neural Machine Translation (NMT) (Devlin et al., 2014; Bahdanau et al., 2015 ) is a new paradigm in machine translation (MT) powered by recent advances in sequence to sequence learning frameworks (Graves, 2013; Sutskever et al., 2014) . NMT has already made remarkable results and improvements over conventional SMT (Luong et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 54, |
| "text": "(Devlin et al., 2014;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 55, |
| "end": 76, |
| "text": "Bahdanau et al., 2015", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 196, |
| "end": 210, |
| "text": "(Graves, 2013;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 211, |
| "end": 234, |
| "text": "Sutskever et al., 2014)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 316, |
| "end": 336, |
| "text": "(Luong et al., 2015)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The core idea of NMT is the encoder-decoder framework where an encoder encodes the source sequence into a vector representation, and then a decoder generates the target sequence sequentially via a recurrent neural network (RNN) . The use of a RNN provides the ability to memorize longer range dependencies that are impossible with standard n-gram modeling -a core component of the traditional Statistical Machine Translation (SMT) framework (Koehn et al., 2003; Lopez, 2008; Koehn, 2010) . Unlike the traditional SMT, NMT offers unique mechanisms to learn translation equivalence without extensive feature engineering efforts.", |
| "cite_spans": [ |
| { |
| "start": 222, |
| "end": 227, |
| "text": "(RNN)", |
| "ref_id": null |
| }, |
| { |
| "start": 441, |
| "end": 461, |
| "text": "(Koehn et al., 2003;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 462, |
| "end": 474, |
| "text": "Lopez, 2008;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 475, |
| "end": 487, |
| "text": "Koehn, 2010)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Though promising, NMT still lacks the ability to model deeper semantic and syntactic aspects of the language. presented a factored translation model to address this issue for the traditional SMT framework, where the model incorporates various linguistic annotations for the surface level words. Particularly for low-resource conditions, these extra annotations can lead to better translation of OOVs (or low-count words) and resolve ambiguities, hence increase the generalization capabilities of the model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In machine translation with a low-resource setting, resolving data sparseness and semantic ambiguity problems can help improve its performance. In this paper, we investigate utilizing extra syntactic and semantic linguistic factors in the context of the NMT framework. Linguistic factors can include bundles of features, e.g., stems, roots, lemmas, morphological classes, data-driven clusters, syntactic analyses (part-of-speeches, constituency parsing, dependency parsing). Adding such extra factors may be of great benefits to NMT models, potentially reducing language ambiguity and alleviating data sparseness further. In this paper, we explore four word-level factor annotations, including: lemmatization, word clusters, Part-of-Speech tags, and relation labels in dependency parse trees (see Figure 1 for an example). We then propose different neural attention architec they 've expanded and enriched our lives . they 've expand and enrich our life . 011011 0100110 010111110 0111101 010111100 11100 1011 000 PRP VBP VBN CC VBN PRP$ NNS / nsubj aux ROOT cc conj nmod dobj none 1 1 0 0 0 1 0 -1 (text -lemma -word cluster -part-of-speech -labelled dependency) Figure 1 : An example of linguistic factor annotations for a source sentence in English.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 797, |
| "end": 805, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 875, |
| "end": 1129, |
| "text": "they 've expanded and enriched our lives . they 've expand and enrich our life . 011011 0100110 010111110 0111101 010111100 11100 1011 000 PRP VBP VBN CC VBN PRP$ NNS / nsubj aux ROOT cc conj nmod dobj none 1 1 0 0 0 1 0", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 1198, |
| "end": 1206, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "tures to integrate these additional factors into the NMT framework. Evaluating on translating between English and German in two directions with a low resource setting in the TED talks data, we obtain perplexity reductions and improved BLEU score over the baseline.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we investigate the feasibility of factored model idea into attentional neural translation model (Bahdanau et al., 2015) . As an initial work, we aim to find how the neural model can benefit from incorporating the additional linguistic factors in source language. Our work is an extension of (Bahdanau et al., 2015) with the integration of additional linguistic factors. A fully factored neural translation model for both source and target sides is considered as our future work. The following section will discuss our extensions of (Bahdanau et al., 2015) in \u00a72.1. Assume that we have L layers of linguistic factor annotations. The training data then consists of N training parallel sentences", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 133, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 305, |
| "end": 328, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 546, |
| "end": 569, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incorporating Linguistic Factors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "{({x (n, ) } L =0 , y (n) )} N n=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incorporating Linguistic Factors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where the word sequence of the nth sentence-pair is denoted in the layer zero x (n,0) , its length is denoted by |x (n) |, its L layers of annotations are denoted by {x (n, ) } L =1 , and the target sentence is denoted by y (n) . In what follows, we review and extend the attentional encoder-decoder neural machine translation for this setting, and explore various neural attention mechanisms operating on the multiple layers of linguistic factors over the source sentence.", |
| "cite_spans": [ |
| { |
| "start": 224, |
| "end": 227, |
| "text": "(n)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incorporating Linguistic Factors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Encoder. First, to encode the source-side information, we first run each layer of linguistic annotations through bidirectional RNNs (biRNN) for dynamically representing the sequence embeddings, i.e.,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Encoder-Decoder", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h j = biRNN ,\u03c8 enc x j , \u2212 \u2192 h j\u22121 ; \u2190 \u2212 h j+1 T ;", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Multi-Factor Encoder-Decoder", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where x j \u2208 R H is the word embedding at position j in sequence layer , and \u2212 \u2192 h j and \u2190 \u2212 h j are the RNN 1 hidden states. This encoding scheme captures not only the position specific information, but also the information coming from the left and right contexts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Encoder-Decoder", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Decoder. Next, a decoder operated by another RNN is used to predict the target y sequentially, from left to right:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Encoder-Decoder", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "g i = RNN \u03c6 dec (c i , y i\u22121 , g i\u22121 ) y i \u223c softmax (W o \u2022 MLP (c i , y i\u22121 , g i ) + b o ) ;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Encoder-Decoder", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where MLP is a single hidden layer neural network with tanh activation. The model parameters include \u03c6 the weight matrix W o \u2208 R Vy\u00d7H and the bias b o \u2208 R Vy , with V y and H denoting the target vocabulary size and hidden dimension size, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Encoder-Decoder", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Note that the state of the decoder g i is conditioned on its previous state g i\u22121 , the previously generated target word y i\u22121 , and the source side context c i summarizing the areas of the source sentence that need to be attended to. Finally, the model is trained end-to-end by minimizing the cross-entropy loss over the target sequence and stochastic gradient descent (SGD) is used for optimizing the model parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Encoder-Decoder", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In what follows, we explore various attention mechanisms for our case where the input sentence is annotated with multiple linguistic factors, and show how the source context c i is constructed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Encoder-Decoder", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In this paper, we explore various attention mechanisms of integrating linguistic factors as briefly summarized in Figure 2 , including Global Attention, Local Attention, and hybrid Global-Local Attention.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 114, |
| "end": 122, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Global Attention. Our first approach has one shared attention vector for all the annotation layers, forcing each layer to attend to the same positions. This essentially means stacking the representations of all the input embeddings x into one vector, i.e., x g representation is used in place of only word embedding x j to encode the input position (eqn 1) to h g j . It is then used to construct the source context for the decoder, using", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "j = x 0 j , . . . , x L j T . This stacked \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 y i c i h j g i-1 y i-1 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 y i c i h j g i-1 y i-1 c l i \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 h l j L \u2026 h j g i-1 a) Global c) Global-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "c i g i-1 y i-1 {c l i } \u2026 \u2026 \u2026 \u2026 \u2026 h l j L \u2026 c l i b) Local", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "c i = |x| j=1 \u03b1 ij h g j with \u03b1 i = softmax(e i ) ; e ij = MLP g i\u22121 , h g j h g j = biRNN \u03b8 enc x g j , \u2212 \u2192 h g j\u22121 ; \u2190 \u2212 h g j+1 T ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where scalar e ij denotes the unnormalized alignment probability between the source word annotation j and target word i, which is produced by single hidden layer neural network with tanh activation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Local Attention. The model may benefit from different attentions learned for different layers. Thus, the second idea is to have multiple attentions for linguistic layers independently, and compute layer-specific context vectors {c i } L =0 and stack them up:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "c i = c 0 i , . . . , c L i T ; c i = Tx j=1 \u03b1 ij h j \u03b1 i = softmax(e i ) ; e ij = MLP g i\u22121 ; h j", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where e ij denotes the alignment score between the annotation at layer and the target word. The MLP for each layer has a different parameterization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Global-Local Attention. Finally, we consider a hybrid global-local attention mechanism which makes use of the global hidden representation h g across all of the layers in generating the local attentions, formulated as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "e ij = MLP g i\u22121 , h g j .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In contrast to the local attention the attention for layer depends on the global encoding, h g , rather than the local encoding for that layer, h l . In training, we encourage the model to have similar attentions across the layers by adding a penalty term to the cross-entropy training objective,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "N n=1 |y (n) | i=1 L =0 \u1fb1 (n) i \u2212 \u03b1 (n), i 2 2 where \u03b1 (n), i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "is the attention to the layer when generating the target word i, and we defin\u0113 \u03b1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "(n) i := 1 L+1 L =0 \u03b1 (n), i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "as the average attention across all layers. Essentially, our regularizer penalizes parameters which induce layer-specific attentions deviating from the average attention.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Factor Attention Architectures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Data. We conducted our experiments on TED Talks datasets (Cettolo et al., 2012) and translate between English (en) \u2194 German (de). For training, we used about 200K parallel sentences, and used tst2010 for tuning model parameters (phrasebased SMT) and early stopping (NMT). We evaluated on the official test sets tst2013 and tst2014, Table 2 : Perplexity scores for attentional model variants evaluated on en\u2194de translations, and \"#param\" refers to no. of model parameters (in millions). bold: \"statistically significantly better than vanilla attentional model\", \u2660 : best performance.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 79, |
| "text": "(Cettolo et al., 2012)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 332, |
| "end": 339, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "following Cettolo et al. (2014) . We chose a word frequency cut-off of \u2265 5 for limiting the vocabulary when training neural models, resulting in 19K and 26K word types for English and German, respectively. All details of data statistics can be found in Table 1 . As linguistic factors, we annotated the source sentences with lemmas, 2 word clusters, 3 and POS tags. We also annotated with the labelled dependency, i.e. by taking the dependency label between each word and its head (together with its direction, i.e. left or right) 4 in the dependency parse tree. Also note that the POS tags and dependency parse trees were extracted from parsing results produced by Stanford Parser 5 and ParZu. 6", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 31, |
| "text": "Cettolo et al. (2014)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 253, |
| "end": 260, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Set-up and Baselines. We used the cnn library 7 for our implementation. All neural models were configured with 512 input embedding and hidden layer dimensions, and 384 alignment dimension, 2 NLTK, http://www.nltk.org/ 3 Brown clustering, https://github.com/ percyliang/brown-cluster 4 The direction is encoded effectively as 3-bit vector. 5 http://nlp.stanford.edu/software/ lex-parser.shtml (en)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "6 https://github.com/rsennrich/ParZu (de) 7 https://github.com/clab/cnn/tree/ master/cnn with 1 and 2 hidden layers in the source and target, respectively. We employed LSTM recurrent structure (Hochreiter and Schmidhuber, 1997) for both source and target RNN sequences. For the phrasebased SMT baseline, we used the Moses toolkit with its standard configuration. To encode the linguistic factors, we used 128, 64, 64, 64 embedding dimensions for each of lemma, word cluster, Part-of-Speech (POS), and labelled dependency sequences, respectively. For training our neural models, the best perplexity scores on tuning sets were used for early stopping of training, which was usually between 5-8 epochs. For decoding, we used a simple greedy algorithm with length normalization. For evaluation of translations, we applied bootstrapping resampling (Koehn, 2004) to measure the statistical significance (p < 0.05) of BLEU score differences between translation outputs of proposed models compared to the baselines.", |
| "cite_spans": [ |
| { |
| "start": 193, |
| "end": 227, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 843, |
| "end": 856, |
| "text": "(Koehn, 2004)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Results and Analysis. We report our experimental results based on standard perplexity and BLEU (Papineni et al., 2002) scores, as shown in Tables 2 and 3, respectively. Table 2 shows that the attentional model with our extensions is noticeably better than the vanilla NMT in terms of perplexity. Among the three attention architectures, the glo-loc attention outperformed others, giving significant improvement compared to the vanilla model. The use of the loc attention did not give much improvement. We suspect that the learned model itself has difficulties deciding which factors to attend to. The drawback of the glo attention is that it enforces only one attention mechanism for all of the layers. This may cause the loss of individual effects that potentially exist in each of layers. The glo-loc attention aims at taking advantage of glo attention and solving the limitation of loc attention with the penalty term, hence giving better performance. Table 3 shows the BLEU score results. Compared to Moses baseline, the vanilla attentional model is superior for en\u2192de and comparable for de\u2192en translation tasks. It is noticeable that the attentional model is capable of working remarkably well, despite the relatively small amounts of parallel data. However, table 3 shows the inconsistency, compared to the respective perplexity scores in Table 2 . For en\u2192de, both glo and glo-loc attention architectures worked competitively well, giving significantly better BLEU scores than the vanilla attentional model. Compared to glo, the glo-loc attention is superior in tst2013, but slightly detrimental in tst2014 although (its respective perplexity scores are better). These results show that reductions in perplexity scores do not guarantee improved BLEU scores, which is particularly true for de\u2192en translation.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 118, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 139, |
| "end": 176, |
| "text": "Tables 2 and 3, respectively. Table 2", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 955, |
| "end": 962, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 1345, |
| "end": 1352, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For the analysis, we further investigate the improvement of the translation quality versus sentence complexity. This would show the extent to which the extra linguistic layers have been helpful in resolving ambiguities of source sentences in translation. We formalize sentence complexity by taking either its length or the depth of its parse tree into consideration. Figure 3 and 4 plot the BLEU score versus these two measures of complexity in two evaluation sets. As seen, the extra linguistic layers have helped the translation quality of more complex sentences compared to the vanilla attentional model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 367, |
| "end": 375, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Recent advances in deep learning research facilitate innovative ideas in machine translation. The attentional encoder-decoder framework pioneered by Bahdanau et al. (2015) is the core, opening a new trend in neural machine translation. Luong et al. (2015) followed the work of (Bahdanau et al., 2015) by experimenting various options on the generation of soft alignments with global and local attention mechanisms. Inspired by remarkable characteristics of state-of-the-art SMT models, Cohn et al. (2016) incorporated structural alignment biases inspired from conventional statistical alignment models (e.g. IBM models 1, 2) to encourage more linguistic structures in the alignment process. Similar in spirit to this, Feng et al. (2016) made use of additional RNN structure for the attention mechanism, hence likely capturing long range dependencies between the attention vectors. Tu et al. (2016) further proposed a socalled coverage vector to trace the attention history for flexibly adjusting future attentions.", |
| "cite_spans": [ |
| { |
| "start": 149, |
| "end": 171, |
| "text": "Bahdanau et al. (2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 277, |
| "end": 300, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 486, |
| "end": 504, |
| "text": "Cohn et al. (2016)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 718, |
| "end": 736, |
| "text": "Feng et al. (2016)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 881, |
| "end": 897, |
| "text": "Tu et al. (2016)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Though having been developed for almost 2 years, the NMT models are currently competitive with state-of-the-art SMT models. However, NMT models are still lacking of capabilities to modelling shallow language characteristics, e.g. the additional annotation at word level of linguistic factors. Such kinds of factors can provide extra dimensions for data sparseness problem as shown in earlier works in SMT models, e.g., (Zhang and Sumita, 2007; Rish\u00f8j and S\u00f8gaard, 2011; Wuebker et al., 2013) . The most closely related work to ours is the factored translation model for SMT framework proposed by . This model evaluated the effects of various linguistic factors (including lemma, POS, morphology) which are annotated for both source and target sides. Our work explored the same manner in the context of NMT framework though only considering source side. However, we further explored the annotation with labelled dependency which potentially inject syntactic information into neural model. Concurrent to our work, Sennrich and Haddow (2016) proposed similar idea for the NMT framework, however, their work has only explored the so-called global attention whereas we proposed more attention mechanisms with local and hybrid global-local attentions. Also, our ex-periments were conducted in a low-resourced setting in a different domain with TED talk data.", |
| "cite_spans": [ |
| { |
| "start": 419, |
| "end": 443, |
| "text": "(Zhang and Sumita, 2007;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 444, |
| "end": 469, |
| "text": "Rish\u00f8j and S\u00f8gaard, 2011;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 470, |
| "end": 491, |
| "text": "Wuebker et al., 2013)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1012, |
| "end": 1038, |
| "text": "Sennrich and Haddow (2016)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this paper, we have presented a novel attentional encoder-decoder for translation capable of integrating linguistic factors in the source language. Four linguistic factors were evaluated, including lemmatization, word clustering, part-of-speech tagging, and labeled dependencies. We proposed several neural attention mechanisms operating over the factors. Our experimental results on two language pairs show that the neural translation model with integrated linguistic factors can be improved, in terms of both perplexity and BLEU scores.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion & Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "As our future work, we aim to explore whether the attentional neural translation model can benefit from linguistic factors, operating over the target language. This work can be considered as the first work towards fully-factored neural translation model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion & Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Generally, an RNN can be employed as Long-Short Term Memory (LSTM)(Hochreiter and Schmidhuber, 1997) or Gated Recurrent Unit (GRU)(Cho et al., 2014). Since the RNN recurrent structure is not our focus, we ignored its formulation in this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Neural Machine Translation by Jointly Learning to Align and Translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. of 3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural Machine Translation by Jointly Learning to Align and Translate. In Proc. of 3rd International Conference on Learning Representa- tions (ICLR2015).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "WIT 3 : Web Inventory of Transcribed and Translated Talks", |
| "authors": [ |
| { |
| "first": "Mauro", |
| "middle": [], |
| "last": "Cettolo", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Girardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 16 th Conference of the European Association for Machine Translation (EAMT)", |
| "volume": "", |
| "issue": "", |
| "pages": "261--268", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mauro Cettolo, Christian Girardi, and Marcello Fed- erico. 2012. WIT 3 : Web Inventory of Transcribed and Translated Talks. In Proceedings of the 16 th Conference of the European Association for Ma- chine Translation (EAMT), pages 261-268, Trento, Italy, May.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Report on the 11th IWSLT Evaluation Campaign", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Cettolo", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Niehues", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Stuker", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Bentivogli", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. of The International Workshop on Spoken Language Translation (IWSLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Cettolo, J. Niehues, S. Stuker, L. Bentivogli, and M. Federico. 2014. Report on the 11th IWSLT Evaluation Campaign. In Proc. of The Interna- tional Workshop on Spoken Language Translation (IWSLT).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merrienboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Fethi", |
| "middle": [], |
| "last": "Bougares", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1724--1734", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart van Merrienboer, Caglar Gul- cehre, Dzmitry Bahdanau, Fethi Bougares, Hol- ger Schwenk, and Yoshua Bengio. 2014. Learn- ing Phrase Representations using RNN Encoder- Decoder for Statistical Machine Translation. In Pro- ceedings of the 2014 Conference on Empirical Meth- ods in Natural Language Processing (EMNLP), pages 1724-1734, Doha, Qatar, October. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Incorporating Structural Alignment Biases into an Attentional Neural Translation Model", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "D V" |
| ], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Vymolova", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Cohn, C. D. V. Hoang, E. Vymolova, K. Yao, C. Dyer, and G. Haffari. 2016. Incorporating Struc- tural Alignment Biases into an Attentional Neural Translation Model. In Proceedings of the 2016 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, San Diego, California, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Fast and Robust Neural Network Joint Models for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Rabih", |
| "middle": [], |
| "last": "Zbib", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongqiang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Lamar", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Makhoul", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1370--1380", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Rabih Zbib, Zhongqiang Huang, Thomas Lamar, Richard Schwartz, and John Makhoul. 2014. Fast and Robust Neural Network Joint Models for Statistical Machine Translation. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 1370-1380, Baltimore, Maryland, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Implicit Distortion and Fertility Models for Attention-based Encoder-Decoder NMT Model", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Feng, S. Liu, M. Li, and M. Zhou. 2016. Implicit Distortion and Fertility Models for Attention-based Encoder-Decoder NMT Model. ArXiv e-prints, Jan- uary.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Generating Sequences With Recurrent Neural Networks. ArXiv e-prints", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Graves. 2013. Generating Sequences With Recur- rent Neural Networks. ArXiv e-prints, August.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Long Short-Term Memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "Jurgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Comput", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and Jurgen Schmidhuber. 1997. Long Short-Term Memory. Neural Comput., 9(8):1735- 1780, November.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Factored Translation Models", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "868--876", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn and Hieu Hoang. 2007. Factored Trans- lation Models. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Lan- guage Processing and Computational Natural Lan- guage Learning (EMNLP-CoNLL), pages 868-876, Prague, Czech Republic, June. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Statistical Phrase-based Translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Franz", |
| "middle": [ |
| "Josef" |
| ], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 2003 Conference of the North American Chapter of the Association for Computational Linguistics on Human Language Technology -Volume 1, NAACL '03", |
| "volume": "", |
| "issue": "", |
| "pages": "48--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Franz Josef Och, and Daniel Marcu. 2003. Statistical Phrase-based Translation. In Pro- ceedings of the 2003 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics on Human Language Technology -Vol- ume 1, NAACL '03, pages 48-54, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Moses: Open Source Toolkit for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Bertoldi", |
| "suffix": "" |
| }, |
| { |
| "first": "Brooke", |
| "middle": [], |
| "last": "Cowan", |
| "suffix": "" |
| }, |
| { |
| "first": "Wade", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christine", |
| "middle": [], |
| "last": "Moran", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Zens", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Constantin", |
| "suffix": "" |
| }, |
| { |
| "first": "Evan", |
| "middle": [], |
| "last": "Herbst", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the ACL on Interactive Poster and Demonstration Sessions, ACL '07", |
| "volume": "", |
| "issue": "", |
| "pages": "177--180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ond\u0159ej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open Source Toolkit for Statistical Machine Translation. In Proceedings of the 45th Annual Meeting of the ACL on Interactive Poster and Demonstration Ses- sions, ACL '07, pages 177-180, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Statistical Significance Tests for Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of Conference on Empirical Methods on Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "388--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2004. Statistical Significance Tests for Machine Translation Evaluation. In Dekang Lin and Dekai Wu, editors, Proceedings of Conference on Empirical Methods on Natural Language Process- ing (EMNLP), pages 388-395, Barcelona, Spain, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2010. Statistical Machine Translation. Cambridge University Press, New York, NY, USA, 1st edition.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lopez", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "ACM Comput. Surv", |
| "volume": "40", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Lopez. 2008. Statistical Machine Translation. ACM Comput. Surv., 40(3):8:1-8:49, August.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Effective Approaches to Attentionbased Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1412--1421", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Effective Approaches to Attention- based Neural Machine Translation. In Proceed- ings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1412-1421, Lisbon, Portugal, September. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "BLEU: A Method for Automatic Evaluation of Machine Translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics, ACL '02", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: A Method for Automatic Evaluation of Machine Translation. In Proceedings of the 40th Annual Meeting on Association for Com- putational Linguistics, ACL '02, pages 311-318, Stroudsburg, PA, USA. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Factored Translation with Unsupervised Word Clusters", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Rish\u00f8j", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "S\u00f8gaard", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Sixth Workshop on Statistical Machine Translation, WMT '11", |
| "volume": "", |
| "issue": "", |
| "pages": "447--451", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Rish\u00f8j and Anders S\u00f8gaard. 2011. Fac- tored Translation with Unsupervised Word Clusters. In Proceedings of the Sixth Workshop on Statisti- cal Machine Translation, WMT '11, pages 447-451, Stroudsburg, PA, USA. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Linguistic input features improve neural machine translation", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proc. of the First Conference on Machine Translation (WMT16)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich and Barry Haddow. 2016. Linguistic in- put features improve neural machine translation. In Proc. of the First Conference on Machine Transla- tion (WMT16).", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Sequence to Sequence Learning with Neural Networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "27", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to Sequence Learning with Neural Net- works. In Z. Ghahramani, M. Welling, C. Cortes, N. D. Lawrence, and K. Q. Weinberger, editors, Ad- vances in Neural Information Processing Systems 27, pages 3104-3112. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Coverage-based Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 4th International Conference on Learning Representations (ICLR 2016 Workshop Track), ICLR '16 Workshop Track", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Z. Tu, Z. Lu, Y. Liu, X. Liu, and H. Li. 2016. Coverage-based Neural Machine Translation. In Proceedings of the 4th International Conference on Learning Representations (ICLR 2016 Workshop Track), ICLR '16 Workshop Track.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Improving Statistical Machine Translation with Word Class Models", |
| "authors": [ |
| { |
| "first": "Joern", |
| "middle": [], |
| "last": "Wuebker", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Peitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Rietig", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1377--1381", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joern Wuebker, Stephan Peitz, Felix Rietig, and Her- mann Ney. 2013. Improving Statistical Machine Translation with Word Class Models. In Proceed- ings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1377-1381, Seattle, Washington, USA, October. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Boosting Statistical Machine Translation by Lemmatization and Linear Interpolation", |
| "authors": [ |
| { |
| "first": "Ruiqiang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the ACL on Interactive Poster and Demonstration Sessions, ACL '07", |
| "volume": "", |
| "issue": "", |
| "pages": "181--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruiqiang Zhang and Eiichiro Sumita. 2007. Boost- ing Statistical Machine Translation by Lemmatiza- tion and Linear Interpolation. In Proceedings of the 45th Annual Meeting of the ACL on Interac- tive Poster and Demonstration Sessions, ACL '07, pages 181-184, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "Proposed attention architectures of integrating linguistic factors for the NMT framework.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "dependency tree depth vs. BLEU. Figure 3: Analysis based on the evaluation set tst2013 in en\u2192de translation. Figure 4: Analysis based on the evaluation set tst2014 in en\u2192de translation.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF2": { |
| "num": null, |
| "html": null, |
| "text": "Statistics of the training & evaluation sets fromIWSLT'14,15 MT track (including en\u2194de) showing in each cell the count for the source language (left) and target language (right). \"#types\" refers to filtered vocabulary with word frequency cut-off 5.", |
| "type_str": "table", |
| "content": "<table><tr><td>configuration</td><td>tst2013</td><td colspan=\"2\">tst2014 #param (M)</td></tr><tr><td>en\u2192de</td><td/><td/><td/></tr><tr><td>Vanilla Attentional Model</td><td>8.20</td><td>10.98</td><td>47.80</td></tr><tr><td>w/ glo+all-factors</td><td>7.84</td><td>10.35</td><td>50.88</td></tr><tr><td>w/ loc+all-factors</td><td>8.02</td><td>10.80</td><td>52.06</td></tr><tr><td>w/ glo-loc+all-factors (w/o regularization penalty)</td><td>7.81</td><td>10.28</td><td>57.52</td></tr><tr><td>w/ glo-loc+all-factors (w/ regularization penalty)</td><td colspan=\"2\">7.48 \u2660 10.15 \u2660</td><td>57.52</td></tr><tr><td>de\u2192en</td><td/><td/><td/></tr><tr><td>Vanilla Attentional Model</td><td>8.76</td><td>11.81</td><td>44.46</td></tr><tr><td>w/ glo+all-factors</td><td>8.50</td><td>11.26</td><td>47.58</td></tr><tr><td>w/ loc+all-factors</td><td>8.50</td><td>11.48</td><td>48.76</td></tr><tr><td>w/ glo-loc+all-factors (w/ regularization penalty)</td><td colspan=\"2\">8.29 \u2660 10.95 \u2660</td><td>54.22</td></tr></table>" |
| }, |
| "TABREF4": { |
| "num": null, |
| "html": null, |
| "text": "BLEU scores for attentional model variants evaluated on en\u2194de translations.", |
| "type_str": "table", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |