| { |
| "paper_id": "P18-1007", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:39:47.165228Z" |
| }, |
| "title": "Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "taku@google.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Subword units are an effective way to alleviate the open vocabulary problems in neural machine translation (NMT). While sentences are usually converted into unique subword sequences, subword segmentation is potentially ambiguous and multiple segmentations are possible even with the same vocabulary. The question addressed in this paper is whether it is possible to harness the segmentation ambiguity as a noise to improve the robustness of NMT. We present a simple regularization method, subword regularization, which trains the model with multiple subword segmentations probabilistically sampled during training. In addition, for better subword sampling, we propose a new subword segmentation algorithm based on a unigram language model. We experiment with multiple corpora and report consistent improvements especially on low resource and out-of-domain settings.", |
| "pdf_parse": { |
| "paper_id": "P18-1007", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Subword units are an effective way to alleviate the open vocabulary problems in neural machine translation (NMT). While sentences are usually converted into unique subword sequences, subword segmentation is potentially ambiguous and multiple segmentations are possible even with the same vocabulary. The question addressed in this paper is whether it is possible to harness the segmentation ambiguity as a noise to improve the robustness of NMT. We present a simple regularization method, subword regularization, which trains the model with multiple subword segmentations probabilistically sampled during training. In addition, for better subword sampling, we propose a new subword segmentation algorithm based on a unigram language model. We experiment with multiple corpora and report consistent improvements especially on low resource and out-of-domain settings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Neural Machine Translation (NMT) models (Bahdanau et al., 2014; Luong et al., 2015; Wu et al., 2016; Vaswani et al., 2017) often operate with fixed word vocabularies, as their training and inference depend heavily on the vocabulary size. However, limiting vocabulary size increases the amount of unknown words, which makes the translation inaccurate especially in an open vocabulary setting.", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 63, |
| "text": "(Bahdanau et al., 2014;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 64, |
| "end": 83, |
| "text": "Luong et al., 2015;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 84, |
| "end": 100, |
| "text": "Wu et al., 2016;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 101, |
| "end": 122, |
| "text": "Vaswani et al., 2017)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A common approach for dealing with the open vocabulary issue is to break up rare words into subword units (Schuster and Nakajima, 2012; Chitnis and DeNero, 2015; Sennrich et al., 2016; Wu et al., 2016 Table 1 : Multiple subword sequences encoding the same sentence \"Hello World\" (BPE) (Sennrich et al., 2016 ) is a de facto standard subword segmentation algorithm applied to many NMT systems and achieving top translation quality in several shared tasks (Denkowski and Neubig, 2017; Nakazawa et al., 2017) . BPE segmentation gives a good balance between the vocabulary size and the decoding efficiency, and also sidesteps the need for a special treatment of unknown words. BPE encodes a sentence into a unique subword sequence. However, a sentence can be represented in multiple subword sequences even with the same vocabulary. Table 1 illustrates an example. While these sequences encode the same input \"Hello World\", NMT handles them as completely different inputs. This observation becomes more apparent when converting subword sequences into id sequences (right column in Table 1 ). These variants can be viewed as a spurious ambiguity, which might not always be resolved in decoding process. At training time of NMT, multiple segmentation candidates will make the model robust to noise and segmentation errors, as they can indirectly help the model to learn the compositionality of words, e.g., \"books\" can be decomposed into \"book\" + \"s\".", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 135, |
| "text": "(Schuster and Nakajima, 2012;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 136, |
| "end": 161, |
| "text": "Chitnis and DeNero, 2015;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 162, |
| "end": 184, |
| "text": "Sennrich et al., 2016;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 185, |
| "end": 200, |
| "text": "Wu et al., 2016", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 285, |
| "end": 307, |
| "text": "(Sennrich et al., 2016", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 454, |
| "end": 482, |
| "text": "(Denkowski and Neubig, 2017;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 483, |
| "end": 505, |
| "text": "Nakazawa et al., 2017)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 201, |
| "end": 208, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 828, |
| "end": 835, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1076, |
| "end": 1083, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this study, we propose a new regularization method for open-vocabulary NMT, called subword regularization, which employs multiple subword segmentations to make the NMT model accurate and robust. Subword regularization consists of the following two sub-contributions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose a simple NMT training algorithm to integrate multiple segmentation candidates. Our approach is implemented as an on-the-fly data sampling, which is not specific to NMT architecture. Subword regularization can be applied to any NMT system without changing the model structure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We also propose a new subword segmentation algorithm based on a language model, which provides multiple segmentations with probabilities. The language model allows to emulate the noise generated during the segmentation of actual data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Empirical experiments using multiple corpora with different sizes and languages show that subword regularization achieves significant improvements over the method using a single subword sequence. In addition, through experiments with out-of-domain corpora, we show that subword regularization improves the robustness of the NMT model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "multiple subword segmentations 2.1 NMT training with on-the-fly subword sampling", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Given a source sentence X and a target sentence Y , let x = (x 1 , . . . , x M ) and y = (y 1 , . . . , y N ) be the corresponding subword sequences segmented with an underlying subword segmenter, e.g., BPE. NMT models the translation probability P (Y |X) = P (y|x) as a target language sequence model that generates target subword y n conditioning on the target history y <n and source input sequence x:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (y|x; \u03b8) = N \u220f n=1 P (y n |x, y <n ; \u03b8),", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where \u03b8 is a set of model parameters. A common choice to predict the subword y n is to use a recurrent neural network (RNN) architecture. However, note that subword regularization is not specific to this architecture and can be applicable to other NMT architectures without RNN, e.g., (Vaswani et al., 2017; Gehring et al., 2017) . NMT is trained using the standard maximum likelihood estimation, i.e., maximizing the loglikelihood L(\u03b8) of a given parallel corpus D =", |
| "cite_spans": [ |
| { |
| "start": 285, |
| "end": 307, |
| "text": "(Vaswani et al., 2017;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 308, |
| "end": 329, |
| "text": "Gehring et al., 2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "{\u27e8X (s) , Y (s) \u27e9} |D| s=1 = {\u27e8x (s) , y (s) \u27e9} |D| s=1 , \u03b8 M LE = arg max \u03b8 L(\u03b8) where, L(\u03b8) = |D| \u2211 s=1 log P (y (s) |x (s) ; \u03b8). (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We here assume that the source and target sentences X and Y can be segmented into multiple subword sequences with the segmentation probabilities P (x|X) and P (y|Y ) respectively. In subword regularization, we optimize the parameter set \u03b8 with the marginalized likelihood as (3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "L marginal (\u03b8) = |D| \u2211 s=1 E x\u223cP (x|X (s) ) y\u223cP (y|Y (s) ) [log P (y|x; \u03b8)] (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Exact optimization of (3) is not feasible as the number of possible segmentations increases exponentially with respect to the sentence length. We approximate (3) with finite k sequences sampled from P (x|X) and P (y|Y ) respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L marginal (\u03b8) \u223c = 1 k 2 |D| \u2211 s=1 k \u2211 i=1 k \u2211 j=1 log P (y j |x i ; \u03b8) x i \u223c P (x|X (s) ), y j \u223c P (y|Y (s) ).", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For the sake of simplicity, we use k = 1. Training of NMT usually uses an online training for efficiency, in which the parameter \u03b8 is iteratively optimized with respect to the smaller subset of D (mini-batch). When we have a sufficient number of iterations, subword sampling is executed via the data sampling of online training, which yields a good approximation of (3) even if k = 1. It should be noted, however, that the subword sequence is sampled on-the-fly for each parameter update.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation with", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the decoding of NMT, we only have a raw source sentence X. A straightforward approach for decoding is to translate from the best segmentation x * that maximizes the probability P (x|X), i.e., x * = argmax x P (x|X). Additionally, we can use the n-best segmentations of P (x|X) to incorporate multiple segmentation candidates. More specifically, given n-best segmentations (x 1 , . . . , x n ), we choose the best translation y * that maximizes the following score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "score(x, y) = log P (y|x)/|y| \u03bb ,", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Decoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where |y| is the number of subwords in y and \u03bb \u2208 R + is the parameter to penalize shorter sentences. \u03bb is optimized with the development data. In this paper, we call these two algorithms onebest decoding and n-best decoding respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "3 Subword segmentations with language model 3.1 Byte-Pair-Encoding (BPE)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Byte-Pair-Encoding (BPE) (Sennrich et al., 2016; Schuster and Nakajima, 2012 ) is a subword segmentation algorithm widely used in many NMT systems 1 . BPE first splits the whole sentence into individual characters. The most frequent 2 adjacent pairs of characters are then consecutively merged until reaching a desired vocabulary size. Subword segmentation is performed by applying the same merge operations to the test sentence. An advantage of BPE segmentation is that it can effectively balance the vocabulary size and the step size (the number of tokens required to encode the sentence). BPE trains the merged operations only with a frequency of characters. Frequent substrings will be joined early, resulting in common words remaining as one unique symbol. Words consisting of rare character combinations will be split into smaller units, e.g., substrings or characters. Therefore, only with a small fixed size of vocabulary (usually 16k to 32k), the number of required symbols to encode a sentence will not significantly increase, which is an important feature for an efficient decoding.", |
| "cite_spans": [ |
| { |
| "start": 25, |
| "end": 48, |
| "text": "(Sennrich et al., 2016;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 49, |
| "end": 76, |
| "text": "Schuster and Nakajima, 2012", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "One downside is, however, that BPE is based on a greedy and deterministic symbol replacement, which can not provide multiple segmentations with probabilities. It is not trivial to apply BPE to the subword regularization that depends on segmentation probabilities P (x|X).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In this paper, we propose a new subword segmentation algorithm based on a unigram language model, which is capable of outputing multiple subword segmentations with probabilities. The unigram language model makes an assumption that 1 Strictly speaking, wordpiece model (Schuster and Nakajima, 2012) is different from BPE. We consider wordpiece as a variant of BPE, as it also uses an incremental vocabulary generation with a different loss function.", |
| "cite_spans": [ |
| { |
| "start": 231, |
| "end": 232, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 268, |
| "end": 297, |
| "text": "(Schuster and Nakajima, 2012)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "2 Wordpiece model uses a likelihood instead of frequency.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "each subword occurs independently, and consequently, the probability of a subword sequence", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "x = (x 1 , . . . , x M )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "is formulated as the product of the subword occurrence probabilities p(x i ) 3 :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "P (x) = M \u220f i=1 p(x i ), (6) \u2200i x i \u2208 V, \u2211 x\u2208V p(x) = 1,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where V is a pre-determined vocabulary. The most probable segmentation x * for the input sentence X is then given by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "x * = arg max x\u2208S(X) P (x),", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where S(X) is a set of segmentation candidates built from the input sentence X. x * is obtained with the Viterbi algorithm (Viterbi, 1967) .", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 138, |
| "text": "(Viterbi, 1967)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "If the vocabulary V is given, subword occurrence probabilities p(x i ) are estimated via the EM algorithm that maximizes the following marginal likelihood L assuming that p(", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "x i ) are hidden vari- ables. L = |D| \u2211 s=1 log(P (X (s) )) = |D| \u2211 s=1 log ( \u2211 x\u2208S(X (s) ) P (x) )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the real setting, however, the vocabulary set V is also unknown. Because the joint optimization of vocabulary set and their occurrence probabilities is intractable, we here seek to find them with the following iterative algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "1. Heuristically make a reasonably big seed vocabulary from the training corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "2. Repeat the following steps until |V| reaches a desired vocabulary size.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(a) Fixing the set of vocabulary, optimize p(x) with the EM algorithm. (b) Compute the loss i for each subword x i , where loss i represents how likely the likelihood L is reduced when the subword x i is removed from the current vocabulary. (c) Sort the symbols by loss i and keep top \u03b7 % of subwords (\u03b7 is 80, for example).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Note that we always keep the subwords consisting of a single character to avoid out-of-vocabulary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "There are several ways to prepare the seed vocabulary. The natural choice is to use the union of all characters and the most frequent substrings in the corpus 4 . Frequent substrings can be enumerated in O(T ) time and O(20T ) space with the Enhanced Suffix Array algorithm (Nong et al., 2009) , where T is the size of the corpus. Similar to (Sennrich et al., 2016) , we do not consider subwords that cross word boundaries. As the final vocabulary V contains all individual characters in the corpus, character-based segmentation is also included in the set of segmentation candidates S(X). In other words, subword segmentation with the unigram language model can be seen as a probabilistic mixture of characters, subwords and word segmentations.", |
| "cite_spans": [ |
| { |
| "start": 274, |
| "end": 293, |
| "text": "(Nong et al., 2009)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 342, |
| "end": 365, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unigram language model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Subword regularization samples one subword segmentation from the distribution P (x|X) for each parameter update. A straightforward approach for an approximate sampling is to use the l-best segmentations. More specifically, we first obtain l-best segmentations according to the probability P (x|X). l-best search is performed in linear time with the Forward-DP Backward-A* algorithm (Nagata, 1994) . One segmentation x i is then sampled from the multinomial distribution", |
| "cite_spans": [ |
| { |
| "start": 382, |
| "end": 396, |
| "text": "(Nagata, 1994)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subword sampling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "P (x i |X) \u223c = P (x i ) \u03b1 / \u2211 l i=1 P (x i ) \u03b1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subword sampling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": ", where \u03b1 \u2208 R + is the hyperparameter to control the smoothness of the distribution. A smaller \u03b1 leads to sample x i from a more uniform distribution. A larger \u03b1 tends to select the Viterbi segmentation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subword sampling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Setting l \u2192 \u221e, in theory, allows to take all possible segmentations into account. However, it is not feasible to increase l explicitly as the number of candidates increases exponentially with respect to the sentence length. In order to exactly sample from all possible segmentations, we use the Forward-Filtering and Backward-Sampling algorithm (FFBS) (Scott, 2002), a variant of the dynamic programming originally introduced by Bayesian hidden Markov model training. In FFBS, all segmentation candidates are represented in a compact lattice structure, where each node denotes a subword. In the first pass, FFBS computes a set of forward probabilities for all subwords in the lattice, which provide the probability of ending up in any particular subword w. In the second pass, traversing the nodes in the lattice from the end of the sentence to the beginning of the sentence, subwords are recursively sampled for each branch according to the forward probabilities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subword sampling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "BPE was originally introduced in the data compression literature (Gage, 1994) . BPE is a variant of dictionary (substitution) encoder that incrementally finds a set of symbols such that the total number of symbols for encoding the text is minimized. On the other hand, the unigram language model is reformulated as an entropy encoder that minimizes the total code length for the text. According to Shannon's coding theorem, the optimal code length for a symbol s is \u2212 log p s , where p s is the occurrence probability of s. This is essentially the same as the segmentation strategy of the unigram language model described as (7).", |
| "cite_spans": [ |
| { |
| "start": 65, |
| "end": 77, |
| "text": "(Gage, 1994)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BPE vs. Unigram language model", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "BPE and the unigram language model share the same idea that they encode a text using fewer bits with a certain data compression principle (dictionary vs. entropy). Therefore, we expect to see the same benefit as BPE with the unigram language model. However, the unigram language model is more flexible as it is based on a probabilistic language model and can output multiple segmentations with their probabilities, which is an essential requirement for subword regularization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BPE vs. Unigram language model", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Regularization by noise is a well studied technique in deep neural networks. A well-known example is dropout (Srivastava et al., 2014) , which randomly turns off a subset of hidden units during training. Dropout is analyzed as an ensemble training, where many different models are trained on different subsets of the data. Subword regularization trains the model on different data inputs randomly sampled from the original input sentences, and thus is regarded as a variant of ensemble training.", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 134, |
| "text": "(Srivastava et al., 2014)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The idea of noise injection has previously been used in the context of Denoising Auto-Encoders (DAEs) (Vincent et al., 2008) , where noise is added to the inputs and the model is trained to reconstruct the original inputs. There are a couple of studies that employ DAEs in natural language processing. (Lample et al., 2017; Artetxe et al., 2017) independently propose DAEs in the context of sequence-to-sequence learning, where they randomly alter the word order of the input sentence and the model is trained to reconstruct the original sentence. Their technique is applied to an unsupervised machine translation to make the encoder truly learn the compositionality of input sentences.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 124, |
| "text": "(Vincent et al., 2008)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 302, |
| "end": 323, |
| "text": "(Lample et al., 2017;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 324, |
| "end": 345, |
| "text": "Artetxe et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Word dropout (Iyyer et al., 2015 ) is a simple approach for a bag-of-words representation, in which the embedding of a certain word sequence is simply calculated by averaging the word embeddings. Word dropout randomly drops words from the bag before averaging word embeddings, and consequently can see 2 |X| different token sequences for each input X. (Belinkov and Bisk, 2017) explore the training of character-based NMT with a synthetic noise that randomly changes the order of characters in a word. (Xie et al., 2017) also proposes a robust RNN language model that interpolates random unigram language model.", |
| "cite_spans": [ |
| { |
| "start": 13, |
| "end": 32, |
| "text": "(Iyyer et al., 2015", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 352, |
| "end": 377, |
| "text": "(Belinkov and Bisk, 2017)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 502, |
| "end": 520, |
| "text": "(Xie et al., 2017)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The basic idea and motivation behind subword regularization are similar to those of previous work. In order to increase the robustness, they inject noise to input sentences by randomly changing the internal representation of sentences. However, these previous approaches often depend on heuristics to generate synthetic noises, which do not always reflect the real noises on training and inference. In addition, these approaches can only be applied to source sentences (encoder), as they irreversibly rewrite the surface of sentences. Subword regularization, on the other hand, generates synthetic subword sequences with an underlying language model to better emulate the noises and segmentation errors. As subword regularization is based on an invertible conversion, we can safely apply it both to source and target sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Subword regularization can also be viewed as a data augmentation. In subword regularization, an input sentence is converted into multiple invariant sequences, which is similar to the data augmentation for image classification tasks, for example, random flipping, distorting, or cropping.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "There are several studies focusing on segmentation ambiguities in language modeling. Latent Sequence Decompositions (LSDs) (Chan et al., 2016) learns the mapping from the input and the output by marginalizing over all possible segmentations. LSDs and subword regularization do not assume a predetermined segmentation for a sentence, and take multiple segmentations by a sim-ilar marginalization technique. The difference is that subword regularization injects the multiple segmentations with a separate language model through an on-the-fly subword sampling. This approach makes the model simple and independent from NMT architectures.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 142, |
| "text": "(Chan et al., 2016)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Lattice-to-sequence models (Su et al., 2017; Sperber et al., 2017) are natural extension of sequence-to-sequence models, which represent inputs uncertainty through lattices. Lattice is encoded with a variant of TreeLSTM (Tai et al., 2015) , which requires changing the model architecture. In addition, while subword regularization is applied both to source and target sentences, lattice-to-sequence models do not handle target side ambiguities.", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 44, |
| "text": "(Su et al., 2017;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 45, |
| "end": 66, |
| "text": "Sperber et al., 2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 220, |
| "end": 238, |
| "text": "(Tai et al., 2015)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A mixed word/character model (Wu et al., 2016) addresses the out-of-vocabulary problem with a fixed vocabulary. In this model, out-ofvocabulary words are not collapsed into a single UNK symbol, but converted into the sequence of characters with special prefixes representing the positions in the word. Similar to BPE, this model also encodes a sentence into a unique fixed sequence, thus multiple segmentations are not taken into account.", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 46, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We conducted experiments using multiple corpora with different sizes and languages. Table 2 summarizes the evaluation data we used 5 6 7 8 9 10 . IWSLT15/17 and KFTT are relatively small corpora, which include a wider spectrum of languages with different linguistic properties. They can evaluate the language-agnostic property of subword regularization. ASPEC and WMT14 (en\u2194de) are medium-sized corpora. WMT14 (en\u2194cs) is a rather big corpus consisting of more than 10M parallel sentences.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 84, |
| "end": 91, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We used GNMT (Wu et al., 2016) as the implementation of the NMT system for all experiments. We generally followed the settings and training procedure described in (Wu et al., 2016) , however, we changed the settings according to the corpus size. Table 2 shows the hyperparameters we used in each experiment. As common settings, we set the dropout probability to be 0.2. For parameter estimation, we used a combination of Adam (Kingma and Adam, 2014) and SGD algorithms. Both length normalization and coverage penalty parameters are set to 0.2 (see section 7 in (Wu et al., 2016) ). We set the decoding beam size to 4.", |
| "cite_spans": [ |
| { |
| "start": 13, |
| "end": 30, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 163, |
| "end": 180, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 561, |
| "end": 578, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 246, |
| "end": 253, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The data was preprocessed with Moses tokenizer before training subword models. It should be noted, however, that Chinese and Japanese have no explicit word boundaries and Moses tokenizer does not segment sentences into words, and hence subword segmentations are trained almost from unsegmented raw sentences in these languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We used the case sensitive BLEU score (Papineni et al., 2002) as an evaluation metric. As the output sentences are not segmented in Chinese and Japanese, we segment them with characters and KyTea 11 for Chinese and Japanese respectively before calculating BLEU scores.", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 61, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "BPE segmentation is used as a baseline system. We evaluate three test systems with different sampling strategies: (1) Unigram language model-based subword segmentation without subword regularization (l = 1), (2) with subword regularization (l = 64, \u03b1 = 0.1) and (3) (l = \u221e, \u03b1 = 0.2/0.5) 0.2: IWSLT, 0.5: others. These sampling parameters were determined with preliminary experiments. l = 1 is aimed at a pure comparison between BPE and the unigram language model. In addition, we compare one-best decoding and n-best decoding (See section 2.2). Because BPE is not able to provide multiple segmentations, we only evaluate one-best decoding for BPE. Consequently, we compare 7 systems (1 + 3 \u00d7 2) for each language pair. Table 3 shows the translation experiment results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 719, |
| "end": 726, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "First, as can be seen in the table, BPE and unigram language model without subword regularization (l = 1) show almost comparable BLEU scores. This is not surprising, given that both BPE and the unigram language model are based on data compression algorithms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We can see that subword regularization (l > 1) boosted BLEU scores quite impressively (+1 to 2 points) in all language pairs except for WMT14 (en\u2192cs) dataset. The gains are larger especially in lower resource settings (IWSLT and KFTT). It can be considered that the positive effects of data augmentation with subword regularization worked better in lower resource settings, which is a common property of other regularization techniques.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "As for the sampling algorithm, (l = \u221e \u03b1 = 0.2/0.5) slightly outperforms (l = 64, \u03b1 = 0.1) on IWSLT corpus, but they show almost comparable results on larger data set. Detailed analysis is described in Section 5.5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "On top of the gains with subword regularization, n-best decoding yields further improvements in many language pairs. However, we should note that the subword regularization is mandatory for n-best decoding and the BLEU score is degraded in some language pairs without subword regularization (l = 1). This result indicates that the decoder is more confused for multiple segmentations when they are not explored at training time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "To see the effect of subword regularization on a more open-domain setting, we evaluate the systems with out-of-domain in-house data consisting of multiple genres: Web, patents and query logs. Note that we did not conduct the comparison with KFTT and ASPEC corpora, as we found that the domains of these corpora are too specific 12 , and preliminary evaluations showed extremely poor BLEU scores (less than 5) on out-of-domain corpora. Table 4 shows the results. Compared to the gains obtained with the standard in-domain evaluations in Table 3 , subword regularization achieves significantly larger improvements (+2 points) in every domain of corpus. An interesting observation is that we have the same level of improvements even on large training data sets (WMT14), which showed marginal or small gains with the in-domain data. This result strongly supports our claim that subword regularization is more useful for open-domain settings. Table 5 shows the comparison on different segmentation algorithms: word, character, mixed word/character (Wu et al., 2016) , BPE Table 3 : Main Results (BLEU(%)) (l: sampling size in SR, \u03b1: smoothing parameter). * indicates statistically significant difference (p < 0.05) from baselines with bootstrap resampling (Koehn, 2004) . The same mark is used in Table 4 and 6. (Sennrich et al., 2016 ) and our unigram model with or without subword regularization. The BLEU scores of word, character and mixed word/character models are cited from (Wu et al., 2016) . As German is a morphologically rich language and needs a huge vocabulary for word models, subword-based algorithms perform a gain of more than 1 BLEU point than word model. Among subword-based algorithms, the unigram language model with subword regularization achieved the best BLEU score (25.04), which demonstrates the effectiveness of multiple subword segmentations.", |
| "cite_spans": [ |
| { |
| "start": 1043, |
| "end": 1060, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 1251, |
| "end": 1264, |
| "text": "(Koehn, 2004)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1307, |
| "end": 1329, |
| "text": "(Sennrich et al., 2016", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1476, |
| "end": 1493, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 435, |
| "end": 442, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 536, |
| "end": 543, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 938, |
| "end": 945, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 1067, |
| "end": 1074, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 1292, |
| "end": 1299, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results with out-of-domain corpus", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Subword regularization has two hyperparameters: l: size of sampling candidates, \u03b1: smoothing constant. Figure 1 shows the BLEU scores of various hyperparameters on IWSLT15 (en \u2192 vi) dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 103, |
| "end": 111, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of sampling hyperparameters", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "First, we can find that the peaks of BLEU scores against smoothing parameter \u03b1 are different depending on the sampling size l. This is expected, because l = \u221e has larger search space than l = 64, and needs to set \u03b1 larger to sample sequences close to the Viterbi sequence x * .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Impact of sampling hyperparameters", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "Another interesting observation is that \u03b1 = 0.0 leads to performance drops especially on l = \u221e. When \u03b1 = 0.0, the segmentation probability P (x|X) is virtually ignored and one segmentation is uniformly sampled. This result suggests that biased sampling with a language model is helpful to emulate the real noise in the actual translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Impact of sampling hyperparameters", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "In general, larger l allows a more aggressive regularization and is more effective for low resource settings such as IWSLT. However, the estimation of \u03b1 is more sensitive and performance becomes even worse than baseline when \u03b1 is extremely small. To weaken the effect of regularization and avoid selecting invalid parameters, it might be more reasonable to use l = 64 for high resource languages. 24.50 Unigram w/ SR (l = 64, \u03b1 = 0.1) 25.04 Table 5 : Comparison of different segmentation algorithms (WMT14 en\u2192de)", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 441, |
| "end": 448, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of sampling hyperparameters", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "Although we can see in general that the optimal hyperparameters are roughly predicted with the held-out estimation, it is still an open question how to choose the optimal size l in subword sampling. Table 6 summarizes the BLEU scores with subword regularization either on source or target sentence to figure out which components (encoder or decoder) are more affected. As expected, we can see that the BLEU scores with single side regularization are worse than full regularization. However, it should be noted that single side regularization still has positive effects. This result implies that subword regularization is not only helpful for encoder-decoder architectures, but applicable to other NLP tasks that only use an either encoder or decoder, including text classification Table 6 : Comparison on different regularization strategies (IWSLT15/17, l = 64, \u03b1 = 0.1) (Iyyer et al., 2015) and image caption generation .", |
| "cite_spans": [ |
| { |
| "start": 871, |
| "end": 891, |
| "text": "(Iyyer et al., 2015)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 199, |
| "end": 206, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 781, |
| "end": 788, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of sampling hyperparameters", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "In this paper, we presented a simple regularization method, subword regularization 13 , for NMT, with no change to the network architecture. The central idea is to virtually augment training data with on-the-fly subword sampling, which helps to improve the accuracy as well as robustness of NMT models. In addition, for better subword sampling, we propose a new subword segmentation algorithm based on the unigram language model. Experiments on multiple corpora with different sizes and languages show that subword regularization leads to significant improvements especially on low resource and open-domain settings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Promising avenues for future work are to apply subword regularization to other NLP tasks based on encoder-decoder architectures, e.g., dialog generation (Vinyals and Le, 2015) and automatic summarization (Rush et al., 2015) . Compared to machine translation, these tasks do not have enough training data, and thus there could be a large room for improvement with subword regularization. Additionally, we would like to explore the application of subword regularization for machine learning, including Denoising Auto Encoder (Vincent et al., 2008) and Adversarial Training (Goodfellow et al., 2015) . 13 Implementation is available at https://github.com/google/sentencepiece", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 175, |
| "text": "(Vinyals and Le, 2015)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 204, |
| "end": 223, |
| "text": "(Rush et al., 2015)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 523, |
| "end": 545, |
| "text": "(Vincent et al., 2008)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 571, |
| "end": 596, |
| "text": "(Goodfellow et al., 2015)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 599, |
| "end": 601, |
| "text": "13", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Target sequence y = (y1, . . . , yN ) can also be modeled similarly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "It is also possible to run BPE with a sufficient number of merge operations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "IWSLT15: http://workshop2015.iwslt.org/ 6 IWSLT17: http://workshop2017.iwslt.org/ 7 KFTT: http://www.phontron.com/kftt/ 8 ASPEC: http://lotus.kuee.kyoto-u.ac.jp/ASPEC/ 9 WMT14: http://statmt.org/wmt14/ 10 WMT14(en\u2194de) uses the same setting as(Wu et al., 2016).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.phontron.com/kytea", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "KFTT focuses on Wikipedia articles related to Kyoto, and ASPEC is a corpus of scientific paper domain. Therefore, it is hard to translate out-of-domain texts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Unsupervised neural machine translation", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1710.11041" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2017. Unsupervised neural ma- chine translation. arXive preprint arXiv:1710.11041 .", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1409.0473" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473 .", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Synthetic and natural noise both break neural machine translation", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1711.02173" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov and Yonatan Bisk. 2017. Synthetic and natural noise both break neural machine transla- tion. arXive preprint arXiv:1711.02173 .", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Variablelength word encodings for neural translation models", |
| "authors": [ |
| { |
| "first": "Rohan", |
| "middle": [], |
| "last": "Chitnis", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Denero", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2088--2093", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rohan Chitnis and John DeNero. 2015. Variable- length word encodings for neural translation models. In Proc. of EMNLP. pages 2088-2093.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Stronger baselines for trustable results in neural machine translation", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Denkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. of Workshop on Neural Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Denkowski and Graham Neubig. 2017. Stronger baselines for trustable results in neural ma- chine translation. Proc. of Workshop on Neural Ma- chine Translation .", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A new algorithm for data compression", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Gage", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "C Users J", |
| "volume": "12", |
| "issue": "2", |
| "pages": "23--38", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Gage. 1994. A new algorithm for data compres- sion. C Users J. 12(2):23-38.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Convolutional sequence to sequence learning", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Gehring", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Denis", |
| "middle": [], |
| "last": "Yarats", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann N", |
| "middle": [], |
| "last": "Dauphin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1705.03122" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Gehring, Michael Auli, David Grangier, De- nis Yarats, and Yann N Dauphin. 2017. Convolu- tional sequence to sequence learning. arXiv preprint arXiv:1705.03122 .", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Explaining and harnessing adversarial examples", |
| "authors": [ |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Goodfellow", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathon", |
| "middle": [], |
| "last": "Shlens", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Szegedy", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. 2015. Explaining and harnessing adver- sarial examples. In Proc. of ICLR.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Deep unordered composition rivals syntactic methods for text classification", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Varun", |
| "middle": [], |
| "last": "Manjunatha", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Iyyer, Varun Manjunatha, Jordan Boyd-Graber, and Hal Daum\u00e9 III. 2015. Deep unordered compo- sition rivals syntactic methods for text classification. In Proc. of ACL.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba Adam. 2014. A method for stochastic optimization. arXiv preprint arXiv:1412.6980 .", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Statistical significance tests for machine translation evaluation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2004. Statistical significance tests for machine translation evaluation. In Proc. of EMNLP.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Unsupervised machine translation using monolingual corpora only", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1711.00043" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2017. Unsupervised machine translation using monolingual corpora only. arXive preprint arXiv:1711.00043 .", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Effective approaches to attentionbased neural machine translation", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong, Hieu Pham, and Christopher D Manning. 2015. Effective approaches to attention- based neural machine translation. In Proc of EMNLP.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A stochastic japanese morphological analyzer using a forward-dp backward-a* nbest search algorithm", |
| "authors": [ |
| { |
| "first": "Masaaki", |
| "middle": [], |
| "last": "Nagata", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proc. of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Masaaki Nagata. 1994. A stochastic japanese morpho- logical analyzer using a forward-dp backward-a* n- best search algorithm. In Proc. of COLING.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Overview of the 4th workshop on asian translation", |
| "authors": [ |
| { |
| "first": "Toshiaki", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Shohei", |
| "middle": [], |
| "last": "Higashiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenchen", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideya", |
| "middle": [], |
| "last": "Mino", |
| "suffix": "" |
| }, |
| { |
| "first": "Isao", |
| "middle": [], |
| "last": "Goto", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideto", |
| "middle": [], |
| "last": "Kazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Oda", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 4th Workshop on Asian Translation (WAT2017)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toshiaki Nakazawa, Shohei Higashiyama, Chenchen Ding, Hideya Mino, Isao Goto, Hideto Kazawa, Yusuke Oda, Graham Neubig, and Sadao Kurohashi. 2017. Overview of the 4th workshop on asian trans- lation. In Proceedings of the 4th Workshop on Asian Translation (WAT2017). pages 1-54.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Linear suffix array construction by almost pure inducedsorting", |
| "authors": [ |
| { |
| "first": "Ge", |
| "middle": [], |
| "last": "Nong", |
| "suffix": "" |
| }, |
| { |
| "first": "Sen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wai", |
| "middle": [ |
| "Hong" |
| ], |
| "last": "Chan", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of DCC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ge Nong, Sen Zhang, and Wai Hong Chan. 2009. Lin- ear suffix array construction by almost pure induced- sorting. In Proc. of DCC.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proc. of ACL.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A neural attention model for abstractive sentence summarization", |
| "authors": [ |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Alexander M Rush", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander M Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sen- tence summarization. In Proc. of EMNLP.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Japanese and korean voice search", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaisuke", |
| "middle": [], |
| "last": "Nakajima", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Schuster and Kaisuke Nakajima. 2012. Japanese and korean voice search. In Proc. of ICASSP.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Bayesian methods for hidden markov models: Recursive computing in the 21st century", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Steven", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Scott", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Journal of the American Statistical Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven L Scott. 2002. Bayesian methods for hidden markov models: Recursive computing in the 21st century. Journal of the American Statistical Asso- ciation .", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proc. of ACL.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Neural lattice-to-sequence models for uncertain inputs", |
| "authors": [ |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Sperber", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthias Sperber, Graham Neubig, Jan Niehues, and Alex Waibel. 2017. Neural lattice-to-sequence mod- els for uncertain inputs. In Proc. of EMNLP.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Dropout: a simple way to prevent neural networks from overfitting", |
| "authors": [ |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "JMLR", |
| "volume": "15", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitish Srivastava, Geoffrey E Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: a simple way to prevent neural networks from overfitting. JMLR 15(1).", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Lattice-based recurrent neural network encoders for neural machine translation", |
| "authors": [ |
| { |
| "first": "Jinsong", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhixing", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Rongrong", |
| "middle": [], |
| "last": "De Yi Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "3302--3308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinsong Su, Zhixing Tan, De yi Xiong, Rongrong Ji, Xiaodong Shi, and Yang Liu. 2017. Lattice-based recurrent neural network encoders for neural ma- chine translation. In AAAI. pages 3302-3308.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Improved semantic representations from tree-structured long short-term memory networks", |
| "authors": [ |
| { |
| "first": "Kai Sheng", |
| "middle": [], |
| "last": "Tai", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Sheng Tai, Richard Socher, and Christopher D Manning. 2015. Improved semantic representations from tree-structured long short-term memory net- works. Proc. of ACL .", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.03762" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
                "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. arXiv preprint arXiv:1706.03762 .",
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Extracting and composing robust features with denoising autoencoders", |
| "authors": [ |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Larochelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre-Antoine", |
| "middle": [], |
| "last": "Manzagol", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pascal Vincent, Hugo Larochelle, Yoshua Bengio, and Pierre-Antoine Manzagol. 2008. Extracting and composing robust features with denoising autoen- coders. In Proc. of ICML.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A neural conversational model", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "ICML Deep Learning Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals and Quoc V. Le. 2015. A neural conver- sational model. In ICML Deep Learning Workshop.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Show and tell: A neural image caption generator", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Toshev", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru", |
| "middle": [], |
| "last": "Erhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. 2015. Show and tell: A neural im- age caption generator. In Computer Vision and Pat- tern Recognition.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Error bounds for convolutional codes and an asymptotically optimum decoding algorithm", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Viterbi", |
| "suffix": "" |
| } |
| ], |
| "year": 1967, |
| "venue": "IEEE transactions on Information Theory", |
| "volume": "13", |
| "issue": "2", |
| "pages": "260--269", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Viterbi. 1967. Error bounds for convolutional codes and an asymptotically optimum decoding al- gorithm. IEEE transactions on Information Theory 13(2):260-269.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.08144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, et al. 2016. Google's neural machine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144 .", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Data noising as smoothing in neural network language models", |
| "authors": [ |
| { |
| "first": "Ziang", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Sida", |
| "middle": [ |
| "I" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "L\u00e9vy", |
| "suffix": "" |
| }, |
| { |
| "first": "Aiming", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziang Xie, Sida I. Wang, Jiwei Li, Daniel L\u00e9vy, Aim- ing Nie, Dan Jurafsky, and Andrew Y. Ng. 2017. Data noising as smoothing in neural network lan- guage models. In Proc. of ICLR.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "text": "Effect of sampling hyperparameters Regularization type en\u2192vi vi\u2192en en\u2192ar ar\u2192en No reg. (baseline)", |
| "type_str": "figure" |
| }, |
| "TABREF3": { |
| "html": null, |
| "content": "<table><tr><td>Source only</td><td>26.00 23.09* 13.46 28.16*</td></tr><tr><td>Target only</td><td>26.10 23.62* 14.34* 27.89*</td></tr><tr><td>Source and target</td><td>27.68* 24.73* 14.92* 28.47*</td></tr></table>", |
| "num": null, |
| "text": "25.49 22.32 13.04 27.09", |
| "type_str": "table" |
| } |
| } |
| } |
| } |