| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:06:45.831967Z" |
| }, |
| "title": "Balancing Cost and Benefit with Tied-Multi Transformers", |
| "authors": [ |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Information and Communications Technology", |
| "location": { |
| "addrLine": "3-5 Hikaridai, Seika-cho, Soraku-gun", |
| "postCode": "619-0289", |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Raphael", |
| "middle": [], |
| "last": "Rubino", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Information and Communications Technology", |
| "location": { |
| "addrLine": "3-5 Hikaridai, Seika-cho, Soraku-gun", |
| "postCode": "619-0289", |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Atsushi", |
| "middle": [], |
| "last": "Fujita", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Information and Communications Technology", |
| "location": { |
| "addrLine": "3-5 Hikaridai, Seika-cho, Soraku-gun", |
| "postCode": "619-0289", |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We propose a novel procedure for training multiple Transformers with tied parameters which compresses multiple models into one enabling the dynamic choice of the number of encoder and decoder layers during decoding. In training an encoder-decoder model, typically, the output of the last layer of the N-layer encoder is fed to the M-layer decoder, and the output of the last decoder layer is used to compute loss. Instead, our method computes a single loss consisting of N \u00d7 M losses, where each loss is computed from the output of one of the M decoder layers connected to one of the N encoder layers. Such a model subsumes N \u00d7 M models with different number of encoder and decoder layers, and can be used for decoding with fewer than the maximum number of encoder and decoder layers. Given our flexible tied model, we also address a-priori selection of the number of encoder and decoder layers for faster decoding, and explore recurrent stacking of layers and knowledge distillation for model compression. We present a cost-benefit analysis of applying the proposed approaches for neural machine translation and show that they reduce decoding costs while preserving translation quality.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We propose a novel procedure for training multiple Transformers with tied parameters which compresses multiple models into one enabling the dynamic choice of the number of encoder and decoder layers during decoding. In training an encoder-decoder model, typically, the output of the last layer of the N-layer encoder is fed to the M-layer decoder, and the output of the last decoder layer is used to compute loss. Instead, our method computes a single loss consisting of N \u00d7 M losses, where each loss is computed from the output of one of the M decoder layers connected to one of the N encoder layers. Such a model subsumes N \u00d7 M models with different number of encoder and decoder layers, and can be used for decoding with fewer than the maximum number of encoder and decoder layers. Given our flexible tied model, we also address a-priori selection of the number of encoder and decoder layers for faster decoding, and explore recurrent stacking of layers and knowledge distillation for model compression. We present a cost-benefit analysis of applying the proposed approaches for neural machine translation and show that they reduce decoding costs while preserving translation quality.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Neural networks for sequence-to-sequence modeling typically consist of an encoder and a decoder coupled via an attention mechanism. Whereas the very first deep models used stacked recurrent neural networks (RNN) (Sutskever et al., 2014; Cho et al., 2014; Bahdanau et al., 2015) in the encoder and decoder, the recent Transformer model (Vaswani et al., 2017) constitutes the current state-of-the-art approach, owing to its better context modeling via multi-head self-and cross-attentions.", |
| "cite_spans": [ |
| { |
| "start": 212, |
| "end": 236, |
| "text": "(Sutskever et al., 2014;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 237, |
| "end": 254, |
| "text": "Cho et al., 2014;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 255, |
| "end": 277, |
| "text": "Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 335, |
| "end": 357, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Given an encoder-decoder architecture and its hyper-parameters, such as the number of encoder and decoder layers, vocabulary sizes (in the case of models for texts), and hidden layers, the parameters of the model, i.e., matrices and biases for non-linear transformations, are optimized by iteratively updating them so that the loss for the training data is minimized. The hyper-parameters can also be tuned, for instance, through maximizing the automatic evaluation score on the development data. However, in general, it is highly unlikely (or impossible) that a single optimized model suffices diverse cost-benefit demands at the same time. For instance, in practical low-latency scenarios, one may accept some performance drop for speed. However, a model used with a subset of optimized parameters might perform badly. Also, a single optimized model cannot guarantee the best performance for each individual input. An existing solution for these problems is to train multiple models and host them simultaneously. However, this approach is not very practical, because it requires a large number of resources. We also lack a well-established method for selecting appropriate models for each individual input prior to decoding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As a more effective solution, we consider training a single model that subsumes multiple models which can be used for decoding with different hyper-parameter settings depending on the input or on the latency requirements. In this paper, we focus on the number of layers as an important hyperparameter that impacts both speed and quality of decoding, and propose a multi-layer softmaxing method, which uses the outputs of all layers during training. Conceptually, as illustrated in Figure 1 , this is the same as tying (sharing) the parameters of multiple models with different number of layers and is not specific to particular types of multi-layer neural models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 481, |
| "end": 489, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Despite the generality of our proposed method, in this paper, we focus on encoder-decoder models with N encoder and M decoder layers, and compress N \u00d7 M models 1 by updating the model with a total of N \u00d7 M losses computed by softmaxing the output of each of the M decoder layers, where it attends to the output of each of the N encoder layers. The number of parameters of the resultant encoder-decoder model is equivalent to that of the most complex subsumed model with N encoder and M decoder layers. Yet, we can now perform faster decoding using a fewer number of layers, given that shallower layers are also directly trained.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To evaluate our proposed method, we take the case study of neural machine translation (NMT) (Cho et al., 2014; Bahdanau et al., 2015) , using the Transformer model (Vaswani et al., 2017) , and demonstrate that a single model with N encoder and M decoder layers trained by our method can be used for flexibly decoding with fewer than N and M layers without appreciable quality loss. We evaluate our proposed method on WMT18 English-to-German translation task, and give a cost-benefit analysis for translation quality vs. decoding speed.", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 110, |
| "text": "(Cho et al., 2014;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 111, |
| "end": 133, |
| "text": "Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 164, |
| "end": 186, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Given a flexible tied model, for saving decoding time, we then design mechanisms to choose, prior to decoding, the appropriate number of encoder and decoder layers depending on the input. We also focus on compact modeling, where we leverage other orthogonal types of parameter tying approaches. Compact models are faster to decode and will be useful in cases where a-priori layer prediction might be infeasible.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is organized as follows. Section 2 briefly reviews related work for compressing neural models. Section 3 covers our method that ties multiple models by softmaxing all encoder-decoder layer combinations. Section 4 describes our efforts towards designing and evaluating a mechanism for dynamically selecting encoderdecoder layer combinations prior to decoding. Section 5 describes two orthogonal extensions to our model aiming at further model compression and speeding-up of decoding. The paper ends with Section 6 containing conclusion and future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There are studies that exploit multiple layers simultaneously. fused hidden representations of multiple layers in order to improve the translation quality. Belinkov et al. (2017) and Dou et al. (2018) attempted to identify which layer can generate useful representations for different natural language processing tasks. Unlike them, we make all layers of the encoder and decoder usable for decoding with any encoder-decoder layer combination. In practical scenarios, we can save significant amounts of time by choosing shallower encoder and decoder layers for inference.", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 178, |
| "text": "Belinkov et al. (2017)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 183, |
| "end": 200, |
| "text": "Dou et al. (2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our method ties the parameters of multiple models, which is orthogonal to the work that ties parameters between layers (Dabre and Fujita, 2019) and/or between the encoder and decoder within a single model (Xia et al., 2019; Dabre and Fujita, 2019) . Parameter tying leads to compact models, but they usually suffer from drops in inference quality. In this paper, we counter such drops with knowledge distillation (Hinton et al., 2015; Kim and Rush, 2016; Freitag et al., 2017) . This approach utilizes smoothed data or smoothed training signals instead of the actual training data. A model with a large number of parameters and high per-formance provides smoothed distributions that are then used as labels for training small models instead of one-hot vectors.", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 143, |
| "text": "(Dabre and Fujita, 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 205, |
| "end": 223, |
| "text": "(Xia et al., 2019;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 224, |
| "end": 247, |
| "text": "Dabre and Fujita, 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 413, |
| "end": 434, |
| "text": "(Hinton et al., 2015;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 435, |
| "end": 454, |
| "text": "Kim and Rush, 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 455, |
| "end": 476, |
| "text": "Freitag et al., 2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As one of the aims in this work is model size reduction, it is related to a growing body of work that addresses the computational requirement reduction. Pruning of pre-trained models (See et al., 2016) makes it possible to discard around 80% of the smallest weights of a model without deterioration in inference quality, given it is re-trained with appropriate hyper-parameters after pruning. Currently, most deep learning implementations use 32-bit floating point representations, but 16-bit floating point representations (Gupta et al., 2015; Ott et al., 2018) or aggressive binarization (Courbariaux et al., 2017) can be alternatives. Compact models are usually faster to decode; studies on quantization (Lin et al., 2016) and average attention networks (Xiong et al., 2018) address this topic.", |
| "cite_spans": [ |
| { |
| "start": 183, |
| "end": 201, |
| "text": "(See et al., 2016)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 524, |
| "end": 544, |
| "text": "(Gupta et al., 2015;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 545, |
| "end": 562, |
| "text": "Ott et al., 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 707, |
| "end": 725, |
| "text": "(Lin et al., 2016)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 757, |
| "end": 777, |
| "text": "(Xiong et al., 2018)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "None of the above work has attempted to combine multi-model parameter tying, knowledge distillation, and dynamic layer selection for obtaining and exploiting highly-compressed and flexible deep neural models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "3 Multi-Layer Softmaxing", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Consider an N -layer encoder and M -layer decoder model. Let X be the embedded input to the encoder, Y the expected output of the decoder as well as the input to the decoder (for training), and\u0176 the output predicted by the decoder. Algorithm 1 shows the pseudo-code for our proposed method. Line 3 represents the process done by the i-th encoder layer, L enc i , and line 5 does the same for the j-th decoder layer, L dec j , given the embedded decoder input, dec 0 . In simple words, we compute a loss using the output of each of the M decoder layers which in turn is computed using the output of each of the N encoder layers. In line 8, the N \u00d7 M losses are aggregated 2 before backpropagation. Henceforth, we will refer to this as the Tied-Multi model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For a comparison, the vanilla model is formulated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "dec j = L dec j (dec j\u22121 , enc N ), Y = softmax (dec M )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": ", and overall loss = cross entropy(\u0176 , Y ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Algorithm 1: Training a tied-multi model", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "1 enc0 = X; 2 for i in 1 to N do 3 enci = L enc i (enci\u22121); 4 for j in 1 to M do 5 decj = L dec j (decj\u22121, enci); 6\u0176 = softmax (decj); 7 lossi,j = cross entropy(\u0176 , Y );", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "8 overall loss = aggregate(loss1,1, . . . , lossN,M ); 9 Back-propagate using overall loss;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We evaluated the utility of our multi-layer softmaxing method on a neural machine translation task. We experimented with the WMT18 English-to-German (En\u2192De) translation task. We used all the parallel corpora available for WMT18, except ParaCrawl corpus, 3 consisting of 5.58M sentence pairs, as the training data and 2,998 sentences in newstest2018 as test data. The English and German sentences were pre-processed using the tokenizer.perl and truecase.perl in Moses. 4 The true-case models for English and German were trained on 10M sentences randomly extracted from the monolingual data made available for the WMT18 translation task, using the train-truecaser.perl in Moses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We evaluated the following two types of models on both translation quality and decoding speed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Vanilla models: 36 vanilla models with 1 to 6 encoder and 1 to 6 decoder layers, each trained referring only to the last layer for computing loss.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Tied-Multi model: A single tied-multi model with N = 6 encoder and M = 6 decoder layers, trained by our multi-layer softmaxing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our multi-layer softmaxing method was implemented on top of an open-source toolkit of the Transformer model (Vaswani et al., 2017) in the version 1.6 branch of tensor2tensor. 5 For training, we used the default model settings corresponding to transformer base single gpu in the implementation, except what follows. We used a shared sub-word vocabulary of 32k determined using the internal sub-word segmenter of ten-sor2tensor. To ensure that each model sees roughly 27 BLEU score Decoding time (sec) 36 vanilla models", |
| "cite_spans": [ |
| { |
| "start": 108, |
| "end": 130, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Single tied-multi model n\\m 1 2 3 4 5 6 1 2 3 4 5 6 1 2 3 4 5 6 1 26 . Table 1 : BLEU scores of 36 separately trained vanilla models and our single tied-multi model used with n (1 \u2264 n \u2264 N ) encoder and m (1 \u2264 m \u2264 M ) decoder layers. One set of decoding times is also shown given the fact that vanilla and our tied-multi models have identical shapes when n and m for encoder and decoder layers are specified.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 24, |
| "end": 84, |
| "text": "n\\m 1 2 3 4 5 6 1 2 3 4 5 6 1 2 3 4 5 6 1", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 90, |
| "end": 97, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "the same number of examples during training, we trained the models for 300k iterations, with 1 GPU for the vanilla models and 2 GPUs with batch size halved for our tied-multi model. We averaged the last 10 checkpoints saved after every 1k updates, decoded the test sentences, fixing a beam size 7 of 4 and length penalty of 0.6, and post-processed the decoded results using the detokenizer.perl and detruecase.perl in Moses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We evaluated our models using the BLEU metric (Papineni et al., 2002) implemented in sacreBLEU (Post, 2018) . 8 We also present the time consumed to translate the test data, which includes times for the model instantiation, loading the checkpoints, sub-word splitting and indexing, decoding, and subword de-indexing and merging, whereas times for detokenization are not taken into account.", |
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 69, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 95, |
| "end": 107, |
| "text": "(Post, 2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 110, |
| "end": 111, |
| "text": "8", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Note that we did not use any development data for two reasons. First, we train all models for the same number of iterations. Second, we use checkpoint averaging before decoding, which does not require development data unlike early stopping. Table 1 summarizes the BLEU scores and the average decoding times 9 over 3 runs of all the models, exhibiting the cost-benefit property of our tiedmulti model in comparison with the results of the corresponding 36 vanilla models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 241, |
| "end": 248, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Even though the objective function for the tiedmulti model is substantially more complex than 6 This might lead to sub-optimal models, such as immature or over-fit ones, so we will examine the convergence in future.", |
| "cite_spans": [ |
| { |
| "start": 94, |
| "end": 95, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "7 One can realize faster decoding by narrowing down the beam width. This approach is orthogonal to ours and in this paper we do not insist which is superior to the other.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "8 https://github.com/mjpost/sacreBLEU signature:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "BLEU+case.mixed+lang.en-de+numrefs.1 +smooth.exp+test.wmt18+tok.13a+version.1.3.7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "9 These numbers will vary depending on machine, model architecture, concurrent processes, implementation, hyperparameters, etc. For instance, decoding with a larger length penalty produces longer sentences consuming a longer time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "the one for the vanilla model, when performing decoding with the 6 encoder and 6 decoder layers, it achieved a BLEU score of 35.0, which is approaching the best BLEU score of 35.7 given by the vanilla model with 6 encoder and 4 decoder layers. Note that when using a single encoder layer and/or a single decoder layer, the vanilla models gave significantly higher BLEU score than the tied-multi model. However, when the number of layers is increased, there is no significant difference between the two types of models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Regarding the cost-benefit property of our tiedmulti model, two points must be noted:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 BLEU score and decoding time increase only slightly, when we use more encoder layers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 The bulk of the decoding time is consumed by the decoder, since it works in an autoregressive manner. We can substantially cut down decoding time by using fewer decoder layers which does lead to sub-optimal translation quality.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "One may argue that training a single vanilla model with optimal number of encoder and decoder layers is enough. However, as discussed in Section 1, it is impossible to know a priori which combination is the best for different input sentences. More importantly, a single vanilla model cannot suffice diverse cost-benefit demands and cannot guarantee the best translation for any input (see Section 3.4). Recall that we aim at a flexible model and that all the results in Table 1 have been obtained using a single tied-multi model, albeit using different number of encoder and decoder layers for decoding.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 470, |
| "end": 477, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We conducted an analysis from the perspective of training time, model size, and decoding behavior, in comparison with vanilla models. Training Time: Given that all our models were trained for the same number of iterations, we compared the training times between vanilla and tied-multi models. As a reference, we use the vanilla model with 6 encoder and 6 decoder layers. The total training time for all the 36 vanilla models was 25.5 times 10 that of the reference model. In contrast, the training time for our tied-multi model was about 9.5 times that of the same reference model. This is because training of a tied-multi model can aggressively leverage GPU parallelism for its vast number of computations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis and Discussion", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Model Size: The number of parameters of our tied-multi model is exactly the same as the vanilla model with N encoder and M decoder layers. If we train a set of vanilla models with different numbers of encoder and decoder layers, we end up with significantly more parameters. For instance, in case of N = M = 6 in our experiment, we have 25.2 times more parameters: a total of 4,607M for the 36 vanilla models against 183M for our tied-multi model. In Section 5, we discuss the possibility of further model compression.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis and Discussion", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Decoding Behavior: To better understand the nature of our proposed method, we analyzed the distribution of oracle translations within 36 translations generated by each of the vanilla and our tied-multi models. Let (n, m) be an encoder-decoder layer combination of a given model with n encoder and m decoder layers. The oracle layer combination for an input sentence was determined by measuring the quality of the translation derived from each layer combination. We used a reference-based metric, chrF (Popovi\u0107, 2016) , since it has been particularity designed for sentence-level translation evaluation and was shown to have relatively high correlation with human judgment of translation quality at sentence level for the English-German pair (Ma et al., 2018) . In cases where multiple combinations have the highest score, we chose the fastest one following the overall trend of decoding time (Table 1) . Formally, we considered a combination (n 1 , m 1 ) is faster than another combination (n 2 , m 2 ) if the following holds.", |
| "cite_spans": [ |
| { |
| "start": 501, |
| "end": 516, |
| "text": "(Popovi\u0107, 2016)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 741, |
| "end": 758, |
| "text": "(Ma et al., 2018)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 892, |
| "end": 901, |
| "text": "(Table 1)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis and Discussion", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "(n 1 , m 1 ) < (n 2 , m 2 ) \u2261 m 1 < m 2 \u2228 (m 1 = m 2 \u2227 n 1 < n 2 ).", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Analysis and Discussion", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Figure 2 compares the distributions of oracle layer combinations for the vanilla and our tied-multi models, revealing that the shallower layer combinations in our tied-multi model often generates better translations than deeper ones unlike the vanilla models, despite the lower corpus-level BLEU scores. This sharp bias towards shallower layer combinations suggests the potential reduction of decoding time by dynamically selecting the layer combination per input sentence prior to decoding, ideally without performance drop. We address this task in Section 4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis and Discussion", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Motivated by the results shown in Figure 2 , we tackled an advanced problem: dynamic selection of one layer combination prior to decoding. 11", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 34, |
| "end": 42, |
| "text": "Figure 2", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dynamic Layer Selection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We formalize the encoder-decoder layer combination selection with a supervised learning approach where the objective is to minimize the following loss function (2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "arg min \u03b8 1 |S| s i \u2208S L(f (s i ; \u03b8), t i k ),", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where s i is the i-th input sentence (1 \u2264 i \u2264 |S|), t i k is the translation for s i derived from the k-th layer combination (1 \u2264 k \u2264 K) among K possible combinations, where K = N \u00d7 M in our case, f is the model with parameters \u03b8, and L is a loss function. Assuming that the independence of target labels (layer combinations) for a given input sentence allows for ties, the model is able to predict multiple layer combinations for the same input sentence. We implemented the model f with a multi-head self-attention neural network inspired by Vaswani et al. (2017) . The number of layers and attention heads are optimized during a hyper-parameter search, while the feed-forward layer dimensionality is fixed to 2,048. Input sequences of tokens are mapped to their corresponding embeddings, initialized by the embedding table of the tied-multi NMT model. Similarly to BERT (Devlin et al., 2019) , a specific token is prepended to input sequence before being fed to the classifier. This token is finally fed during the forward pass to the output linear layer for sentence classification. The output linear layer has K dimensions, allowing to output as many logits as the number of layer combinations in the tied-multi NMT model. Finally, a sigmoid function outputs probabilities for each layer combination among the K possible combinations.", |
| "cite_spans": [ |
| { |
| "start": 543, |
| "end": 564, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 872, |
| "end": 893, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The parameters \u03b8 of the model f are learned using mini-batch stochastic gradient descent with Nesterov momentum (Sutskever et al., 2013) and the loss function L, implemented as a weighted binary cross-entropy (BCE) function (3).", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 136, |
| "text": "(Sutskever et al., 2013)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L BCE i k = \u2212w i k \u03b4 k y i k \u2022 log\u0177 i k + (1 \u2212 y i k ) \u2022 log(1 \u2212\u0177 i k ) ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where y i k is the reference class of the i-th input sentence s i ,\u0177 i k is the output of the network after the sigmoid layer given s i , and \u03b4 k = (1 \u2212 p(t k )) \u03b1 is the weight given to the k-th class based on class distribution prior. During our experiment, we have found that the classifier tends to favor recall in detriment to precision. To tackle this issue, we introduce another loss using an approximation of the macro F \u03b2 implemented following (4).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L i F \u03b2 = 1 \u2212 (1 + \u03b2 2 ) \u2022 P \u2022 R (\u03b2 2 \u2022 P ) + R ,", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "P = \u00b5/ k\u0177 i k , R = \u00b5/ k y i k , and \u00b5 = k (\u0177 i k \u2022 y i k )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": ". The final loss function is the linear interpolation of L BCE averaged over the K classes and L F \u03b2 with parameter \u03bb, both averaged over the batch:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u03bb \u00d7 L BCE + (1 \u2212 \u03bb) \u00d7 L F \u03b2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We tune \u03b1, \u03b2, and \u03bb during the classifier hyper-parameter search based on the validation loss.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The layer combination classifier was trained on a subset of the training data for NMT models (Section 3.2) containing 5.00M sentences, whereas the remaining sentences compose a development and a test sets each containing approximately 200k sentences. The two latter subsets were used for hyperparameter search and evaluation of the classifier, respectively. To allow for comparison and reproducibility, the final evaluation of the proposed approach in terms of translation quality and decoding speed were conducted on the official WMT development (newstest2017, 3,004 sentences) and test (newstest2018, 2,998 sentences) sets; the latter is the one also used in Section 3.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The training, development, and test sets were translated by each layer combination of the tiedmulti NMT model. Each source sentence was thus aligned with 36 translations whose quality were measured by the chrF metric. Because several combinations can lead to the best score, the obtained dataset was labeled with multiple classes (36 layer combinations) and multiple labels (ties with regard to the metric). During inference, the ties were broken by selecting the layer combination with the highest value given by the sigmoid function, or backing-off to the deepest layer combination (6, 6) if no output value reaches 0.5. This tie breaking method differs from the oracle layer selection presented in Equation (1) and in Figure 2 which prioritizes shallowest layer combinations. In this experiment, decoding time was measured by processing one sentence at a time instead of batch decoding, the former being slower compared to the latter, but leads to precise results. The decoding times were 954s and 2,773s when using (1,1) and (6,6) layer combinations, respectively. By selecting the fastest encoder-decoder layer combinations", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 721, |
| "end": 729, |
| "text": "Figure 2", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Fine-tuning Time (s) BLEU Baseline (tied (6,6)) 2,773 35.0 Oracle (tied) 1,812 42.1 (#1) 8 layers, 8 heads 2,736 35.0 (#2) 2 layers, 4 heads 2,686 34.8 (#3) 2 layers, 4 heads 2,645 34.7 (#4) 4 layers, 2 heads 2,563 34.3 Table 2 : Dynamic layer combination selection results in decoding time (in seconds, batch size of 1) and BLEU, including the baseline and oracle for the WMT newstest2018 using the tied-multi model architecture.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 220, |
| "end": 227, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Classifier", |
| "sec_num": null |
| }, |
| { |
| "text": "according to an oracle, the decoding times went down to 1,918s and 1,812s for the individual and tied-multi models, respectively. However, our objective is to be faster than default setting, that is, where one would choose (6,6) combination. Several classifiers were trained and evaluated on the WMT test set, with or without fine-tuning on the WMT development set. Table 2 presents the results in terms of corpus-level BLEU and decoding speed. 12 Some classifiers maintain the translation quality (middle rows), whereas others show quality degradation but further gain in decoding speed (bottom rows). The classification results show that gains in decoding speed are possible with an apriori decision for which encoder-decoder combination to select, based on the information contained in the source sentence only. However, no BLEU gain has so far been observed, demonstrating a trade-off between decoding speed and translation quality. Our best configuration for decoding speed (#4) reduced 210s but leads to a 0.7 point BLEU degradation. On the other hand, when preserving the translation quality compared to the baseline configuration (#1) we saved only 37s. The oracle layer combination can achieve substantial gains both in terms of BLEU (7.1 points) and decoding speed (961s). These oracle results motivate possible future work in layer combination prediction for the tied-multi NMT model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 366, |
| "end": 373, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Classifier", |
| "sec_num": null |
| }, |
| { |
| "text": "We examined the combination of our multi-layer softmaxing approach with another parameter-tying method in neural networks, called recurrent stacking (RS) (Dabre and Fujita, 2019), complemented by sequence-level knowledge distillation (Kim and Rush, 2016) , a specific type of knowledge distillation (Hinton et al., 2015) . We demonstrate that these existing techniques help reduce the number of parameters in our model even further.", |
| "cite_spans": [ |
| { |
| "start": 234, |
| "end": 254, |
| "text": "(Kim and Rush, 2016)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 299, |
| "end": 320, |
| "text": "(Hinton et al., 2015)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Further Model Compression", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In Section 2, we have discussed several model compression methods orthogonal to multi-layer softmaxing. Having already compressed N \u00d7 M models with our approach, we consider further compressing it using RS. However, models that use RS layers tend to suffer from performance drops due to the large reduction in the number of parameters. As a way of compensating the performance drop, we apply sequence-level knowledge distillation. First, we decode all source sentences in the training data to generate a pseudo-parallel corpus containing distillation target sentences, i.e., soft-targets for the child model which makes learning easier and hence is able to mimic the behavior of the parent model. Then, an RS child model is trained with multi-layer softmaxing on the generated pseudoparallel corpus. Among a variety of distillation techniques, we chose the simplest one to show the impact that distillation can have in our setting, leaving an extensive exploration of more complex methods for the future.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Distillation into a Recurrently Stacked Model", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We conducted an experiment to show that RS and sequence distillation can lead to an extremely compressed tied-multi model which no longer suffers from performance drops. We compared the following four variations of our tied-multi model trained with multi-layer softmaxing. Tied-multi model: A model that does not share the parameters across layers, trained on the original parallel corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Distilled tied-multi model: The same model as above but trained on the pseudo-parallel corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Tied-multi RS model: A tied-multi model that uses RS layers, trained on the original parallel corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Distilled tied-multi RS model: The same model as above but trained on the pseudo-parallel corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "First, we trained 5 vanilla models with 6 encoder and 6 decoder layers, because the performance of Table 3 : BLEU scores of the tied-multi models with (left block) and without (center and right blocks) RS layers, each trained with (bottom block) and without (top block) sequence distillation. n and m respectively denote the number of layers in the encoder and the decoder. The top-left block is identical to the middle block in Table 1. distilled models is affected by the quality of parent models, and NMT models vary vastly in performance (around 2.0 BLEU points) depending on parameter initialization. We then decode the source side (English side) of the entire training data (5.58M sentences) with the one 13 with the highest BLEU score on the newstest2017 (used in Section 4.2) in order to generate pseudo-parallel corpus for sequence distillation. Table 3 gives the BLEU scores for all models. Comparing top-left and top-right blocks of the table, we can see that the BLEU scores for RS models are higher than their non-RS counterparts when using fewer than 3 decoder layers. This shows the benefit of RS layers despite the large parameter reduction. However, the reduction in parameters negatively affects (up to 1.3 BLEU points) when decoding with more decoder layers, confirming the limitation of RS as expected.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 99, |
| "end": 106, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 429, |
| "end": 437, |
| "text": "Table 1.", |
| "ref_id": null |
| }, |
| { |
| "start": 855, |
| "end": 862, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Comparing the scores of the top and bottom halves of the table, we can see that distillation dramatically boosts the performance of the shallower encoder and decoder layers. For instance, without distillation, the tied-multi model gave a BLEU of 23.2 when decoding with 1 encoder and 1 decoder layers, but the same layer combination reaches 30.1 BLEU through distillation. Given that RS further improves performance using lower layers, the BLEU score increases to 31.2. As such, distillation enables decoding using fewer layers without substantial drops in performance. Furthermore, the BLEU scores did not vary significantly when the layers deeper than 3 were used, meaning that we might as well train shallower models using distil- 13 Ensemble of multiple models (Freitag et al., 2017) is commonly used for distillation, but we used a single model to save decoding time. lation. The performance of our final model, i.e., the distilled tied-multi RS model (bottom-right), was significantly lower than the non-RS model (up to 1.5 BLEU points) similarly to its non-distilled counterpart. However, given that it outperforms our original tied-multi model (top-left) in all the encoder-decoder layer combinations, we conclude that we can obtain a highly compact model with better performance. We now analyze the effect of RS and knowledge distillation on model size and decoding speed.", |
| "cite_spans": [ |
| { |
| "start": 734, |
| "end": 736, |
| "text": "13", |
| "ref_id": null |
| }, |
| { |
| "start": 765, |
| "end": 787, |
| "text": "(Freitag et al., 2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Model Size: Table 4 gives the sizes of several models and their ratio with respect to the tied-multi model. Training vanilla and RS models with 36 different encoder-decoder layer combinations required 25.2 and 14.3 times the number of parameters of a single tied-multi model, respectively. Although RS led to some parameter reduction, combining RS with our tied-multi model resulted in a further compressed single model. This model has 63.2 times and 36.0 times fewer parameters than all the individual vanilla and RS models, respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Decoding Speed: Table 5 : BLEU scores and decoding times of our distilled tied-multi RS model by beam and greedy search. The top-left block is identical to the bottom-right block in Table 3 . The top-right block is identical to the right-most block in Table 1. decoding is faster than beam decoding, but suffers from reduced performance. By using our distilled model, however, greedy decoding reduced the BLEU scores only by 0.5 points compared to beam decoding. For instance, whereas beam decoding with our tied-multi model without RS and distillation (top-left block in Table 3 ) achieved the highest BLEU score of 35.1 with 5 encoder and 6 decoder layers consuming 268.8s, greedy decoding with our distilled tied-multi RS model with 2 encoder and 2 decoder layers resulted in a comparable BLEU score of 35.0 in 68.0s, i.e., with a factor of 4.0 in decoding time thanks to RS and distillation. This happens because we have used translations generated by beam decoding as target sentences for knowledge distillation, which has the ability to loosely distill beam search behavior into greedy decoding behavior (Kim and Rush, 2016) .", |
| "cite_spans": [ |
| { |
| "start": 1110, |
| "end": 1130, |
| "text": "(Kim and Rush, 2016)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 16, |
| "end": 23, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 182, |
| "end": 189, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 252, |
| "end": 260, |
| "text": "Table 1.", |
| "ref_id": null |
| }, |
| { |
| "start": 572, |
| "end": 579, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In this paper, we have proposed a novel procedure for training encoder-decoder models, where the softmax function is applied to the output of each of the M decoder layers derived using the output of each of the N encoder layers. This compresses N \u00d7M models into a single model that can be used for decoding with a variable number of encoder (1 \u2264 n \u2264 N ) and decoder (1 \u2264 m \u2264 M ) layers. This model can be used in different latency scenarios and hence is highly versatile. We have made a cost-benefit analysis of our method, taking NMT as a case study of encoder-decoder models. We have proposed and evaluated two orthogonal extensions and show that we can (a) dynamically choose layer combinations for slightly faster decoding and (b) further compress models using recurrent stack-ing with knowledge distillation leading to models that also enable faster decoding. For further speed up in decoding as well as model compression, we plan to combine our approach with other techniques, such as those mentioned in Section 2. Although we have only tested our idea for NMT, it should be applicable to other tasks based on deep neural networks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Rather than casting the encoder-decoder model into a single column model with (N + M ) layers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We averaged multiple losses in our experiment, but there are a number of options, such as weighted averaging.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.statmt.org/wmt18/translation-task.html We excluded ParaCrawl following the instruction on the WMT18 website: \"BLEU score dropped by 1.0\" for this task.4 https://github.com/moses-smt/mosesdecoder 5 https://github.com/tensorflow/tensor2tensor", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We measured the collapsed time for a fair comparison, assuming that all vanilla models were trained on a single GPU one after another, even though one may be able to use multiple GPUs to train the 36 vanilla models in parallel.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "This is the crucial difference from two post-decoding processes: translation quality estimation(Specia et al., 2010) and n-best-list re-ranking(Kumar and Byrne, 2004).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Decoding time does not include the time spent for layer selection, which took up to 1.0 second for the entire test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank all the reviewers for their insightful comments and suggestions. A part of this work was conducted under the program \"Research and Development of Enhanced Multilingual and Multipurpose Speech Translation System\" of the Ministry of Internal Affairs and Communications (MIC), Japan. Atsushi Fujita was partly supported by JSPS KAKENHI Grant Number 19H05660. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of the 3rd International Conference on Learning Rep- resentations, San Diego, USA.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Evaluating layers of representation in neural machine translation on part-of-speech and semantic tagging tasks", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov, Llu\u00eds M\u00e0rquez, Hassan Sajjad, Nadir Durrani, Fahim Dalvi, and James Glass. 2017. Evaluating layers of representation in neural ma- chine translation on part-of-speech and semantic tagging tasks. In Proceedings of the Eighth In- ternational Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1-10, Taipei, Taiwan.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merri\u00ebnboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Aglar G\u00fcl\u00e7ehre", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1724--1734", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart van Merri\u00ebnboer, \u00c7 aglar G\u00fcl\u00e7ehre, Dzmitry Bahdanau, Fethi Bougares, Hol- ger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Nat- ural Language Processing, pages 1724-1734, Doha, Qatar.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Binarized neural networks: Training deep neural networks with weights and activations constrained to +1 or -1. CoRR", |
| "authors": [ |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Courbariaux", |
| "suffix": "" |
| }, |
| { |
| "first": "Itay", |
| "middle": [], |
| "last": "Hubara", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Soudry", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthieu Courbariaux, Itay Hubara, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. 2017. Bina- rized neural networks: Training deep neural net- works with weights and activations constrained to +1 or -1. CoRR, abs/1602.02830.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Recurrent stacking of layers for compact neural machine translation models", |
| "authors": [ |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Atsushi", |
| "middle": [], |
| "last": "Fujita", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "6292--6299", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raj Dabre and Atsushi Fujita. 2019. Recurrent stack- ing of layers for compact neural machine translation models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 6292-6299, Honolulu, USA.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "BERT: Pre-training of deep bidirectional Transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional Transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, USA.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Exploiting deep representations for neural machine translation", |
| "authors": [ |
| { |
| "first": "Zi-Yi", |
| "middle": [], |
| "last": "Dou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaopeng", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xing", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuming", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "4253--4262", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zi-Yi Dou, Zhaopeng Tu, Xing Wang, Shuming Shi, and Tong Zhang. 2018. Exploiting deep represen- tations for neural machine translation. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4253-4262, Brussels, Belgium.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Ensemble distillation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Freitag", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaser", |
| "middle": [], |
| "last": "Al-Onaizan", |
| "suffix": "" |
| }, |
| { |
| "first": "Baskaran", |
| "middle": [], |
| "last": "Sankaran", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Markus Freitag, Yaser Al-Onaizan, and Baskaran Sankaran. 2017. Ensemble distillation for neural machine translation. CoRR, abs/1702.01802.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Deep learning with limited numerical precision", |
| "authors": [ |
| { |
| "first": "Suyog", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Agrawal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kailash", |
| "middle": [], |
| "last": "Gopalakrishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Pritish", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 32nd International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1737--1746", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Suyog Gupta, Ankur Agrawal, Kailash Gopalakrish- nan, and Pritish Narayanan. 2015. Deep learning with limited numerical precision. In Proceedings of the 32nd International Conference on Machine Learning, pages 1737-1746, Lille, France.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Distilling the knowledge in a neural network", |
| "authors": [ |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geoffrey Hinton, Oriol Vinyals, and Jeffrey Dean. 2015. Distilling the knowledge in a neural network. CoRR, abs/1503.02531.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Sequencelevel knowledge distillation", |
| "authors": [ |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1317--1327", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoon Kim and Alexander M. Rush. 2016. Sequence- level knowledge distillation. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 1317-1327, Austin, USA.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Minimum Bayes-risk decoding for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Shankar", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Byrne", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics: HLT-NAACL 2004", |
| "volume": "", |
| "issue": "", |
| "pages": "169--176", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shankar Kumar and William Byrne. 2004. Minimum Bayes-risk decoding for statistical machine transla- tion. In Proceedings of the Human Language Tech- nology Conference of the North American Chapter of the Association for Computational Linguistics: HLT-NAACL 2004, pages 169-176, Boston, USA.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Fixed point quantization of deep convolutional networks", |
| "authors": [ |
| { |
| "first": "Darryl", |
| "middle": [ |
| "D" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Sachin", |
| "middle": [ |
| "S" |
| ], |
| "last": "Talathi", |
| "suffix": "" |
| }, |
| { |
| "first": "V. Sreekanth", |
| "middle": [], |
| "last": "Annapureddy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 33rd International Conference on International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "2849--2858", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Darryl D. Lin, Sachin S. Talathi, and V. Sreekanth An- napureddy. 2016. Fixed point quantization of deep convolutional networks. In Proceedings of the 33rd International Conference on International Confer- ence on Machine Learning, pages 2849-2858, New York, USA.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Results of the WMT18 metrics shared task", |
| "authors": [ |
| { |
| "first": "Qingsong", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "682--701", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qingsong Ma, Ond\u0159ej Bojar, and Yvette Graham. 2018. Results of the WMT18 metrics shared task. In Pro- ceedings of the Third Conference on Machine Trans- lation, Volume 2: Shared Task Papers, pages 682- 701, Brussels, Belgium.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Scaling neural machine translation", |
| "authors": [ |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Myle Ott, Sergey Edunov, David Grangier, and Michael Auli. 2018. Scaling neural machine trans- lation. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 1-9, Brussels, Belgium.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "BLEU: A method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073083.1073135" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: A method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting on Association for Com- putational Linguistics, pages 311-318, Philadelphia, USA.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "chrF deconstructed: \u03b2 parameters and n-gram weights", |
| "authors": [ |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Popovi\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the First Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "499--504", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maja Popovi\u0107. 2016. chrF deconstructed: \u03b2 param- eters and n-gram weights. In Proceedings of the First Conference on Machine Translation: Volume 2, Shared Task Papers, pages 499-504, Berlin, Ger- many.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A call for clarity in reporting BLEU scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "186--191", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186- 191, Brussels, Belgium.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Compression of neural machine translation models via pruning", |
| "authors": [ |
| { |
| "first": "Abigail", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 20th SIGNLL Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "291--301", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abigail See, Minh-Thang Luong, and Christopher D. Manning. 2016. Compression of neural machine translation models via pruning. In Proceedings of the 20th SIGNLL Conference on Computational Nat- ural Language Learning, pages 291-301, Berlin, Germany.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Machine translation evaluation versus quality estimation. Machine Translation", |
| "authors": [ |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhwaj", |
| "middle": [], |
| "last": "Raj", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Turchi", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "24", |
| "issue": "", |
| "pages": "39--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucia Specia, Dhwaj Raj, and Marco Turchi. 2010. Ma- chine translation evaluation versus quality estima- tion. Machine Translation, 24(1):39-50.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "On the importance of initialization and momentum in deep learning", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Martens", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Dahl", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1139--1147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, James Martens, George Dahl, and Geof- frey Hinton. 2013. On the importance of initializa- tion and momentum in deep learning. In Proceed- ings of the International Conference on Machine Learning, pages 1139-1147, Atlanta, USA.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 27th Neural Information Processing Systems Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. In Proceedings of the 27th Neural Infor- mation Processing Systems Conference, pages 3104- 3112, Montr\u00e9al, Canada.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 30th Neural Information Processing Systems Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of the 30th Neural Infor- mation Processing Systems Conference, pages 5998- 6008, Long Beach, USA.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Multi-layer representation fusion for neural machine translation", |
| "authors": [ |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Fuxue", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanyang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinqiao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingbo", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3015--3026", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qiang Wang, Fuxue Li, Tong Xiao, Yanyang Li, Yin- qiao Li, and Jingbo Zhu. 2018. Multi-layer repre- sentation fusion for neural machine translation. In Proceedings of the 27th International Conference on Computational Linguistics, pages 3015-3026, Santa Fe, USA.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Tied Transformers: Neural machine translation with shared encoder and decoder", |
| "authors": [ |
| { |
| "first": "Yingce", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianyu", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "5466--5473", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yingce Xia, Tianyu He, Xu Tan, Fei Tian, Di He, and Tao Qin. 2019. Tied Transformers: Neural machine translation with shared encoder and decoder. In Pro- ceedings of the AAAI Conference on Artificial Intel- ligence, pages 5466-5473, Honolulu, USA.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Accelerating neural Transformer via an average attention network", |
| "authors": [ |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Biao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinsong", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, Long Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "1789--1798", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deyi Xiong, Biao Zhang, and Jinsong Su. 2018. Accel- erating neural Transformer via an average attention network. In Proceedings of the 56th Annual Meet- ing of the Association for Computational Linguistics, Long Papers, pages 1789-1798, Melbourne, Aus- tralia.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Collapsing tied layers into one.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "The general concept of multi-layer softmaxing for training multi-layer neural models with an example of a 4-layer model.Figure 1ais a depiction of our idea in the form of multiple vanilla models whose layers are tied together.Figure 1bshows the result of collapsing all tied layers into a single layer. The red lines indicate the flow of gradients and hence the shallowest layer in the stack receives the largest number of updates.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "text": "Distribution of oracle translations determined by chrF scores between reference and each of the hypotheses derived from the 36 combinations of encoder and decoder layers (newstest2018, 2,998 sentences).", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF0": { |
| "num": null, |
| "content": "<table/>", |
| "text": "3 30.3 31.9 32.2 32.4 32.9 23.2 28.6 30.5 30.8 31.2 31.5 94.7 101.9 143.4 174.7 215.5 244.5 2 28.6 32.5 33.1 33.3 33.5 33.2 26.5 31.5 33.0 33.6 33.8 34.0 100.5 110.8 153.7 185.6 227.8 253.6 3 29.2 32.6 33.6 34.4 34.3 34.1 27.8 32.5 33.9 34.6 34.7 34.7 102.5 114.2 168.5 194.8 234.0 259.8 4 29.8 33.6 34.3 34.7 34.4 34.5 28.3 33.0 34.3 34.8 34.9 34.9 104.1 105.6 143.9 197.0 219.1 264.6 5 30.7 33.9 34.6 35.5 34.4 35.0 28.6 33.1 34.5 34.8 35.0 35.1 105.1 111.5 156.4 186.0 236.1 268.8 6 30.8 34.0 34.4 35.7 35.0 35.0 28.7 33.1 34.6 34.7 34.9 35.0 107.4 113.6 168.1 190.1 229.5 257.9", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "num": null, |
| "content": "<table/>", |
| "text": "Total model sizes for covering all 36 encoderdecoder layer combinations. The relative size is calculated regarding the tied-multi model as a standard. Similarly to \"36 vanilla models,\" \"36 RS models\" represents the total number of parameters of all models.", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "num": null, |
| "content": "<table><tr><td>compares results ob-</td></tr></table>", |
| "text": "33.5 34.1 34.2 34.3 34.3 94.7 101.9 143.4 174.7 215.5 244.5 2 33.7 35.5 35.7 35.7 35.8 35.8 100.5 110.8 153.7 185.6 227.8 253.6 3 34.1 35.8 36.1 36.1 36.2 36.2 102.5 114.2 168.5 194.8 234.0 259.8 4 34.3 36.0 36.2 36.2 36.3 36.3 104.1 105.6 143.9 197.0 219.1 264.6 5 34.5 36.1 36.2 36.3 36.3 36.3 105.1 111.5 156.4 186.0 236.1 268.8 6 34.6 36.1 36.2 36.2 36.3 36.2 107.4 113.6 168.1 190.1 229.5 257.9", |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |