| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T04:34:00.222518Z" |
| }, |
| "title": "NICT's Neural Machine Translation Systems for the WAT21 Restricted Translation Task", |
| "authors": [ |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Shanghai Jiao Tong University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Masao", |
| "middle": [], |
| "last": "Utiyama", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Information and Communications Technology (NICT)", |
| "location": { |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "mutiyama@eiichiro.sumita" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Information and Communications Technology (NICT)", |
| "location": { |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Shanghai Jiao Tong University", |
| "location": {} |
| }, |
| "email": "zhaohai@cs.sjtu.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes our system (Team ID: nictrb) for participating in the WAT'21 restricted machine translation task. In our submitted system, we designed a new training approach for restricted machine translation. By sampling from the translation target, we can solve the problem that ordinary training data does not have a restricted vocabulary. With the further help of constrained decoding in the inference phase, we achieved better results than the baseline, confirming the effectiveness of our solution. In addition, we also tried the vanilla and sparse Transformer as the backbone network of the model, as well as model ensembling, which further improved the final translation performance.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes our system (Team ID: nictrb) for participating in the WAT'21 restricted machine translation task. In our submitted system, we designed a new training approach for restricted machine translation. By sampling from the translation target, we can solve the problem that ordinary training data does not have a restricted vocabulary. With the further help of constrained decoding in the inference phase, we achieved better results than the baseline, confirming the effectiveness of our solution. In addition, we also tried the vanilla and sparse Transformer as the backbone network of the model, as well as model ensembling, which further improved the final translation performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The performance of machine translation has been greatly improved since it entered the era of Neural Machine Translation (NMT) (Bahdanau et al., 2015; Sutskever et al., 2014; Wu et al., 2016) . Different from traditional statistical machine translation (SMT) (Koehn et al., 2003) , NMT models are trained end-to-end with contextualized representations to alleviate the locality problem and dense representations to mitigate the sparsity issue. The incorporation of novel structures such as CNN (Gehring et al., 2017) and Transformer (Vaswani et al., 2017) into NMT has brought the performance one step closer to practical translation.", |
| "cite_spans": [ |
| { |
| "start": 126, |
| "end": 149, |
| "text": "(Bahdanau et al., 2015;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 150, |
| "end": 173, |
| "text": "Sutskever et al., 2014;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 174, |
| "end": 190, |
| "text": "Wu et al., 2016)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 258, |
| "end": 278, |
| "text": "(Koehn et al., 2003)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 493, |
| "end": 515, |
| "text": "(Gehring et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 532, |
| "end": 554, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Though NMT can more effectively exploit large parallel corpora, the performance is still insufficient to meet the requirements in some special translation scenarios. The end-to-end NMT models remove many approaches in the SMT paradigm for manually guiding the translation process. One attractiveness of the SMT method is that it provides explicit control over translation output, which is effective in a variety of translation settings, including interactive machine translation (Peris et al., 2017) and domain adaptation (Chu and Wang, 2018) , which is also crucial for the practical application of NMT.", |
| "cite_spans": [ |
| { |
| "start": 479, |
| "end": 499, |
| "text": "(Peris et al., 2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 522, |
| "end": 542, |
| "text": "(Chu and Wang, 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Since there is still a need for manual interventions for the new NMT paradigm, much effort is spent in studying how to incorporate this explicit control into the end-to-end neural translation (Arthur et al., 2016) . Among these efforts, Constrained Decoding (CD) has gained a lot of attention in this research field, which is a modification to commonly adopted beam search in ordinary NMT models. Hokamp and Liu (2017) proposed grid beam search, which expands beam search to include pre-specified lexical constraints. Anderson et al. (2017) used constrained beam search to force the inclusion of restricted words in the output, and employed fixed pre-trained word embeddings to facilitate vocabulary expansion to unseen words in training.", |
| "cite_spans": [ |
| { |
| "start": 192, |
| "end": 213, |
| "text": "(Arthur et al., 2016)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 397, |
| "end": 418, |
| "text": "Hokamp and Liu (2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 518, |
| "end": 540, |
| "text": "Anderson et al. (2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While these works accomplish the goal of explicit translation control, the time complexity of their decoding algorithm and resultant decoding speed falls short of the expectations. The complexity of grid beam search and constrained beam search is linear and exponential to the number of constraints, respectively. These algorithms are thus too inefficient to be practical for large-scale use. To alleviate the shortcomings in constrained decoding, Post and Vilar (2018) proposed a new constrained decoding algorithm with a claimed complexity of O(1) in the number of constraints -dynamic beam allocation which allocates the slots in a fixed-size beam. However, their approach still processes sentence constraints sequentially rather than batch processing, limiting the GPU's parallel processing capabilities. Based on Post and Vilar (2018) , a vectorized dynamic beam allocation approach was proposed in Hu et al. (2019) , which which vector-izes the dynamic beam allocation for batching and thus leading to improvement in throughput with parallelization. Based on Post and Vilar (2018) , Hu et al. (2019) proposed a vectorized dynamic beam allocation approach, which vectorizes the dynamic beam allocation for batching, resulting in increased throughput with parallelization.", |
| "cite_spans": [ |
| { |
| "start": 448, |
| "end": 469, |
| "text": "Post and Vilar (2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 818, |
| "end": 839, |
| "text": "Post and Vilar (2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 904, |
| "end": 920, |
| "text": "Hu et al. (2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1065, |
| "end": 1086, |
| "text": "Post and Vilar (2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1089, |
| "end": 1105, |
| "text": "Hu et al. (2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Constrained decoding is a very general method for incorporating additional translation knowledge into the output without modifying the model parameters or training data. However, the model's prediction distribution can be skewed during the decoding process with hard constraints, resulting in poor translation results. When the model is exposed to the restricted translation paradigm during training, the gap between training and inference can be reduced, potentially improving performance. Therefore, in this paper, we propose a training method of Sampled Constraints as Concentration (SCC). In this method, training data is the same as the ordinary NMT; only minor modifications on the loss calculation are required to adapt the model to restricted translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In our submission to WAT'21 (Nakazawa et al., 2021) restricted translation task, we chose Transformer (Vaswani et al., 2017) as our baseline because of its high performance and scalability. Although there are some variants, our previous experiments have shown there are not too many approaches that can be both concise and effective. At the same time, though multi-head self-attention in Transformer can model extremely long dependencies, deep layer attention tends to overconcentrate on a single token, resulting in inadequate use of local information and difficulty representing long sequences. To address this disadvantage, we employ the PRIME Transformer with a multi-scale sparse attention mechanism as a second baseline. The models in the two architectures are ensembled to improve the overall results. Our final system uses a combination of the SCC training method and the constrained decoding of Hu et al. (2019) , which makes our system leverages soft constrained (inside the model) and benefit from hard restrictions (external decoding).", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 51, |
| "text": "(Nakazawa et al., 2021)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 102, |
| "end": 124, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 904, |
| "end": 920, |
| "text": "Hu et al. (2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we describe the methods used in our system in detail. Our system is made up of four components: the Transformer model, the Sparse Transformer model, the SCC training ap-proach, and the constrained decoding algorithm. In translation, given the source input sequence X = {w 1 , w 2 , ..., w m }, its target translation is Y = {y 1 , y 2 , ..., y n }, the parameter of the NMT model is \u03b8, then the probability form of the translation process can be written as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our System", |
| "sec_num": "2" |
| }, |
| { |
| "text": "P (Y |X, \u03b8) = n i=1 P (y i |y <i , X, \u03b8),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our System", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where y <i denotes the tokens generated before time step i.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our System", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Transformer model (Li et al., 2021 ) is a encoderdecoder architecture entirely built on multi-head self-attention which is responsible for learning representations of global context. With an input representation H, a multi-head self-attention (MHA) layer first projects H into three representations, key K, query Q, and value V . Then, it uses a self-attention mechanism to get the output representation:", |
| "cite_spans": [ |
| { |
| "start": 18, |
| "end": 34, |
| "text": "(Li et al., 2021", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transformer Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "head k = Attn(H) = \u03c3(QW Q , KW K , V W V )W O MHA(H) = Concat(head1, \u2022 \u2022 \u2022 , headK)W O , where Q = Linear Q (H), K = Linear K (H), V = Linear V (H), W O , W Q , W K", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transformer Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": ", and W V are projection parameters. The self-attention operation \u03c3 is the dot-production between key, query, and value pairs:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transformer Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u03c3(Q1, K1, V1) = Softmax( Q1K T 1 \u221a d k )V1,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transformer Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transformer Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "d k = d model /K is the dimension of each head.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transformer Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The encoder of the Transformer model consists of a stack of multiple layers with MHA structure (Self-MHA enc ) where the residual mechanism and layer normalization are used to connect two adjacent layers. Similar to the encoder, each decoder layer decoder is composed of two MHA structures: Self-MHA dec and Cross-MHA, since it not only encodes the input sequence but also incorporates the source representation. Then the processing flow of the model can be written as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transformer Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Henc = Self-MHAenc(X), H dec = Self-MHA dec (IncMask([BOS, y1, \u2022 \u2022 \u2022 , yn\u22121])), P (Y |X) = Softmax(Linear(Cross-MHA(H dec , Henc)))),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transformer Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where IncMask(\u2022) represents the incremental masking strategy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transformer Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "According to the evaluation in recent research (Tang et al., 2018) , it has shown that the vanilla Transformer has surprising shortcomings in long sequence encoding even the Transformer is designed to modeling long dependencies. Vanilla Transformer works well for short sequence translation, but performance drops as the source sentence length increases because only a small number of tokens are represented by self-attention, resulting in difficulty for translation. Replacing the dense self-attention mechanism with a sparse attention mechanism will alleviate the difficulties in long sentence translation; we chose the PRIME Transformer as our another base model. Compared to vanilla Transformer, PRIME Transformer generates the output representation of layer i in a fusion way:", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 66, |
| "text": "(Tang et al., 2018)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sparse Transformer Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "H i = H i\u22121 + MHA(H i\u22121 ) + Conv(H i\u22121 ) + Pointwise(H i\u22121 ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sparse Transformer Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where H i\u22121 is the output of layer i \u2212 1. Conv(\u2022) refers to dynamic convolution with multiple kernel sizes, which is employed to capture local context:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sparse Transformer Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Conv k (H) = DepthConv k (HW V )W out DepthConv k (H) = k j=1 Softmax( d c=1 W Q j,c Hi,c) \u2022 H i+j\u2212 k+1 2 ,c , Conv(H) = K i=1 exp (\u03b1i) n j=1 exp (\u03b1j) Conv k i (X)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sparse Transformer Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "in which DepthConv(\u2022) is the depth convolution structure proposed in Wu et al. (2019) . And Pointwise(\u2022) refers to a position-wise feed-forward network:", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 85, |
| "text": "Wu et al. (2019)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sparse Transformer Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Pointwise(H) = max(0, HW1 + b1)W2 + b2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sparse Transformer Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where W 1 , b 1 , W 2 , and b 2 are learnable parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sparse Transformer Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The predicted probability in ordinary NMT is y i \u223c P (y i |X, \u03b8). Because of the inclusion of the constrained word sequence C in restricted translation, the probability distribution becomes y i \u223c P (y i |X, C, \u03b8). To adapt the restricted translation for the NMT model rather than just influencing the search process, we expose the constrained word sequence C as additional context like source input.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Since the parallel training data only contains the source and target language sequences, we obtain the constrained word sequence for training via random dynamic sampling from the reference target translation. This not only alleviates the burden of constrained word annotation but also has the potential to minimize overfitting.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Specifically, in the model, we use the Self-MHA dec to encode the input constrained sequence to obtain its representation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Hcst = Self-MHA dec (C).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "It is worth noting that we remove the positional encoding of constrained sequence since the order of restricted word sequence is usually inconsistent with the target translation; additionally, we also remove the incremental mask because the whole sequence is exposed to the decoder as an additional context at the same time. The probabilistic form of restricted translation accordingly changes to:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "P (Y |X) = Softmax(Linear(Cross-MHA(H dec , Henc)+ Cross-MHA(H dec , Hcst)))).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Because sampled constrained words are exposed to the decoder, to enforce the inclusion of these words in the translation, we place additional penalties on the loss of these sampled positions to achieve the goal of restrict translation with soft constraints on the model:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "LSCC = \u2212 m i=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "(1 + \u03b31(yi \u2208 C)) logP (yi|X; C; y<i; \u03b8) ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "where 1(\u2022) is the indicator function and \u03b3 is the penalty factor.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sampled Constraints as Concentration Training", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Beam search (Koehn, 2010 ) is a common approximate search algorithm for sequence generation task. Lexically constrained decoding is a modification to the beam search algorithm, which is proposed to enforce hard constraints that force a given constrained sequence to appear in the generated sequence. Specifically, beam search maintains a beam B t on time step t, which contains only the b most likely partial sequences, where b is known as the beam size. The beam B t is updated by retaining the b most likely sequences in the candidate set E t generated by considering all possible next word predictions: where\u0176 t\u22121 is the generated sequence in time step t \u2212 1 and V is the target vocabulary.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 24, |
| "text": "(Koehn, 2010", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexically Constrained Decoding", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "In lexically constrained decoding, a finite-state machine (FSM) is used to impose the constraints. For each state s \u2208 S in the FSM, a corresponding search beam B s is maintained similar to the beam search:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexically Constrained Decoding", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "E s t = s \u2208S (\u0176t\u22121, w) |\u0176t\u22121 \u2208 B s t\u22121 , w \u2208 V, \u03b4(s , w) = s ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexically Constrained Decoding", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "where \u03b4 : S \u00d7 V \u2192 S is the FSM state-transition function that maps states and predicted words to states.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexically Constrained Decoding", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Our implementation of the Transformer models and lexically constrained decoding algorithm are based on the Fairseq toolkit 1 . We follow the settings and pre-processing methods in our previous models and systems He et al., 2019; Li et al., 2020b,d,c; . We use Transformer-big as our basic model, which has 6 layers in both the encoder and decoder, respectively. For each layer, it consists of a multi-head attention sublayer with 16 heads and a feed-forward sublayer with an inner dimension 4096. The word embedding dimensions and the hidden state dimensions are set to 1024 for both the encoder and decoder. In the training phase, the dropout rate is set to 0.1. Our model training consists of two phases. In the first NMT pre-training phase, the ParaCrawl-v5.1 (Espl\u00e0 et al., 2019) and Wiki Titles v2 datasets are used. Then we finetune the model using the ASPEC training data in the second domain finetune phase. Table 2 shows the data statistics for each dataset. In both phases, cross-entropy with label smoothing of 0.1 and D2GPo (Li et al., 2020a) are employed as the training loss criterions. We use Adam (Kingma and Ba, 2015) as our optimizer, with parameters settings \u03b2 1 = 0.9, \u03b2 2 = 0.98 and = 10 \u22128 . The initial learning rate is set to 10 \u22124 for NMT pre-training and 10 \u22125 for domain finetuning. The models are trained on 8 GPUs for about 500,000 steps. In our systems, we follow standard practice and learn a subword (Sennrich et al., 2016) encoding with 40K joint merge operations. Table 1 shows the official results evaluated on ASPEC En\u2192Ja test set. Comparing the results of the vanilla Transformer-big model and Transformer-big+SCC+CD, restricted translation under +SCC+CD has brought a very large performance improvement, which illustrates the performance advantage of restricted translation. 
Similar to ordinary NMT, sparse Transformer achieves better results than Transformer-big in restricted translation, which demonstrates that Sparse Transformer is a general model structure. A further increase in performance is achieved after ensembling on these two models. This benefits from the models of the distinct architectures of the two models. In general, the improvement brought about by the same architecture is less. We show the results of ASPEC En\u2192Ja test set in Table 3 . By comparison, the conclusion is essentially consistent with Table 2 .", |
| "cite_spans": [ |
| { |
| "start": 212, |
| "end": 228, |
| "text": "He et al., 2019;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 229, |
| "end": 250, |
| "text": "Li et al., 2020b,d,c;", |
| "ref_id": null |
| }, |
| { |
| "start": 763, |
| "end": 783, |
| "text": "(Espl\u00e0 et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1036, |
| "end": 1054, |
| "text": "(Li et al., 2020a)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1432, |
| "end": 1455, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 916, |
| "end": 923, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 1498, |
| "end": 1505, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 2288, |
| "end": 2295, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 2359, |
| "end": 2366, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Details", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "In this paper, we present our NMT systems for WAT21 restricted translation shared tasks in English \u2194 English. By integrating the following techniques: Sparse Transformer, Sampled Constraints as Concentration, and Lexically Constrained Decoding, our final system achieves substantial improvement over baseline systems which show the effectiveness of our approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Et = (\u0176t\u22121, w) |\u0176t\u22121 \u2208 Bt\u22121, w \u2208 V ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Guided open vocabulary image captioning with constrained beam search", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Anderson", |
| "suffix": "" |
| }, |
| { |
| "first": "Basura", |
| "middle": [], |
| "last": "Fernando", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Gould", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "936--945", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1098" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Anderson, Basura Fernando, Mark Johnson, and Stephen Gould. 2017. Guided open vocabulary im- age captioning with constrained beam search. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 936-945, Copenhagen, Denmark. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Incorporating discrete translation lexicons into neural machine translation", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Arthur", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1557--1567", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Arthur, Graham Neubig, and Satoshi Nakamura. 2016. Incorporating discrete translation lexicons into neural machine translation. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 1557-1567, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A survey of domain adaptation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Chenhui", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1304--1319", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chenhui Chu and Rui Wang. 2018. A survey of do- main adaptation for neural machine translation. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1304-1319, Santa Fe, New Mexico, USA. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "ParaCrawl: Web-scale parallel corpora for the languages of the EU", |
| "authors": [ |
| { |
| "first": "Miquel", |
| "middle": [], |
| "last": "Espl\u00e0", |
| "suffix": "" |
| }, |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Forcada", |
| "suffix": "" |
| }, |
| { |
| "first": "Gema", |
| "middle": [], |
| "last": "Ram\u00edrez-S\u00e1nchez", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of Machine Translation Summit XVII", |
| "volume": "2", |
| "issue": "", |
| "pages": "118--119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miquel Espl\u00e0, Mikel Forcada, Gema Ram\u00edrez-S\u00e1nchez, and Hieu Hoang. 2019. ParaCrawl: Web-scale paral- lel corpora for the languages of the EU. In Proceed- ings of Machine Translation Summit XVII Volume 2: Translator, Project and User Tracks, pages 118-119, Dublin, Ireland. European Association for Machine Translation.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Convolutional sequence to sequence learning", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Gehring", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Denis", |
| "middle": [], |
| "last": "Yarats", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [ |
| "N" |
| ], |
| "last": "Dauphin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning", |
| "volume": "70", |
| "issue": "", |
| "pages": "1243--1252", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Gehring, Michael Auli, David Grangier, De- nis Yarats, and Yann N. Dauphin. 2017. Convolu- tional sequence to sequence learning. In Proceed- ings of the 34th International Conference on Ma- chine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Ma- chine Learning Research, pages 1243-1252. PMLR.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Syntaxaware multilingual semantic role labeling", |
| "authors": [ |
| { |
| "first": "Shexia", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5350--5359", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1538" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shexia He, Zuchao Li, and Hai Zhao. 2019. Syntax- aware multilingual semantic role labeling. In Pro- ceedings of the 2019 Conference on Empirical Meth- ods in Natural Language Processing and the 9th In- ternational Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5350-5359, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Syntax for semantic role labeling, to be, or not to be", |
| "authors": [ |
| { |
| "first": "Shexia", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongxiao", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2061--2071", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1192" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shexia He, Zuchao Li, Hai Zhao, and Hongxiao Bai. 2018. Syntax for semantic role labeling, to be, or not to be. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 2061-2071, Melbourne, Australia. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Lexically constrained decoding for sequence generation using grid beam search", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Hokamp", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1535--1546", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1141" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Hokamp and Qun Liu. 2017. Lexically con- strained decoding for sequence generation using grid beam search. In Proceedings of the 55th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 1535-1546, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Improved lexically constrained decoding for translation and monolingual rewriting", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "Edward" |
| ], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Huda", |
| "middle": [], |
| "last": "Khayrallah", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Culkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Tongfei", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "839--850", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1090" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Edward Hu, Huda Khayrallah, Ryan Culkin, Patrick Xia, Tongfei Chen, Matt Post, and Benjamin Van Durme. 2019. Improved lexically constrained decoding for translation and monolingual rewriting. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 839-850, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2010. Statistical Machine Translation. Cambridge University Press.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Statistical phrase-based translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Franz", |
| "middle": [ |
| "J" |
| ], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 2003 Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "127--133", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Franz J. Och, and Daniel Marcu. 2003. Statistical phrase-based translation. In Proceedings of the 2003 Human Language Technology Confer- ence of the North American Chapter of the Associa- tion for Computational Linguistics, pages 127-133.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A unified syntax-aware framework for semantic role labeling", |
| "authors": [ |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Shexia", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiaxun", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Gongshen", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Luo", |
| "middle": [], |
| "last": "Si", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2401--2411", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1262" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zuchao Li, Shexia He, Jiaxun Cai, Zhuosheng Zhang, Hai Zhao, Gongshen Liu, Linlin Li, and Luo Si. 2018. A unified syntax-aware framework for se- mantic role labeling. In Proceedings of the 2018 Conference on Empirical Methods in Natural Lan- guage Processing, pages 2401-2411, Brussels, Bel- gium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Data-dependent gaussian prior objective for language generation", |
| "authors": [ |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kehai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Masao", |
| "middle": [], |
| "last": "Utiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "8th International Conference on Learning Representations, ICLR 2020, Addis Ababa", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zuchao Li, Rui Wang, Kehai Chen, Masao Utiyama, Eiichiro Sumita, Zhuosheng Zhang, and Hai Zhao. 2020a. Data-dependent gaussian prior objective for language generation. In 8th International Confer- ence on Learning Representations, ICLR 2020, Ad- dis Ababa, Ethiopia, April 26-30, 2020. OpenRe- view.net.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Explicit sentence compression for neural machine translation", |
| "authors": [], |
| "year": 2020, |
| "venue": "The Thirty-Fourth AAAI Conference on Artificial Intelligence", |
| "volume": "2020", |
| "issue": "", |
| "pages": "8311--8318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "2020b. Explicit sentence compression for neural ma- chine translation. In The Thirty-Fourth AAAI Con- ference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial In- telligence, EAAI 2020, New York, NY, USA, Febru- ary 7-12, 2020, pages 8311-8318. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Text compression-aided transformer encoding", |
| "authors": [ |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kehai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Masao", |
| "middle": [], |
| "last": "Utiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zuchao Li, Zhuosheng Zhang, Hai Zhao, Rui Wang, Kehai Chen, Masao Utiyama, and Eiichiro Sumita. 2021. Text compression-aided transformer encod- ing. IEEE Transactions on Pattern Analysis and Ma- chine Intelligence.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "SJTU-NICT's supervised and unsupervised neural machine translation systems for the WMT20 news translation task", |
| "authors": [ |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kehai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Masao", |
| "middle": [], |
| "last": "Utiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "218--229", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zuchao Li, Hai Zhao, Rui Wang, Kehai Chen, Masao Utiyama, and Eiichiro Sumita. 2020c. SJTU- NICT's supervised and unsupervised neural ma- chine translation systems for the WMT20 news translation task. In Proceedings of the Fifth Confer- ence on Machine Translation, pages 218-229, On- line. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Reference language based unsupervised neural machine translation", |
| "authors": [ |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Masao", |
| "middle": [], |
| "last": "Utiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "4151--4162", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.findings-emnlp.371" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zuchao Li, Hai Zhao, Rui Wang, Masao Utiyama, and Eiichiro Sumita. 2020d. Reference language based unsupervised neural machine translation. In Find- ings of the Association for Computational Linguis- tics: EMNLP 2020, pages 4151-4162, Online. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "SJTU-NICT at MRP 2019: Multi-task learning for end-toend uniform semantic graph parsing", |
| "authors": [ |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Masao", |
| "middle": [], |
| "last": "Utiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Shared Task on Cross-Framework Meaning Representation Parsing at the 2019 Conference on Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "45--54", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K19-2004" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zuchao Li, Hai Zhao, Zhuosheng Zhang, Rui Wang, Masao Utiyama, and Eiichiro Sumita. 2019. SJTU- NICT at MRP 2019: Multi-task learning for end-to- end uniform semantic graph parsing. In Proceed- ings of the Shared Task on Cross-Framework Mean- ing Representation Parsing at the 2019 Conference on Natural Language Learning, pages 45-54, Hong Kong. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Overview of the 8th workshop on Asian translation", |
| "authors": [ |
| { |
| "first": "Toshiaki", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Nakayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenchen", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Shohei", |
| "middle": [], |
| "last": "Higashiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideya", |
| "middle": [], |
| "last": "Mino", |
| "suffix": "" |
| }, |
| { |
| "first": "Isao", |
| "middle": [], |
| "last": "Goto", |
| "suffix": "" |
| }, |
| { |
| "first": "Win", |
| "middle": [ |
| "Pa" |
| ], |
| "last": "Pa", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Kunchukuttan", |
| "suffix": "" |
| }, |
| { |
| "first": "Shantipriya", |
| "middle": [], |
| "last": "Parida", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenhui", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Akiko", |
| "middle": [], |
| "last": "Eriguchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaori", |
| "middle": [], |
| "last": "Abe", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Oda", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 8th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toshiaki Nakazawa, Hideki Nakayama, Chenchen Ding, Raj Dabre, Shohei Higashiyama, Hideya Mino, Isao Goto, Win Pa Pa, Anoop Kunchukut- tan, Shantipriya Parida, Ond\u0159ej Bojar, Chenhui Chu, Akiko Eriguchi, Kaori Abe, Yusuke Oda, and Sadao Kurohashi. 2021. Overview of the 8th work- shop on Asian translation. In Proceedings of the 8th Workshop on Asian Translation, Bangkok, Thailand. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Interactive neural machine translation", |
| "authors": [ |
| { |
| "first": "Alvaro", |
| "middle": [], |
| "last": "Peris", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Domingo", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Casacuberta", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Comput. Speech Lang", |
| "volume": "45", |
| "issue": "", |
| "pages": "201--220", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.csl.2016.12.003" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alvaro Peris, Miguel Domingo, and Francisco Casacu- berta. 2017. Interactive neural machine translation. Comput. Speech Lang., 45:201-220.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Fast lexically constrained decoding with dynamic beam allocation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Vilar", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1314--1324", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1119" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post and David Vilar. 2018. Fast lexically con- strained decoding with dynamic beam allocation for neural machine translation. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long Pa- pers), pages 1314-1324, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems 27: Annual Conference on Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural networks. In Advances in Neural Information Processing Sys- tems 27: Annual Conference on Neural Informa- tion Processing Systems 2014, December 8-13 2014, Montreal, Quebec, Canada, pages 3104-3112.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Why self-attention? a targeted evaluation of neural machine translation architectures", |
| "authors": [ |
| { |
| "first": "Gongbo", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mathias", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "Annette", |
| "middle": [], |
| "last": "Rios", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "4263--4272", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1458" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gongbo Tang, Mathias M\u00fcller, Annette Rios, and Rico Sennrich. 2018. Why self-attention? a targeted evaluation of neural machine translation architec- tures. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4263-4272, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4- 9, 2017, Long Beach, CA, USA, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Pay less attention with lightweight and dynamic convolutions", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Angela", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [], |
| "last": "Baevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [ |
| "N" |
| ], |
| "last": "Dauphin", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "7th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Wu, Angela Fan, Alexei Baevski, Yann N. Dauphin, and Michael Auli. 2019. Pay less atten- tion with lightweight and dynamic convolutions. In 7th International Conference on Learning Represen- tations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Klingner", |
| "suffix": "" |
| }, |
| { |
| "first": "Apurva", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaobing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Gouws", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshikiyo", |
| "middle": [], |
| "last": "Kato", |
| "suffix": "" |
| }, |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideto", |
| "middle": [], |
| "last": "Kazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Stevens", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Kurian", |
| "suffix": "" |
| }, |
| { |
| "first": "Nishant", |
| "middle": [], |
| "last": "Patil", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "CoRR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, Melvin John- son, Xiaobing Liu, Lukasz Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, Alex Rud- nick, Oriol Vinyals, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2016. Google's neural machine translation system: Bridging the gap between human and machine translation. CoRR, abs/1609.08144.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Semantics-aware bert for language understanding", |
| "authors": [ |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuwei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuailiang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "34", |
| "issue": "", |
| "pages": "9628--9635", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuosheng Zhang, Yuwei Wu, Hai Zhao, Zuchao Li, Shuailiang Zhang, Xi Zhou, and Xiang Zhou. 2020. Semantics-aware bert for language understanding. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 9628-9635.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "MUSE: parallel multi-scale attention for sequence to sequence learning", |
| "authors": [ |
| { |
| "first": "Guangxiang", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingjing", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Liangchen", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guangxiang Zhao, Xu Sun, Jingjing Xu, Zhiyuan Zhang, and Liangchen Luo. 2019. MUSE: parallel multi-scale attention for sequence to sequence learn- ing. CoRR, abs/1911.09483.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Parsing all: Syntax and semantics, dependencies and spans", |
| "authors": [ |
| { |
| "first": "Junru", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "4438--4449", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.findings-emnlp.398" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junru Zhou, Zuchao Li, and Hai Zhao. 2020. Parsing all: Syntax and semantics, dependencies and spans. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 4438-4449, On- line. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "num": null, |
| "text": "48.92 49.24 49.25 82.79 83.15 83.57 79.15 Sparse Transformer-big + SCC + CD * 50.93 51.18 51.21 83.27 83.52 84.00 79.91 Ensemble * 51.07 51.32 51.36 83.68 83.99 84.41 79.99", |
| "html": null, |
| "content": "<table><tr><td>Model</td><td>jum</td><td>BLEU kyt</td><td>mec</td><td>jum</td><td>RIBES kyt</td><td>mec</td><td>AMFM \u2212</td></tr><tr><td>Transformer-big</td><td colspan=\"6\">41.67 41.82 41.84 81.05 81.32 81.50</td><td>74.95</td></tr><tr><td>Transformer-big + SCC + CD</td><td/><td/><td/><td/><td/><td/><td/></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "num": null, |
| "text": "Results on ASPEC En\u2192Ja test sets. * indicates that the official evaluation results are reported.", |
| "html": null, |
| "content": "<table><tr><td>Dataset</td><td>Sentences</td></tr><tr><td>ParaCrawl-v5.1</td><td>10.12M</td></tr><tr><td>Wiki Titles v2</td><td>3.64M</td></tr><tr><td>ASPEC</td><td>3.01M</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "num": null, |
| "text": "Training data statistics.", |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "num": null, |
| "text": "Results on ASPEC Ja\u2192En test sets. * indicates that the official evaluation results are reported.", |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |