| { |
| "paper_id": "K17-1044", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:07:50.827698Z" |
| }, |
| "title": "Natural Language Generation for Spoken Dialogue System using RNN Encoder-Decoder Networks", |
| "authors": [ |
| { |
| "first": "Van-Khanh", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Japan Advanced Institute of Science and Technology", |
| "location": { |
| "addrLine": "JAIST 1-1 Asahidai", |
| "postCode": "923-1292", |
| "settlement": "Nomi, Ishikawa", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Le-Minh", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Japan Advanced Institute of Science and Technology", |
| "location": { |
| "addrLine": "JAIST 1-1 Asahidai", |
| "postCode": "923-1292", |
| "settlement": "Nomi, Ishikawa", |
| "country": "Japan" |
| } |
| }, |
| "email": "nguyenml@jaist.ac.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Natural language generation (NLG) is a critical component in a spoken dialogue system. This paper presents a Recurrent Neural Network based Encoder-Decoder architecture, in which an LSTM-based decoder is introduced to select, aggregate semantic elements produced by an attention mechanism over the input elements, and to produce the required utterances. The proposed generator can be jointly trained both sentence planning and surface realization to produce natural language sentences. The proposed model was extensively evaluated on four different NLG datasets. The experimental results showed that the proposed generators not only consistently outperform the previous methods across all the NLG domains but also show an ability to generalize from a new, unseen domain and learn from multi-domain datasets.", |
| "pdf_parse": { |
| "paper_id": "K17-1044", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Natural language generation (NLG) is a critical component in a spoken dialogue system. This paper presents a Recurrent Neural Network based Encoder-Decoder architecture, in which an LSTM-based decoder is introduced to select, aggregate semantic elements produced by an attention mechanism over the input elements, and to produce the required utterances. The proposed generator can be jointly trained both sentence planning and surface realization to produce natural language sentences. The proposed model was extensively evaluated on four different NLG datasets. The experimental results showed that the proposed generators not only consistently outperform the previous methods across all the NLG domains but also show an ability to generalize from a new, unseen domain and learn from multi-domain datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Natural Language Generation (NLG) plays a critical role in Spoken Dialogue Systems (SDS) with task is to convert a meaning representation produced by the Dialogue Manager into natural language utterances. Conventional approaches still rely on comprehensive hand-tuning templates and rules requiring expert knowledge of linguistic representation, including rule-based (Mirkovic et al., 2011) , corpus-based n-gram models (Oh and Rudnicky, 2000) , and a trainable generator (Stent et al., 2004) .", |
| "cite_spans": [ |
| { |
| "start": 367, |
| "end": 390, |
| "text": "(Mirkovic et al., 2011)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 420, |
| "end": 443, |
| "text": "(Oh and Rudnicky, 2000)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 472, |
| "end": 492, |
| "text": "(Stent et al., 2004)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recently, Recurrent Neural Networks (RNNs) based approaches have shown promising performance in tackling the NLG problems. The RNNbased models have been applied for NLG as a joint training model (Wen et al., 2015a,b) and an endto-end training model (Wen et al., 2016c) . A recurring problem in such systems is requiring annotated datasets for particular dialogue acts 1 (DAs). To ensure that the generated utterance representing the intended meaning of the given DA, the previous RNN-based models were further conditioned on a 1-hot vector representation of the DA. Wen et al. (2015a) introduced a heuristic gate to ensure that all the slot-value pair was accurately captured during generation. Wen et al. (2015b) subsequently proposed a Semantically Conditioned Long Short-term Memory generator (SC-LSTM) which jointly learned the DA gating signal and language model. More recently, Encoder-Decoder networks , especially the attentional based models (Wen et al., 2016b; Mei et al., 2015) have been explored to solve the NLG tasks. The Attentional RNN Encoder-Decoder (Bahdanau et al., 2014 ) (ARED) based approaches have also shown improved performance on a variety of tasks, e.g., image captioning (Xu et al., 2015; Yang et al., 2016 ), text summarization (Rush et al., 2015; Nallapati et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 216, |
| "text": "(Wen et al., 2015a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 249, |
| "end": 268, |
| "text": "(Wen et al., 2016c)", |
| "ref_id": null |
| }, |
| { |
| "start": 562, |
| "end": 584, |
| "text": "DA. Wen et al. (2015a)", |
| "ref_id": null |
| }, |
| { |
| "start": 951, |
| "end": 970, |
| "text": "(Wen et al., 2016b;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 971, |
| "end": 988, |
| "text": "Mei et al., 2015)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1068, |
| "end": 1090, |
| "text": "(Bahdanau et al., 2014", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1200, |
| "end": 1217, |
| "text": "(Xu et al., 2015;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1218, |
| "end": 1235, |
| "text": "Yang et al., 2016", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1258, |
| "end": 1277, |
| "text": "(Rush et al., 2015;", |
| "ref_id": null |
| }, |
| { |
| "start": 1278, |
| "end": 1301, |
| "text": "Nallapati et al., 2016)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While the RNN-based generators with DA gating-vector can prevent the undesirable semantic repetitions, the ARED-based generators show signs of better adapting to a new domain. However, none of the models show significant advantage from out-of-domain data. To better analyze model generalization to an unseen, new domain as well as model leveraging the out-of-domain sources, we propose a new architecture which is an extension of the ARED model. In order to better select, aggregate and control the semantic information, a Refinement Adjustment LSTMbased component (RALSTM) is introduced to the decoder side. The proposed model can learn from unaligned data by jointly training the sentence planning and surface realization to produce natural language sentences. We conducted experiments on four different NLG domains and found that the proposed methods significantly outperformed the state-of-the-art methods regarding BLEU (Papineni et al., 2002) and slot error rate ERR scores (Wen et al., 2015b) . The results also showed that our generators could scale to new domains by leveraging the out-of-domain data. To sum up, we make three key contributions in this paper:", |
| "cite_spans": [ |
| { |
| "start": 925, |
| "end": 948, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 980, |
| "end": 999, |
| "text": "(Wen et al., 2015b)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We present an LSTM-based component called RALSTM cell applied on the decoder side of an ARED model, resulting in an endto-end generator that empirically shows significant improved performances in comparison with the previous approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We extensively conduct the experiments to evaluate the models training from scratch on each in-domain dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We empirically assess the models' ability to: learn from multi-domain datasets by pooling all available training datasets; and adapt to a new, unseen domain by limited feeding amount of in-domain data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We review related works in Section 2. Following a detail of proposed model in Section 3, Section 4 describes datasets, experimental setups, and evaluation metrics. The resulting analysis is presented in Section 5. We conclude with a brief summary and future work in Section 6.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recently, RNNs-based models have shown promising performance in tackling the NLG problems. Zhang and Lapata (2014) proposed a generator using RNNs to create Chinese poetry. Xu et al. (2015) ; Karpathy and Fei-Fei (2015) ; also used RNNs in a multi-modal setting to solve image captioning tasks. The RNN-based Sequence to Sequence models have applied to solve variety of tasks: conversational modeling , machine translation (Luong et al., 2015; Li and Jurafsky, 2016) For task-oriented dialogue systems, Wen et al. (2015a) combined a forward RNN generator, a CNN reranker, and a backward RNN reranker to generate utterances. Wen et al. (2015b) proposed SC-LSTM generator which introduced a control sigmoid gate to the LSTM cell to jointly learn the gating mechanism and language model. A recurring problem in such systems is the lack of sufficient domain-specific annotated data. Wen et al. 2016aproposed an out-of-domain model which was trained on counterfeited data by using semantically similar slots from the target domain instead of the slots belonging to the out-of-domain dataset. The results showed that the model can achieve a satisfactory performance with a small amount of in-domain data by fine tuning the target domain on the out-of-domain trained model.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 114, |
| "text": "Zhang and Lapata (2014)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 173, |
| "end": 189, |
| "text": "Xu et al. (2015)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 192, |
| "end": 219, |
| "text": "Karpathy and Fei-Fei (2015)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 423, |
| "end": 443, |
| "text": "(Luong et al., 2015;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 444, |
| "end": 466, |
| "text": "Li and Jurafsky, 2016)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "More recently, RNN encoder-decoder based models with attention mechanism (Bahdanau et al., 2014) have shown improved performances in various tasks. Yang et al. (2016) proposed a review network to the image captioning, which reviews all the information encoded by the encoder and produces a compact thought vector. Mei et al. (2015) proposed RNN encoder-decoderbased model by using two attention layers to jointly train content selection and surface realization. More close to our work, Wen et al. 2016bproposed an attentive encoder-decoder based generator which computed the attention mechanism over the slot-value pairs. The model showed a domain scalability when a very limited amount of data is available.", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 96, |
| "text": "(Bahdanau et al., 2014)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 148, |
| "end": 166, |
| "text": "Yang et al. (2016)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 314, |
| "end": 331, |
| "text": "Mei et al. (2015)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Moving from a limited domain dialogue system to an open domain dialogue system raises some issues. Therefore, it is important to build an open domain dialogue system that can make as much use of existing abilities of functioning from other domains. There have been several works to tackle this problem, such as (Mrk\u0161i\u0107 et al., 2015) using RNN-based networks for multi-domain dialogue state tracking, (Wen et al., 2016a) using a procedure to train multi-domain via multiple adaptation steps, or Williams, 2013) adapting of SDS components to new domains.", |
| "cite_spans": [ |
| { |
| "start": 311, |
| "end": 332, |
| "text": "(Mrk\u0161i\u0107 et al., 2015)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 494, |
| "end": 509, |
| "text": "Williams, 2013)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The recurrent language generator proposed in this paper is based on a neural language generator (Wen et al., 2016b), which consists of three main components: (i) an Encoder that incorporates the target meaning representation (MR) as the model inputs, (ii) an Aligner that aligns and controls the semantic elements, and (iii) an RNN Decoder that generates output sentences. The generator architecture is shown in Figure 1 . The Encoder first encodes the MR into input semantic elements which are then aggregated and selected by utilizing an attention-based mechanism by the Aligner. The input to the RNN Decoder at each time step is a 1-hot encoding of a token 2 w t and an attentive DA representation d t . At each time step t, RNN Decoder also computes how much the feature value vector s t 1 retained for the next computational steps, and adds this information to the RNN output which represents the probability distribution of the next token w t+1 . At generation time, we can sample from this conditional distribution to obtain the next token in a generated sentence, and feed it as the next input to the RNN Decoder. This process finishes when an end sign is generated (Karpathy and Fei-Fei, 2015), or some constraints are reached (Zhang and Lapata, 2014) . The model can produce a sequence of tokens which can finally be lexicalized 3 to form the required utterance. Figure 2 : The RALSTM cell proposed in this paper, which consists of three components: an Refinement Cell, a traditional LSTM Cell, and an Adjustment Cell. At time step t, while the Refinement cell computes new input tokens x t based on the original input tokens and the attentional DA representation d t , the Adjustment Cell calculates how much information of the slot-value pairs can be generated by the LSTM Cell.", |
| "cite_spans": [ |
| { |
| "start": 1236, |
| "end": 1260, |
| "text": "(Zhang and Lapata, 2014)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 412, |
| "end": 420, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1373, |
| "end": 1381, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Recurrent Neural Language Generator", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The slots and values are separated parameters used in the encoder side. This embeds the source information into a vector representation z i which is a concatenation of embedding vector representation of each slot-value pair, and is computed by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "z i = u i v i (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where u i , v i are the i-th slot and value embedding vectors, respectively, and is vector concatenation. The i index runs over the L given slot-value pairs. In this work, we use a 1-layer, Bidirectional LSTM (Bi-LSTM) to encode the sequence of slotvalue pairs 4 embedding. The Bi-LSTM consists of forward and backward LSTMs which read the sequence of slot-value pairs from left-to-right and right-to-left to produce forward and backward sequence of hidden states ( ! e 1 , .., ! e L ), and ( e 1 , .., e L ), respectively. We then obtain the sequence of encoded hidden states E = (e 1 , e 2 , .., e L ) where e i is a sum of the forward hidden state ! e i and the backward one e i as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "e i = ! e i + e i (2) 3.2 Aligner", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The Aligner utilizes attention mechanism to calculate the DA representation as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "t,i = exp e t,i P j exp e t,j", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e t,i = a(e i , h t 1 )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "and t,i is the weight of i-th slot-value pair calculated by the attention mechanism. The alignment model a is computed by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "a(e i , h t 1 ) = v > a tanh(W a e i + U a h t 1 ) (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where v a , W a , U a are the weight matrices to learn. Finally, the Aligner calculates dialogue act embedding d t as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "d t = a X i t,i e i (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where a is vector embedding of the action type.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The proposed semantic RALSTM cell applied for Decoder side consists of three components: a Refinement cell, a traditional LSTM cell, and an Adjustment cell: Firstly, instead of feeding the original input token w t into the RNN cell, the input is recomputed by using a semantic gate as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "r t = (W rd d t + W rh h t 1 ) x t = r t w t", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where W rd and W rh are weight matrices. Element-wise multiplication plays a part in word-level matching which not only learns the vector similarity, but also preserves information about the two vectors. W rh acts like a key phrase detector that learns to capture the pattern of generation tokens or the relationship between multiple tokens. In other words, the new input x t consists of information of the original input token w t , the DA representation d t , and the hidden context h t 1 . r t is called a Refinement gate because the input tokens are refined by a combination gating information of the attentive DA representation d t and the previous hidden state h t 1 . By this way, we can represent the whole sentence based on the refined inputs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Secondly, the traditional LSTM network proposed by Hochreiter and Schmidhuber (2014) in which the input gate i i , forget gate f t and output gates o t are introduced to control information flow and computed as follows: 0", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 84, |
| "text": "Hochreiter and Schmidhuber (2014)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "B B @ i t f t o t c t 1 C C A = 0 B B @ tanh 1 C C A W 4n,4n 0 @ x t d t h t 1 1 A (8)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where n is hidden layer size, W 4n,4n is model parameters. The cell memory value c t is modified to depend on the DA representation as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "c t = f t c t 1 + i t \u0109 t + tanh(W cr r t ) h t = o t tanh(c t )", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "whereh t is the output. Thirdly, inspired by work of Wen et al. (2015b) in which the generator was further conditioned on a 1-hot representation vector s of given dialogue act, and work of Lu et al. (2016) that proposed a visual sentinel gate to make a decision on whether the model should attend to the image or to the sentinel gate, an additional gating cell is introduced on top of the traditional LSTM to gate another controlling vector s. Figure 6 shows how RAL-STM controls the DA vector s. First, starting from the 1-hot vector of the DA s 0 , at each time step t the proposed cell computes how much the LSTM outputh t affects the DA vector, which is computed as follows:", |
| "cite_spans": [ |
| { |
| "start": 189, |
| "end": 205, |
| "text": "Lu et al. (2016)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 444, |
| "end": 452, |
| "text": "Figure 6", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "a t = (W ax x t + W ahht ) s t = s t 1 a t", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where W ax , W ah are weight matrices to be learned. a t is called an Adjustment gate since its task is to control what information of the given DA have been generated and what information should be retained for future time steps. Second, we consider how much the information preserved in the DA s t can be contributed to the output, in which an additional output is computed by applying the output gate o t on the remaining information in s t as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "c a = W os s t h a = o t tanh(c a )", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where W os is a weight matrix to project the DA presentation into the output space,h a is the Adjustment cell output. Final RALSTM output is a combination of both outputs of the traditional LSTM cell and the Adjustment cell, and computed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h t =h t +h a", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Finally, the output distribution is computed by applying a softmax function g, and the distribution can be sampled to obtain the next token,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (w t+1 | w t , ...w 0 , DA) = g(W ho h t ) w t+1 \u21e0 P (w t+1 | w t , w t 1 , ...w 0 , DA)", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where DA = (s, z).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RALSTM Decoder", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The objective function was the negative loglikelihood and computed by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "F(\u2713) = T X t=1 y > t log p t", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where: y t is the ground truth token distribution, p t is the predicted token distribution, T is length of the input sentence. The proposed generators were trained by treating each sentence as a mini-batch with l 2 regularization added to the objective function for every 5 training examples. The models were initialized with a pretrained Glove word embedding vectors (Pennington et al., 2014) and optimized by using stochastic gradient descent and back propagation through time (Werbos, 1990). Early stopping mechanism was implemented to prevent over-fitting by using a validation set as suggested in (Mikolov, 2010) .", |
| "cite_spans": [ |
| { |
| "start": 368, |
| "end": 393, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 602, |
| "end": 617, |
| "text": "(Mikolov, 2010)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "The decoding consists of two phases: (i) overgeneration, and (ii) reranking. In the overgeneration, the generator conditioned on both representations of the given DA use a beam search to generate a set of candidate responses. In the reranking phase, cost of the generator is computed to form the reranking score R as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "R = F(\u2713) + ERR (15)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "where is a trade off constant and is set to a large value in order to severely penalize nonsensical outputs. The slot error rate ERR, which is the number of slots generated that is either missing or redundant, and is computed by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "ERR = p + q N (16)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "where N is the total number of slots in DA, and p, q is the number of missing and redundant slots, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "We extensively conducted a set of experiments to assess the effectiveness of the proposed models by using several metrics, datasets, and model architectures, in order to compare to prior methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We assessed the proposed models on four different NLG domains: finding a restaurant, finding a hotel, buying a laptop, and buying a television. The Restaurant and Hotel were collected in (Wen et al., 2015b), while the Laptop and TV datasets have been released by (Wen et al., 2016a) with a much larger input space but only one training example for each DA so that the system must learn partial realization of concepts and be able to recombine and apply them to unseen DAs. This makes the NLG tasks for the Laptop and TV domains become much harder. The dataset statistics are shown in Table 1 . ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 584, |
| "end": 591, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The generators were implemented using the Ten-sorFlow library (Abadi et al., 2016) and trained with training, validation and testing ratio as 3:1:1. The hidden layer size, beam size were set to be 80 and 10, respectively, and the generators were trained with a 70% of dropout rate. We performed 5 runs with different random initialization of the network and the training is terminated by using early stopping. We then chose a model that yields the highest BLEU score on the validation set as shown in Table 2 . Since the trained models can differ depending on the initialization, we also report the results which were averaged over 5 randomly initialized networks. Note that, except the results reported in Table 2 , all the results shown were averaged over 5 randomly initialized networks. We set to 1000 to severely discourage the reranker from selecting utterances which contain either redundant or missing slots. For each DA, we over-generated 20 candidate sentences and selected the top 5 realizations after reranking. Moreover, in order to better understand the effectiveness of our proposed methods, we: (i) performed an ablation experiments to demonstrate the contribution of each proposed cells (Tables 2, 3), (ii) trained the models on the Laptop domain with varied proportion of training data, starting from 10% to 100% (Figure 3 ), (iii) trained general models by merging all the data from four domains together and tested them in each individual domain (Figure 4) , and (iv) trained adaptation models on merging data from restaurant and hotel domains, then fine tuned the model on laptop domain with varied amount of adaptation data ( Figure 5 ).", |
| "cite_spans": [ |
| { |
| "start": 62, |
| "end": 82, |
| "text": "(Abadi et al., 2016)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 501, |
| "end": 508, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 707, |
| "end": 714, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1331, |
| "end": 1340, |
| "text": "(Figure 3", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1466, |
| "end": 1476, |
| "text": "(Figure 4)", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 1648, |
| "end": 1656, |
| "text": "Figure 5", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setups", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The generator performance was assessed on the two evaluation metrics: the BLEU and the slot error rate ERR by adopting code from an open source benchmark toolkit for Natural Language Generation 5 . We compared the proposed models against three strong baselines which have been recently published as state-of-the-art NLG benchmarks 5 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics and Baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 HLSTM proposed by Wen et al. (2015a) which used a heuristic gate to ensure that all of the slot-value information was accurately captured when generating.", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 38, |
| "text": "Wen et al. (2015a)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics and Baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 SCLSTM proposed by Wen et al. (2015b) which can jointly learn the gating signal and language model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics and Baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Enc-Dec proposed by Wen et al. (2016b) which applied the attention-based encoderdecoder architecture.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics and Baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "5 Results and Analysis", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics and Baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We conducted extensive experiments on our models and compared against the previous methods. Overall, the proposed models consistently achieve the better performance regarding both evaluation metrics across all domains in all test cases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The ablation studies (Tables 2, 3) demonstrate the contribution of different model components in which the models were assessed without Adjustment cell (w/o A), or without Refinement cell (w/o R). It clearly sees that the Adjustment cell contributes to reducing the slot error rate ERR score since it can effectively prevent the undesirable slot-value pair repetitions by gating the DA vector s. A comparison between the ARED-based models (denoted by ] in Table 2 ) shows that the proposed models not only have better performance with higher the BLEU score but also significantly reduce the slot error rate ERR score by a large margin about 2% to 4% in every datasets. Moreover, a comparison between the models with gating the DA vector also indicates that the proposed models (w/o R, RALSTM) have significant improved performance on both the evaluation metrics across the four domains compared to the SCLSTM model. The RALSTM cell without the Refinement cell is similar as the SCLSTM cell. However, it obtained the results much better than the SCLSTM baselines. This stipulates the necessary of the LSTM encoder and the Aligner in effectively partial learning the correlated order between slot-value representation in the DAs, especially for the unseen domain where there is only one training example for each DA. Table 3 further demonstrates the stable strength of our models since the results' pattern stays unchanged compared to those in Table 2 . Figure 3 shows a comparison of three models (Enc-Dec, SCLSTM, and RALSTM) which were trained from scratch on the unseen laptop domain in varied proportion of training data, from 1% to 100%. It clearly shows that the RALSTM outperforms the previous models in all cases, while the Enc-Dec has a much greater ERR score comparing to the two models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 21, |
| "end": 34, |
| "text": "(Tables 2, 3)", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 456, |
| "end": 463, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1315, |
| "end": 1322, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 1442, |
| "end": 1449, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1452, |
| "end": 1460, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Comparison in an Unseen Domain", |
| "sec_num": null |
| }, |
| { |
| "text": "A comparison of top responses generated for some input DAs between different models are shown in Table 4 . While the previous models still Table 4 : Comparison of top responses generated for some input dialogue acts between different models. Errors are marked in color (missing, misplaced information). All2* are general models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 97, |
| "end": 104, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 139, |
| "end": 146, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Comparison in an Unseen Domain", |
| "sec_num": null |
| }, |
| { |
| "text": "Generated Responses from Laptop Domain 1 Input DA ?compare(name=\"satellite notus 19\", pricerange=\"budget\", drive=\"500 gb\", name=\"portege thanatos 98\", pricerange=\"expensive\", drive=\"128 gb\") Reference the satellite notus 19 has a 500 gb drive and is in the budget price range . on the other hand the portege thanatos 98 has a 128 gb drive and is in the expensive price range . which would you prefer HLSTM the satellite notus 19 is a budget priced laptop with a 500 gb drive and a 128 gb drive . which one do you prefer [portege thanatos 98, expensive] Enc-Dec the satellite notus 19 is in the budget price range . the portege thanatos 98 has a 500 gb drive and is in the expensive price range . which one do you prefer [128 gb] SCLSTM the satellite notus 19 is in the budget price range and has a 500 gb drive and the portege thanatos 98 has a 128 gb drive . which one do you prefer [expensive] RALSTM the satellite notus 19 is in the budget price range with a 500 gb drive , while the portege thanatos 98 is in the expensive price range and has a 128 gb drive . which one do you prefer All2Laptop the satellite notus 19 is a budget price range with a 500 gb drive and the portege thanatos 98 has a 128 gb drive and is in the expensive price range . which one do you prefer # Model Generated Responses from TV Domain 2 Input DA inform count(count=\"73\", type=\"television\", hasusbport=\"dontcare\", hdmiport=\"2\", screensizerange=\"dontcare\") produce some errors (missing and misplaced information), the proposed models (RALSTM and the models All2* trained by pooling all datasets together) can generate appropriate sentences. We also found that the proposed models tend to generate more complete and concise sentences than the other models. All these prove the importance of the proposed components: the Refinement cell in aggregating and selecting the attentive information, and the Adjustment cell in controlling the feature vector (see Examples in Figure 6 ). 
Figure 4 shows a comparison performance of general models as described in Section 4.2. The results are consistent with the Figure 3 , in which the RALSTM has better performance than the Enc-Dec and SCLSTM on all domains in terms of the BLEU and the ERR scores, while the Enc-Dec has difficulties in reducing the ERR score. This indicates the relevant contribution of the proposed component Refinement and Adjustment cells to the original ARED architecture, in which the Refinement with attentional gating can effectively select and aggregate the information before putting them into the traditional LSTM cell, while the Adjustment with gating DA vector can effectively control the Adaptation Models Figure 5 shows domain scalability of the three models in which the models were first trained on the merging out-of-domain Restaurant and Hotel datasets, then fine tuned the parameters with varied amount of in-domain training data (laptop domain). The RALSTM model outperforms the previous model in both cases where the sufficient indomain data is used (as in Figure 5 -left) and the limited in-domain data is used ( Figure 5-right) . The Figure 5 -right also indicates that the RALSTM model can adapt to a new, unseen domain faster than the previous models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1947, |
| "end": 1955, |
| "text": "Figure 6", |
| "ref_id": "FIGREF5" |
| }, |
| { |
| "start": 1959, |
| "end": 1967, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 2082, |
| "end": 2090, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 2658, |
| "end": 2666, |
| "text": "Figure 5", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 3017, |
| "end": 3025, |
| "text": "Figure 5", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 3074, |
| "end": 3089, |
| "text": "Figure 5-right)", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 3096, |
| "end": 3104, |
| "text": "Figure 5", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "# Model", |
| "sec_num": null |
| }, |
| { |
| "text": "We present an extension of ARED model, in which an RALSTM component is introduced to select and aggregate semantic elements produced by the Encoder, and to generate the required sentence. We assessed the proposed models on four NLG domains and compared to the state-of-theart generators. The proposed models empirically show consistent improvement over the previous methods in both the BLEU and ERR evaluation metrics. The proposed models also show an ability to extend to a new, unseen domain no matter how much the in-domain training data was fed. In the future, it would be interesting to apply the proposed model to other tasks that can be modeled based on the encoder-decoder architecture, i.e., image captioning, reading comprehension, and machine translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "A combination of an action type and a set of slot-value pairs. e.g. inform(name='Bar crudo'; food='raw food')", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Input texts are delexicalized where slot values are replaced by its corresponding slot tokens.3 The process in which slot token is replaced by its value.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We treated the set of slot-value pairs as a sequence and use the order specified by slot's name (e.g., slot address comes first, food follows address). We have tried treating slot-value pairs as a set with natural order as in the given DAs. However, this yielded even worse results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/shawnwun/RNNLG", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by JSPS Kakenhi Grant Number JP15K16048 and JST CREST JP-MJCR1513. The first author would like to thank the Vietnamese Government Scholarship (911 project).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Tensorflow: Large-scale machine learning on heterogeneous distributed systems", |
| "authors": [ |
| { |
| "first": "Mart\u0131n", |
| "middle": [], |
| "last": "Abadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Barham", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Brevdo", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Craig", |
| "middle": [], |
| "last": "Citro", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Davis", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Devin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1603.04467" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mart\u0131n Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, et al. 2016. Tensorflow: Large-scale machine learning on heterogeneous distributed systems. arXiv preprint arXiv:1603.04467 .", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1409.0473" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473 .", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Distributed dialogue policies for multi-domain statistical dialogue management", |
| "authors": [ |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongho", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Pirros", |
| "middle": [], |
| "last": "Tsiakoulis", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on. IEEE", |
| "volume": "", |
| "issue": "", |
| "pages": "5371--5375", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Milica Ga\u0161i\u0107, Dongho Kim, Pirros Tsiakoulis, and Steve Young. 2015. Distributed dialogue poli- cies for multi-domain statistical dialogue manage- ment. In Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on. IEEE, pages 5371-5375.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation .", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Deep visualsemantic alignments for generating image descriptions", |
| "authors": [ |
| { |
| "first": "Andrej", |
| "middle": [], |
| "last": "Karpathy", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE Conference CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "3128--3137", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrej Karpathy and Li Fei-Fei. 2015. Deep visual- semantic alignments for generating image descrip- tions. In Proceedings of the IEEE Conference CVPR. pages 3128-3137.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A diversity-promoting objective function for neural conversation models", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1510.03055" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2015. A diversity-promoting objec- tive function for neural conversation models. arXiv preprint arXiv:1510.03055 .", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A persona-based neural conversation model", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Georgios", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Spithourakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1603.06155" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Georgios P Spithourakis, Jianfeng Gao, and Bill Dolan. 2016. A persona-based neural conversation model. arXiv preprint arXiv:1603.06155 .", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Mutual information and diverse decoding improve neural machine translation", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1601.00372" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li and Dan Jurafsky. 2016. Mutual information and diverse decoding improve neural machine trans- lation. arXiv preprint arXiv:1601.00372 .", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Knowing when to look: Adaptive attention via a visual sentinel for image captioning", |
| "authors": [ |
| { |
| "first": "Jiasen", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1612.01887" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiasen Lu, Caiming Xiong, Devi Parikh, and Richard Socher. 2016. Knowing when to look: Adaptive at- tention via a visual sentinel for image captioning. arXiv preprint arXiv:1612.01887 .", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Multi-task sequence to sequence learning", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1511.06114" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong, Quoc V Le, Ilya Sutskever, Oriol Vinyals, and Lukasz Kaiser. 2015. Multi-task sequence to sequence learning. arXiv preprint arXiv:1511.06114 .", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "What to talk about and how? selective generation using lstms with coarse-to-fine alignment", |
| "authors": [ |
| { |
| "first": "Hongyuan", |
| "middle": [], |
| "last": "Mei", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew R", |
| "middle": [], |
| "last": "Walter", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1509.00838" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hongyuan Mei, Mohit Bansal, and Matthew R Walter. 2015. What to talk about and how? selective gen- eration using lstms with coarse-to-fine alignment. arXiv preprint arXiv:1509.00838 .", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Recurrent neural network based language model", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov. 2010. Recurrent neural network based language model.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Dialogue management using scripts and combined confidence scores", |
| "authors": [ |
| { |
| "first": "Danilo", |
| "middle": [], |
| "last": "Mirkovic", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Cavedon", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Purver", |
| "suffix": "" |
| }, |
| { |
| "first": "Florin", |
| "middle": [], |
| "last": "Ratiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Scheideck", |
| "suffix": "" |
| }, |
| { |
| "first": "Fuliang", |
| "middle": [], |
| "last": "Weng", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kui", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "US Patent", |
| "volume": "7", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danilo Mirkovic, Lawrence Cavedon, Matthew Purver, Florin Ratiu, Tobias Scheideck, Fuliang Weng, Qi Zhang, and Kui Xu. 2011. Dialogue manage- ment using scripts and combined confidence scores. US Patent 7,904,297.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Multidomain dialog state tracking using recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Diarmuid", |
| "suffix": "" |
| }, |
| { |
| "first": "Blaise", |
| "middle": [], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsung-Hsien", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.07190" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikola Mrk\u0161i\u0107, Diarmuid O S\u00e9aghdha, Blaise Thom- son, Milica Ga\u0161i\u0107, Pei-Hao Su, David Vandyke, Tsung-Hsien Wen, and Steve Young. 2015. Multi- domain dialog state tracking using recurrent neural networks. arXiv preprint arXiv:1506.07190 .", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Abstractive text summarization using sequence-to-sequence rnns and beyond", |
| "authors": [ |
| { |
| "first": "Ramesh", |
| "middle": [], |
| "last": "Nallapati", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1602.06023" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramesh Nallapati, Bowen Zhou, Caglar Gulcehre, Bing Xiang, et al. 2016. Abstractive text summa- rization using sequence-to-sequence rnns and be- yond. arXiv preprint arXiv:1602.06023 .", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Stochastic language generation for spoken dialogue systems", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Alice", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "I" |
| ], |
| "last": "Oh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rudnicky", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the 2000 ANLP/NAACL Workshop on Conversational systems", |
| "volume": "3", |
| "issue": "", |
| "pages": "27--32", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alice H Oh and Alexander I Rudnicky. 2000. Stochas- tic language generation for spoken dialogue sys- tems. In Proceedings of the 2000 ANLP/NAACL Workshop on Conversational systems-Volume 3. As- sociation for Computational Linguistics, pages 27- 32.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th ACL. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th ACL. Association for Computational Linguis- tics, pages 311-318.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "EMNLP", |
| "volume": "14", |
| "issue": "", |
| "pages": "1532--1575", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word representation. In EMNLP. volume 14, pages 1532- 43.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sentence summarization", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Alexander", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1509.00685" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander M Rush, Sumit Chopra, and Jason We- ston. 2015. A neural attention model for ab- stractive sentence summarization. arXiv preprint arXiv:1509.00685 .", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Trainable sentence planning for complex information presentation in spoken dialog systems", |
| "authors": [ |
| { |
| "first": "Amanda", |
| "middle": [], |
| "last": "Stent", |
| "suffix": "" |
| }, |
| { |
| "first": "Rashmi", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Marilyn", |
| "middle": [], |
| "last": "Walker", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 42nd ACL. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amanda Stent, Rashmi Prasad, and Marilyn Walker. 2004. Trainable sentence planning for complex in- formation presentation in spoken dialog systems. In Proceedings of the 42nd ACL. Association for Com- putational Linguistics, page 79.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A neural conversational model", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.05869" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals and Quoc Le. 2015. A neural conversa- tional model. arXiv preprint arXiv:1506.05869 .", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Show and tell: A neural image caption generator", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Toshev", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru", |
| "middle": [], |
| "last": "Erhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "3156--3164", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. 2015. Show and tell: A neural image caption generator. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pages 3156-3164.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Stochastic Language Generation in Dialogue using Recurrent Neural Networks with Convolutional Sentence Reranking", |
| "authors": [ |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Tsung-Hsien Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongho", |
| "middle": [], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings SIGDIAL. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Hsien Wen, Milica Ga\u0161i\u0107, Dongho Kim, Nikola Mrk\u0161i\u0107, Pei-Hao Su, David Vandyke, and Steve Young. 2015a. Stochastic Language Generation in Dialogue using Recurrent Neural Networks with Convolutional Sentence Reranking. In Proceedings SIGDIAL. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Multi-domain neural network language generation for spoken dialogue systems", |
| "authors": [ |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Tsung-Hsien Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Gasic", |
| "suffix": "" |
| }, |
| { |
| "first": "Lina", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mrksic", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Rojas-Barahona", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1603.01232" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Hsien Wen, Milica Gasic, Nikola Mrksic, Lina M Rojas-Barahona, Pei-Hao Su, David Vandyke, and Steve Young. 2016a. Multi-domain neural network language generation for spoken dialogue systems. arXiv preprint arXiv:1603.01232 .", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Toward multidomain language generation using recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Tsung-Hsien Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ga\u0161ic", |
| "suffix": "" |
| }, |
| { |
| "first": "Lina", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mrk\u0161ic", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Rojas-Barahona", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Hsien Wen, Milica Ga\u0161ic, Nikola Mrk\u0161ic, Lina M Rojas-Barahona, Pei-Hao Su, David Vandyke, and Steve Young. 2016b. Toward multi-domain language generation using recurrent neural networks .", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Semantically conditioned lstm-based natural language generation for spoken dialogue systems", |
| "authors": [ |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Tsung-Hsien Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Hsien Wen, Milica Ga\u0161i\u0107, Nikola Mrk\u0161i\u0107, Pei-Hao Su, David Vandyke, and Steve Young. 2015b. Semantically conditioned lstm-based natural language generation for spoken dialogue systems. In Proceedings of EMNLP. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Stefan Ultes, and Steve Young. 2016c. A networkbased end-to-end trainable task-oriented dialogue system", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Tsung-Hsien Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Mrksic", |
| "suffix": "" |
| }, |
| { |
| "first": "Lina", |
| "middle": [ |
| "M" |
| ], |
| "last": "Gasic", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Rojas-Barahona", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1604.04562" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Hsien Wen, David Vandyke, Nikola Mrksic, Milica Gasic, Lina M Rojas-Barahona, Pei-Hao Su, Stefan Ultes, and Steve Young. 2016c. A network-based end-to-end trainable task-oriented dialogue system. arXiv preprint arXiv:1604.04562 .", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Backpropagation through time: what it does and how to do it", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Werbos", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Proceedings of the IEEE", |
| "volume": "78", |
| "issue": "10", |
| "pages": "1550--1560", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul J Werbos. 1990. Backpropagation through time: what it does and how to do it. Proceedings of the IEEE 78(10):1550-1560.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Multi-domain learning and generalization in dialog state tracking", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of SIGDIAL. Citeseer", |
| "volume": "62", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Williams. 2013. Multi-domain learning and generalization in dialog state tracking. In Proceedings of SIGDIAL. Citeseer, volume 62.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Show, attend and tell: Neural image caption generation with visual attention", |
| "authors": [ |
| { |
| "first": "Kelvin", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhudinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Rich", |
| "middle": [], |
| "last": "Zemel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "2048--2057", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kelvin Xu, Jimmy Ba, Ryan Kiros, Kyunghyun Cho, Aaron Courville, Ruslan Salakhudinov, Rich Zemel, and Yoshua Bengio. 2015. Show, attend and tell: Neural image caption generation with visual attention. In International Conference on Machine Learning. pages 2048-2057.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Review networks for caption generation", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ye", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuexin", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan R", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2361--2369", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Ye Yuan, Yuexin Wu, William W Cohen, and Ruslan R Salakhutdinov. 2016. Review networks for caption generation. In Advances in Neural Information Processing Systems. pages 2361-2369.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Chinese poetry generation with recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Xingxing", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "670--680", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xingxing Zhang and Mirella Lapata. 2014. Chinese poetry generation with recurrent neural networks. In EMNLP. pages 670-680.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Unrolled presentation of the RNNsbased neural language generator. The Encoder part is a BiLSTM, the Aligner is an attention mechanism over the encoded inputs, and the Decoder is the proposed RALSTM model conditioned on a 1-hot representation vector s. The fading color of the vector s indicates retaining information for future computational time steps." |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Performance comparison of the models trained on Laptop domain." |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Performance comparison of the general models on four different domains." |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Performance on Laptop domain with varied amount of the adaptation training data when adapting models trained on Restaurant+Hotel dataset." |
| }, |
| "FIGREF4": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "(a) An example from the Laptop domain. (b) An example from the TV domain." |
| }, |
| "FIGREF5": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Example showing how RALSTM drives down the DA feature value vector s step-by-step, in which the model generally shows its ability to detect words and phases describing a corresponding slot-value pair.information flow during generation." |
| }, |
| "TABREF0": { |
| "text": "Dataset statistics.", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td/><td colspan=\"3\">Restaurant Hotel Laptop</td><td>TV</td></tr><tr><td># train</td><td>3,114</td><td>3,223</td><td>7,944</td><td>4,221</td></tr><tr><td># validation</td><td>1,039</td><td>1,075</td><td>2,649</td><td>1,407</td></tr><tr><td># test</td><td>1,039</td><td>1,075</td><td>2,649</td><td>1,407</td></tr><tr><td># distinct DAs</td><td>248</td><td>164</td><td colspan=\"2\">13,242 7,035</td></tr><tr><td># DA types</td><td>8</td><td>8</td><td>14</td><td>14</td></tr><tr><td># slots</td><td>12</td><td>12</td><td>19</td><td>15</td></tr></table>" |
| }, |
| "TABREF1": { |
| "text": "Performance comparison on four datasets in terms of the BLEU and the error rate ERR(%) scores. The results were produced by training each network on 5 random initialization and selected model with the highest validation BLEU score. ] denotes the Attention-based Encoder-Decoder model. The best and second best models highlighted in bold and italic face, respectively.", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Model</td><td>Restaurant BLEU ERR BLEU ERR BLEU ERR BLEU ERR Hotel Laptop TV</td></tr><tr><td>HLSTM</td><td>0.7466 0.74% 0.8504 2.67% 0.5134 1.10% 0.5250 2.50%</td></tr><tr><td colspan=\"2\">SCLSTM 0.7525 0.38% 0.8482 3.07% 0.5116 0.79% 0.5265 2.31%</td></tr><tr><td colspan=\"2\">Enc-Dec ] 0.7398 2.78% 0.8549 4.69% 0.5108 4.04% 0.5182 3.18%</td></tr><tr><td>w/o A ]</td><td>0.7651 0.99% 0.8940 1.82% 0.5219 1.64% 0.5296 2.40%</td></tr><tr><td>w/o R ]</td><td>0.7748 0.22% 0.8944 0.48% 0.5235 0.57% 0.5350 0.72%</td></tr><tr><td colspan=\"2\">RALSTM ] 0.7789 0.16% 0.8981 0.43% 0.5252 0.42% 0.5406 0.63%</td></tr></table>" |
| }, |
| "TABREF2": { |
| "text": "Performance comparison of the proposed models on four datasets in terms of the BLEU and the error rate ERR(%) scores. The results were averaged over 5 randomly initialized networks. bold denotes the best model.", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Model</td><td>Restaurant BLEU ERR BLEU ERR BLEU ERR BLEU ERR Hotel Laptop TV</td></tr><tr><td>w/o A</td><td>0.7619 2.26% 0.8913 1.85% 0.5180 1.81% 0.5270 2.10%</td></tr><tr><td>w/o R</td><td>0.7733 0.23% 0.8901 0.59% 0.5208 0.60% 0.5321 0.50%</td></tr><tr><td colspan=\"2\">RALSTM 0.7779 0.20% 0.8965 0.58% 0.5231 0.50% 0.5373 0.49%</td></tr></table>" |
| } |
| } |
| } |
| } |