| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:31:31.748718Z" |
| }, |
| "title": "A Template-guided Hybrid Pointer Network for Knowledge-based Task-oriented Dialogue Systems", |
| "authors": [ |
| { |
| "first": "Dingmin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Oxford", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "dingmin.wang@cs.ox.ac.uk" |
| }, |
| { |
| "first": "Ziyao", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Wanwei", |
| "middle": [], |
| "last": "He", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "addrLine": "China 4 ByteDance, 5 Amazon Web Services", |
| "settlement": "Shenzhen, Seattle", |
| "country": "China, USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yunzhe", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "yunzhet@amazon.com" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "addrLine": "China 4 ByteDance, 5 Amazon Web Services", |
| "settlement": "Shenzhen, Seattle", |
| "country": "China, USA" |
| } |
| }, |
| "email": "min.yang@siat.ac.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Most existing neural network based taskoriented dialogue systems follow encoderdecoder paradigm, where the decoder purely depends on the source texts to generate a sequence of words, usually suffering from instability and poor readability. Inspired by the traditional template-based generation approaches, we propose a template-guided hybrid pointer network for the knowledgebased task-oriented dialogue system, which retrieves several potentially relevant answers from a pre-constructed domain-specific conversational repository as guidance answers, and incorporates the guidance answers into both the encoding and decoding processes. Specifically, we design a memory pointer network model with a gating mechanism to fully exploit the semantic correlation between the retrieved answers and the ground-truth response. We evaluate our model on four widely used task-oriented datasets, including one simulated and three manually created datasets. The experimental results demonstrate that the proposed model achieves significantly better performance than the state-of-the-art methods over different automatic evaluation metrics 1 .", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Most existing neural network based taskoriented dialogue systems follow encoderdecoder paradigm, where the decoder purely depends on the source texts to generate a sequence of words, usually suffering from instability and poor readability. Inspired by the traditional template-based generation approaches, we propose a template-guided hybrid pointer network for the knowledgebased task-oriented dialogue system, which retrieves several potentially relevant answers from a pre-constructed domain-specific conversational repository as guidance answers, and incorporates the guidance answers into both the encoding and decoding processes. Specifically, we design a memory pointer network model with a gating mechanism to fully exploit the semantic correlation between the retrieved answers and the ground-truth response. We evaluate our model on four widely used task-oriented datasets, including one simulated and three manually created datasets. The experimental results demonstrate that the proposed model achieves significantly better performance than the state-of-the-art methods over different automatic evaluation metrics 1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Task oriented dialogue systems have attracted increasing attention recently due to broad applications such as reserving restaurants and booking flights. Conventional task-oriented dialogue systems are mainly implemented by rule-based methods (Lemon et al., 2006; Wang and Lemon, 2013) , which rely heavily on the hand-crafted features, establishing significant barriers for adapting the dialogue systems to new domains. Motivated by the great success of deep learning in various NLP tasks, the neural network based methods (Bordes 1 https://github.com/wdimmy/THPN et al., 2017; Madotto et al., 2018) have dominated the study since these methods can be trained in an end-to-end manner and scaled to different domains.", |
| "cite_spans": [ |
| { |
| "start": 242, |
| "end": 262, |
| "text": "(Lemon et al., 2006;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 263, |
| "end": 284, |
| "text": "Wang and Lemon, 2013)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 523, |
| "end": 532, |
| "text": "(Bordes 1", |
| "ref_id": null |
| }, |
| { |
| "start": 578, |
| "end": 599, |
| "text": "Madotto et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Despite the remarkable progress of previous studies, the performance of task-oriented dialogue systems is still far from satisfactory. On one hand, due to the exposure bias problem (Ranzato et al., 2016) , the neural network based models, e.g., the sequence to sequence models (seq2seq), tend to accumulate errors with increasing length of the generation. Concretely, the first several generated words can be reasonable, while the quality of the generated sequence deteriorates quickly once the decoder produces a \"bad\" word. On the other hand, as shown in previous works (Cao et al., 2018; Madotto et al., 2018) , the Seq2Seq models are likely to generate non-committal or similar responses that often involve high-frequency words or phrases. These responses are usually of low informativeness or readability. This may be because that arbitrarylength sequences can be generated, and it is not enough for the decoder to be purely based on the source input sentence to generate informative and fluent responses.", |
| "cite_spans": [ |
| { |
| "start": 181, |
| "end": 203, |
| "text": "(Ranzato et al., 2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 572, |
| "end": 590, |
| "text": "(Cao et al., 2018;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 591, |
| "end": 612, |
| "text": "Madotto et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We demonstrate empirically that in task-oriented dialogue systems, the responses for the requests with similar types often follow the same sentence structure except that different named entities are used according to the specific dialogue context. Table 1 shows two conversations from real taskoriented dialogues about navigation and weather. From the navigation case, we can observe that although the two requests are for different destinations, the corresponding responses are similar in sentence structure, replacing \"children's health\" with \"5677 springer street\". For the weather example, it requires the model to first detect the entity \"carson\" and then query the corresponding information from the knowledge base (KB). After obtaining the returned KB entries, we generate the response by replacing the corresponding entities in the retrieved candidate answer. Therefore, we argue that the golden responses of the requests with similar types can provide a reference point to guide the response generation process and enable to generate high-quality responses for the given requests.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 248, |
| "end": 255, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a template-guided hybrid pointer network (THPN to generate the response given a user-issued query, in which the domain specific knowledge base (KB) and potentially relevant answers are leveraged as extra input to enrich the input representations of the decoder. Here, knowledge base refers to the database to store the relevant and necessary information for supporting the model in accomplishing the given tasks. We follow previous works and use a triple (subject, relation, object) representation. For example, the triple (Starbucks, address, 792 Bedoin St) is an example in KB representing the information related to the Starbucks. Specifically, given a query, we first retrieve top-n answer candidates from a pre-constructed conversational repository with question-answer pairs using BERT (Devlin et al., 2018) . Then, we extend memory networks (Sukhbaatar et al., 2015) to incorporate the commonsense knowledge from KB to learn the knowledge-enhanced representations of the dialogue history. Finally, we introduce a gating mechanism to effectively utilize candidate answers and improve the decoding process. The main contributions of this paper can be summarized as follows:", |
| "cite_spans": [ |
| { |
| "start": 818, |
| "end": 839, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 874, |
| "end": 899, |
| "text": "(Sukhbaatar et al., 2015)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose a hybrid pointer network consisting of entity pointer network (EPN) and pattern pointer network (PPN) to generate informative and relevant responses. EPN copies entity words from dialogue history, and PPN extracts pattern words from retrieved answers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We introduce a gating mechanism to learn the semantic correlations between the userissued query and the retrieved candidate answers, which reduces the \"noise\" brought by the retrieved answers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We evaluate the effectiveness of our model on four benchmark task-oriented dialogue datasets from different domains. Experimental results demonstrate the superiority of our proposed model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Task-oriented dialogue systems are mainly studied via two different approaches: pipeline based and end-to-end. Pipeline based models (Williams and Young, 2007; Young et al., 2013) achieve good stability but need domain-specific knowledge and handcrafted labels. End-to-end methods have shown promising results recently and attracted more attention since they are easily adapted to a new domain. Neural network based dialogue systems can avoid the laborious feature engineering since the neural networks have great ability to learn the latent representations of the input text. However, as revealed by previous studies (Koehn and Knowles, 2017; Cao et al., 2018; He et al., 2019) , the performance of the sequence to sequence model deteriorates quickly with the increase of the length of generation. Therefore, how to improve the stability and readability of the neural network models has attracted increasing attention. proposed a copy augmented Seq2Seq model by copying relevant information directly from the KB information. Madotto et al. (2018) proposed a generative model by employing the multi-hop attention over memories with the idea of pointer network. Wu et al. (2019) proposes a global-tolocally pointer mechanism to effectively utilize the knowledge base information, which improves the quality of the generated response.", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 159, |
| "text": "Young, 2007;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 160, |
| "end": 179, |
| "text": "Young et al., 2013)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 618, |
| "end": 643, |
| "text": "(Koehn and Knowles, 2017;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 644, |
| "end": 661, |
| "text": "Cao et al., 2018;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 662, |
| "end": 678, |
| "text": "He et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1026, |
| "end": 1047, |
| "text": "Madotto et al. (2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1161, |
| "end": 1177, |
| "text": "Wu et al. (2019)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Previous proposed neural approaches have shown the importance of external knowledge in the sequence generation (Chen et al., 2017; Zhu et al., 2018; Yang et al., 2019; Ding et al., 2019) , especially in the task-oriented dialogue systems where an appropriate response usually requires correctly extracting knowledge from the domain-specific or commonsense knowledge base (Madotto et al., 2018; Zhu et al., 2018; Qin et al., 2019) . However, it is still under great exploration with regard with the inclusion of external knowledge into the model. Yan et al. (2016) ; Song et al. (2018) argue that retrieval and generative methods have their own demerits and merits, and they have achieved good performance in the chit-chat response generation by incorporating the retrieved results in the Seq2Seq based models. Zhu et al. (2018) proposed an adversarial training approach, which is enhanced by retrieving some related candidate answers in the neural response generation, and Ghazvininejad et al. (2018) also applies a similar method in the neural conversation model. In addition, in task-oriented dialogue tasks, the copy mechanism (Gulcehre et al., 2016) has also been widely utilized Madotto et al., 2018) , which shows the superiority of generation based methods with copy strategy.", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 130, |
| "text": "(Chen et al., 2017;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 131, |
| "end": 148, |
| "text": "Zhu et al., 2018;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 149, |
| "end": 167, |
| "text": "Yang et al., 2019;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 168, |
| "end": 186, |
| "text": "Ding et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 371, |
| "end": 393, |
| "text": "(Madotto et al., 2018;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 394, |
| "end": 411, |
| "text": "Zhu et al., 2018;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 412, |
| "end": 429, |
| "text": "Qin et al., 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 546, |
| "end": 563, |
| "text": "Yan et al. (2016)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 566, |
| "end": 584, |
| "text": "Song et al. (2018)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 810, |
| "end": 827, |
| "text": "Zhu et al. (2018)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 973, |
| "end": 1000, |
| "text": "Ghazvininejad et al. (2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1130, |
| "end": 1153, |
| "text": "(Gulcehre et al., 2016)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1184, |
| "end": 1205, |
| "text": "Madotto et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We build our model based on a seq2seq dialogue generation mode, and the overall architecture is exhibited in Figure 1 . Each module will be elaborated in the following subsections.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 109, |
| "end": 117, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "By checking if a word is in the given KB, we divide words into two types: entity words (EW) and non-entity words (NEW). Taking \"what is the temperature of carson on tuesday\" as an example, all words are NEW except for \"carson\" and \"tuesday\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We represent a multi-turn dialogue as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "D = {(u i , s i )} T i=1 ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where T is the number of turns in the dialogue, and u i and s i denote the utterances of the user and the system at the i th turn, respectively. KB information is represented", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "as KB = {k 1 , k 2 , \u2022 \u2022 \u2022 , k l },", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where k i is a tuple and l is the size of KB. Following Madotto et al. 2018, we concatenate the previous dialogue and KB as input. At first turn, input to the decoder is [u 1 ; KB], the concatenation of first user request and KB. For i > 1, previous history dialog information is included, namely, input is supposed to be", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "[u 1 , s 1 , \u2022 \u2022 \u2022 , u i ; KB].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We define words in the concatenated input as a sequence of tokens", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "W = {w 1 , w 2 , \u2022 \u2022 \u2022 , w n }, where w j \u2208 {u 1 , s 1 , \u2022 \u2022 \u2022 , u i , KB} , n is the number of tokens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In this paper, we use the memory network (MemNN) proposed in Sukhbaatar et al. (2015) as the encoder module. The memories of MemNN are represented by a set of trainable embedding matrices", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 85, |
| "text": "Sukhbaatar et al. (2015)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "M = {M 1 , M 2 , \u2022 \u2022 \u2022 , M K },", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where K represents the number of hops and each M k maps the input into vectors. Different from Sukhbaatar et al. 2015; Madotto et al. 2018, we initialize each M k with the pre-trained embeddings 2 , whose weights are set to be trainable. At hop k, W is mapped to a set of memory vectors,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "{m k 1 , m k 2 , \u2022 \u2022 \u2022 , m k n },", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where the memory vectors m k i of dimension d from M k is computed by embedding each word in a continuous space, in the simplest case, using an embedding matrix A. A query vector q is used as a reading head, which will loop over K hops and compute the attention weights at hop k for each memory by taking the inner product followed by a softmax function,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p k i = softmax q k T m k i (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where p k i is a soft memory selector that decides the memory relevance with respect to the query vector q. The model then gets the memory c k by the weighted sum over m k+1 ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "c k = i p k i m k+1 i (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In addition, the query vector is updated for the next hop by q k+1 = q k + c k . In total, we can achieve K hidden states encoded from MemNN, represented", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "as C = {c 1 , c 2 , \u2022 \u2022 \u2022 , c K }.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Masking NEW in the history dialogue We observe that the ratio of non-entity words in both the history dialogue and the expected response is extremely low. Therefore, to prevent the model from copying non-entity words from the history dialogue, we introduce an array R h 3 whose elements are zeros and ones, where 0 denotes NEW and 1 for EW. When w i is pointed to, and if i is the sentinel location or R h [i] = 0, then w i will not be copied. The overall structure of our model. During test time, given a user query q, we retrieve at most 3 similar questions to q using BERT from QA Pairs repository, and the corresponding answers are used as our answer templates. The retrieved answers as well as the dialogue history and KB information are then utilized for the response generation. Especially, we utilize the gating mechanism to filter out noise from unrelated retrieval results. Finally, words are generated either from the vocabulary or directly copying from the multi-source information using a hybrid pointer network.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder Module", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For each dataset, we use the corresponding training data to pre-construct a question-answer repository. In particular, we treat each post-response (u i and s i ) in a dialogue as a pair of question-answer. To effectively retrieve potentially relevant answers, we adopt a sentence matching based approach, in which each sentence is represented as a dense vector, and the cosine similarity serves as the selection metrics. We have explored several unsupervised text matching methods, such as BM25 (Robertson et al., 2009) , Word2Vec (Mikolov et al., 2013b) , and BERT (Devlin et al., 2018) , and revealed that BERT could achieve the best performance. In addition, based on our preliminary experiments, we observed that the number of retrieved answer candidates have an impact on the model performance, so we define a threshold \u03b8 for controlling the number of retrieval answer candidates.", |
| "cite_spans": [ |
| { |
| "start": 495, |
| "end": 519, |
| "text": "(Robertson et al., 2009)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 531, |
| "end": 554, |
| "text": "(Mikolov et al., 2013b)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 566, |
| "end": 587, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrieval Module", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Specifically, for each question in the preconstructed database, we pre-compute the corresponding sentence embedding using BERT. Then, for each new user-issued query u q , we embed u q into u e q , and search in the pre-constructed database for the most similar requests based on cosine similarity. The corresponding answers are selected and serve as our answer candidates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrieval Module", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Masking EW in the retrieved answers In real dialogue scenes, the reply's sentence structure might be similar but the involved entities are usually different. To prevent the model from copying these entities, we introduce another array R r similar to R h mentioned before. Finally, the retrieved candidate answers are encoded into lowdimension distributed representations, denoted as ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrieval Module", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "AN = {a 1 , a 2 , \u2022 \u2022 \u2022 , a m },", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrieval Module", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "h a = W 2 tanh m i=1 W 1 c K ; a i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrieval Module", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrieval Module", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We first apply Gated Recurrent Unit (GRU) (Chung et al., 2014) to obtain the hidden state h t ,", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 62, |
| "text": "(Chung et al., 2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h t = GRU \u03c6 emb (y t\u22121 ), h * t\u22121", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where \u03c6 emb (\u2022) is an embedding function that maps each token to a fixed-dimensional vector. At the first time step, we use the special symbol \"SOS\" as y 0 and the initial hidden state", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "h * 0 = h a . h * t\u22121", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "consists of three parts, namely, the last hidden state h t\u22121 , the attention over", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "C = {c 1 , c 2 , \u2022 \u2022 \u2022 , c K }", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "from the encoder module, denoted as H c , and H g , which is calculated by linearly transforming last state h t\u22121 and h a with a multi-layer perceptron network. We formulate H c and H g as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Attention over C = {c 1 , c 2 , \u2022 \u2022 \u2022 , c K } Since", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "MemNN consists of multiple hops, we believe that different hops are relatively independent and have their own semantic meanings over the history dialog. At different time steps, we need to use different semantic information to generate different tokens, so our aim is to get a context-aware representation. We can achieve it by applying attention mechanism to the hidden states achieved at different hops,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H c = K i=1 \u03b1 i,t c i , \u03b1 i,t = e \u03b7(h t\u22121 ,c i ) K i=1 e \u03b7(h t\u22121 ,c i )", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where \u03b7 is the function that represents the correspondence for attention, usually approximated by a multi-layer neural network.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Template-guided gating mechanism As reported in Song et al. (2018) , the top-ranked retrieved reply is not always the one that best match the query, and multiple retrieved replies may provide different reference information to guide the response generation. However, using multiple retrieved replies may increase the probability of introducing \"noisy\" information, which adversely reduces the quality of the response generation. To tackle this issue, we add a gating mechanism to the hidden state of candidate answers, aiming at extracting valuable \"information\" at different time steps. Mathematically,", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 66, |
| "text": "Song et al. (2018)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H g = sigmoid (h a h t\u22121 ) h a", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We use element-wise multiplication to model the interaction between candidate answers (h a ) and last hidden state of GRU. h * t\u22121 is obtained by concatenating h t\u22121 , H c , and H g .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder Module", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We use another MemNN with three hops for the response generation, where h t of GRU serves as the initial reading head, as shown in Figure 1 . The output of MemNN is denoted as O = {o 1 , o 2 , o 3 } and attention weights are", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 131, |
| "end": 139, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hybrid pointer networks", |
| "sec_num": null |
| }, |
| { |
| "text": "P o = {p 1 o , p 2 o , p 3 o }.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hybrid pointer networks", |
| "sec_num": null |
| }, |
| { |
| "text": "Other than a candidate softmax P v used for generating a word from the vocabulary, we adopt the idea of Pointer Softmax in Gulcehre et al. (2016) , and introduce an Entity Pointer Networks (EPN) and a Pattern Pointer Networks (PPN), where EPN is trained to learn to copy entity words from dialogue history (or KB), and PPN is responsible for extracting pattern words from retrieved answers. For EPN, we use a location softmax P h , which is a pointer network where each of the output dimension corresponds to the location of a word in the context sequence. Likewise, we introduce a location softmax P r for PPN. P v is generated by concatenating the first hop attention read out and the current query vector,", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 145, |
| "text": "Gulcehre et al. (2016)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hybrid pointer networks", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P v = softmax(W v [o 1 ; h t ])", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Hybrid pointer networks", |
| "sec_num": null |
| }, |
| { |
| "text": "For P r and P h , we take the attention weights at the second MemNN hop and the third hop of the decoder, respectively: P r = p 2 o and P h = p 3 o . The output dimensions of P h and P v vary according to the length of the corresponding target sequence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hybrid pointer networks", |
| "sec_num": null |
| }, |
| { |
| "text": "With the three distributions, the key issue is how to decide which distribution should be chosen to generate a word w i for the current time step. Intuitively, entity words are relatively important, so we set the selection priority order as P r > P h > P v . Instead of using a gate function for selection (Gulcehre et al., 2016), we adopt the sentinel mechanism proposed in Madotto et al. (2018) . If the expected word is not appearing in the memories, then P h and P r are trained to produce a sentinel token 4 . When both P h and P r choose the sentinel token or the masked position, our model will generate the token from P v . Otherwise, it takes the memory content using P v or P r .", |
| "cite_spans": [ |
| { |
| "start": 375, |
| "end": 396, |
| "text": "Madotto et al. (2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hybrid pointer networks", |
| "sec_num": null |
| }, |
| { |
| "text": "We use four public multi-turn task-oriented dialog datasets to evaluate our model, including bAbI (Weston et al., 2015), In-Car Assistant (Eric and Manning, 2017) , DSTC2 (Henderson et al., 2014) and CamRest (Wen et al., 2016) . bAbI is automatically generated and the other three datasets are collected from real human dialogs.", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 195, |
| "text": "(Henderson et al., 2014)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 200, |
| "end": 226, |
| "text": "CamRest (Wen et al., 2016)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "bAbI We use tasks 1-5 from bAbI dialog corpus for restaurant reservation to verify the effectiveness of our model. For each task, there are 1000 dialogs for training, 1000 for development, and 1000 for testing. Tasks 1-2 verify dialog management to check if the model can track the dialog state implicitly. Tasks 3-4 verify if the model can leverage the KB tuples for the task-oriented dialog system. Tasks 5 combines Tasks 1-4 to produce full dialogs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In-Car Assistant This dataset consists of 3,031 multi-turn dialogs in three distinct domains: calendar sheduling, weather information retrieval, and point-of-interest navigation. This dataset has an average of 2.6 conversation turns and the KB information is complicated. Following the data processing in Madotto et al. (2018) , we obtain 2,425/302/304 dialogs for training/validation/testing respectively.", |
| "cite_spans": [ |
| { |
| "start": 305, |
| "end": 326, |
| "text": "Madotto et al. (2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The dialogs were extracted from the Dialogue State Tracking Challenge 2 for restaurant reservation. Following Bordes et al. 2017, we use merely the raw text of the dialogs and ignore the dialog state labels. In total, there are 1618 dialogs for training, 500 dialogs for validation, and 1117 dialogs for testing. Each dialog is composed of user and system utterances, and API calls to the domain-specific KB for the user's queries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DSTC2", |
| "sec_num": null |
| }, |
| { |
| "text": "CamRest This dataset consists of 676 human-tohuman dialogs in the restaurant reservation domain. This dataset has much more conversation turns with 5.1 turns on average. Following the data processing in Wen et al. 2017, we divide the dataset into training/validation/testing sets with 406/135/135 dialogs respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DSTC2", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the 300-dimensional word2vec vectors to initialize the word embeddings. The size of the GRU hidden units is set to 256. The recurrent weight parameters are initialized as orthogonal matrices. We initialize the other weight parameters with the normal distribution N (0, 0.01) and set the bias terms as zero. We train our model with Adam optimizer (Kingma and Ba, 2015) with an initial learning rate of 1e \u2212 4. By tuning the hyperparameters with the grid search over the validation sets, we find the other best settings in our model as follows. The number of hops for the memory network is set to 3, and gradients are clipped with a threshold of 10 to avoid explosion. In addition, we apply the dropout (Hinton et al., 2012) as a regularizer to the input and output of GRU, where the dropout rate is set to be 0.4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Detail", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We compare our model with several existing endto-end task-oriented dialogue systems 5 :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Retrieval method: This approach directly uses the retrieved result as the answer of the given utterance. Specifically, we use BERT-Base as a feature extractor for the sentences, and we use the cosine distance of the features as our retrieve scores, and then select the one with the highest score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Attn: Vanilla sequence-to-sequence model with attention (Luong et al., 2015 ).", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 77, |
| "text": "(Luong et al., 2015", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 MemNN: An extended Seq2Seq model where the recurrence read from a external memory multiple times before outputting the target word (Sukhbaatar et al., 2015 ).", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 157, |
| "text": "(Sukhbaatar et al., 2015", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 PtrUnk: An augmented sequence-tosequence model with attention based copy mechanism to copy unknown words during generation (Gulcehre et al., 2016 ).", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 147, |
| "text": "(Gulcehre et al., 2016", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 CASeq2Seq: This is a copy-augmented Seq2Seq model that learns attention weights to dialogue history with copy mechanism .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Mem2Seq: A memory network based approach with multi-hop attention for attending over dialogue history and KB tuples (Madotto et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 140, |
| "text": "(Madotto et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 BossNet: A bag-of-sequences memory architecture is proposed for disentangling language model from KB incorporation in task-oriented dialogues (Raghu et al., 2019 ).", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 163, |
| "text": "(Raghu et al., 2019", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 WMM2Seq: This method adopts a working memory to interact with two separated memory networks for dialogue history and KB entities (Chen et al., 2019 ).", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 149, |
| "text": "(Chen et al., 2019", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 GLMP: This is an augmented memory based model with a global memory pointer and a local memory pointer to strengthen the model's copy ability (Wu et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 143, |
| "end": 160, |
| "text": "(Wu et al., 2019)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In bAbI dataset, we adopt a common metric perresponse accuracy (Bordes et al., 2017) to evaluate the model performance. Following previous works (Madotto et al., 2018) , for three real human dialog datasets, we employ bilingual evaluation understudy (BLEU) (Papineni et al., 2002) and Entity F1 scores to evaluate the model's ability to generate relevant entities from knowledge base and to capture the semantics of the user-initiated dialogue flow .", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 84, |
| "text": "(Bordes et al., 2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 145, |
| "end": 167, |
| "text": "(Madotto et al., 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 257, |
| "end": 280, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation Metrics", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "BLEU We use BLEU to measure the n-gram (i.e., 4-gram) matching between the generated responses and the reference responses. The higher BLEU score indicates a better performance of the conversation system. Formally, we compute the 4-gram precision for the generated response Y as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation Metrics", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (Y,\u0176 ) = \u1ef8 min(\u03b7(\u1ef8 , Y ), \u03b7(\u1ef8 ,\u0176 )) \u1ef8 \u03b7(\u1ef8 , Y )", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Automatic Evaluation Metrics", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "where\u1ef8 traverses all candidate 4-grams, Y and Y are the ground-truth and predicted responses, \u03b7(\u1ef8 , Y ) indicates the number of 4-grams in Y . After achieving the precision, the BLEU score is then calculated as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation Metrics", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "BLEU = \u03bd(Y,\u0176 ) exp( 4 n=1 \u03b2 n log P (Y,\u0176 ))", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Automatic Evaluation Metrics", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "where \u03b2 n = 1/4 is a weight score. \u03bd(Y,\u0176 ) is a brevity penalty that penalizes short sentences. The higher BLEU score indicates better performance of the conversation system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation Metrics", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We adopt the perresponse accuracy metric to evaluate the dialog system's capability of generating an exact, correct responses. A generated response is considered right only if each word of the system output matches the corresponding word in the gold response. The final per-response accuracy score is calculated as the percentage of responses that are exactly the same as the corresponding gold dialogues. Per-response accuracy is a strict evaluation measure, which may only be suitable for the simulated dialog datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Per-response Accuracy", |
| "sec_num": null |
| }, |
| { |
| "text": "Entity F1 Entity F1 metric is used measure the system's capability of generating relevant entities from the provided task-oriented knowledge base. Each utterance in the test set has a set of gold entities. An entity F1 is computed by micro-averaging over all the generated responses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Per-response Accuracy", |
| "sec_num": null |
| }, |
| { |
| "text": "bAbI The dataset is automatically generated based on some rules, thus many requests and their corresponding replies are quite similar in terms of the syntactic structure and the wording usage. According to the results shown in see that our model achieves the best per-response scores in all the five tasks. It is also believed that the retrieved results can contribute to guiding the response generation in this case, which can be inferred from the high threshold value (\u03b8 = 0.8).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation on Four Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In-Car Assistant Dataset As shown in Table 6 , our model achieves all best metrics (BLEU, Ent.F1, Sch.F1, Wea.F1 and Nav.F1) over other reported models. The possible reason is that the retrieved answers with high relevance to the gold answers provide valid sentence pattern information. By using this sentence pattern information, our model can better control the generation of responses. Additionally, our model improves the success rate of generation correct entities which appeared in the dialogue history. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 37, |
| "end": 44, |
| "text": "Table 6", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automatic Evaluation on Four Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We also present the evaluation on DSTC2 and CamRest datasets in Table 8 and Table 9 , respectively. By comparing the results, we can notice that our model performs better than the compared methods. On the DSTC2, our model achieves the state-of-the-art performance in terms of both Entity F1 score and BLEU metrics, and has a comparable per-response accuracy with compared methods. On the CamRest, our model obtains the best Entity F1 score but has a drop in BLEU in comparison to Mem2Seq model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 64, |
| "end": 71, |
| "text": "Table 8", |
| "ref_id": "TABREF11" |
| }, |
| { |
| "start": 76, |
| "end": 83, |
| "text": "Table 9", |
| "ref_id": "TABREF12" |
| } |
| ], |
| "eq_spans": [], |
| "section": "DSTC2 and CamRest Datasets", |
| "sec_num": null |
| }, |
| { |
| "text": "An ablation study typically refers to removing some components or parts of the model, and seeing how that affects performance. To measure the influence of the individual components, we evaluate the proposed THPN model with each of them removed separately, and then measure the degradation of the overall performance. Table 7 reports ablation study results of THPN on bAbI and DSTC2 datasets by removing retrieved answers (w/o IR), removing EPN and PPN in decoding (w/o Ptr), removing answer-guided gating mechanism (w/o Gate), respectively. For example, \"w/o Gate\" means we do not use the answer-guided gating mechanism while keeping other components intact.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 317, |
| "end": 324, |
| "text": "Table 7", |
| "ref_id": "TABREF10" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "If the retrieved answer is not used, the performance reduces dramatically, which can be interpreted that without the guiding information from the retrieved answer, the decoder may deteriorate quickly once it produce a \"bad\" word since it solely relies on the input query.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "If no copy mechanism is used, we can see that Entity F1 score is the lowest, which indicates that many entities are not generated since these entity words may not be included in the vocabulary. Therefore, the best way to generate some unseen words is to directly copy from the input query, which is consistent with the findings of previous work Madotto et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 345, |
| "end": 366, |
| "text": "Madotto et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "If the gate is excluded, we can see around 2% drop for DSTC2. A possible reason is that some useless retrieved answers introduce \"noise\" to the system, which deteriorates the response generation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "To validate the effectiveness of the masking operation, we carry out a comparison experiment on In-Car Assistant, and present the results in Table 2 h & R \u2212 r has the lowest scores. By diving into the experimental results, we find that if we do not mask EW in the retrieved answers, the model copies many incorrect entities from the retrieved answers, which reduces the Entity F1 scores. If we do not mask NEW in the history dialogue, the percentage of NEW copied from the history dialogue is high, most of which are unrelated to the gold answer, thus bringing down the BLEU score.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 141, |
| "end": 148, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Masking Operation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "According to our preliminary experimental results, we observed that better retrieved candidate answers could further improve the overall model performance in response generation. Therefore, we also conduct experiments to evaluate the effectiveness of three popular text matching methods, including BM25 (Robertson et al., 2009) , word2vec (Mikolov et al., 2013a) and BERT (Devlin et al., 2018) . Here, BLEU is utilized as our evaluation criterion. From the experimental results shown in Table 4 , we can see that using BERT (Devlin et al., 2018) , a transformer-based pre-trained language model, achieves the highest BLEU scores. A possible reason is that the size of each training dataset is limited, the word co-occurrence based algorithms (e.g., BM25) may not capture the semantic information, thus result in poor retrieving performance.", |
| "cite_spans": [ |
| { |
| "start": 303, |
| "end": 327, |
| "text": "(Robertson et al., 2009)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 339, |
| "end": 362, |
| "text": "(Mikolov et al., 2013a)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 372, |
| "end": 393, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 524, |
| "end": 545, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 487, |
| "end": 494, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of Different Retrieval Methods", |
| "sec_num": null |
| }, |
| { |
| "text": "One vs. Multiple Retrieved Answers Cosine similarity is not an absolute criterion and there is no guarantee that a candidate with higher cosine value will always provide more reference information to the response generation. Therefore, we conduct an experiment to investigate the effect of the number of retrieved answers. By setting different cosine threshold values \u03b8, we retrieve different numbers of answer candidates. In particular, if no answer candidate satisfies the given threshold, we choose one with the highest cosine value. To limit the number of retrieved answers, we only select the top-3 results if there are more than three answer candidates that have higher consine values than the given threshold \u03b8. Table 3 gives the experimental results of DSTC2 dataset under different threshold \u03b8 values. When \u03b8 is set to be 1.0, it is considered as a special case where only one answer is retrieved. We can observe that using multiple answer candidates obtains higher performance than only using one result. It is intuitive that the model will be misguided if the retrieved single answer has no relation to the given request, and using multiple candidate answers can ameliorate this issue.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 719, |
| "end": 726, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of Different Retrieval Methods", |
| "sec_num": null |
| }, |
| { |
| "text": "Setting of \u03b8 Although using more retrieved answers might improve the chance of including the relevant information, it may also bring more \"noise\" and adversely affect the quality of retrieved answers. From Table 3 , we can see that with the reduced value of \u03b8, the average number of retrieved candidate answers increase, but the model performance does not improve accordingly. Experimental results on the other datasets demonstrate that the \u03b8 is not fixed and needs to be adjusted according to the experimental data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 206, |
| "end": 213, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of Different Retrieval Methods", |
| "sec_num": null |
| }, |
| { |
| "text": "In task-oriented dialog systems, the words and sentence structures are relatively limited and fixed, thus it is intuitive that the retrieved results can provide valuable information in guiding the response generation. In this paper, we retrieve several potentially relevant answers from a pre-constructed domain-specific conversation repository as guidance answers, and incorporate the guidance answers into both the encoding and decoding processes. We copy the words from the previous context and the retrieved answers directly, and generate words from the vocabulary. Experimental results over four datasets have demonstrated the effectiveness of our model in generating informative responses. In the future, we plan to leverage the dialogue context information to retrieve candidate answers turn by turn in multi-turn scenarios.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://s3-us-west-1.amazonaws.com/ fasttext-vectors/wiki.en.vec.3 The length of R h equals to that of W .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We add a special symbol to the end of each sentence. For example, \"good morning\" is converted to \"good morning $$$\". Therefore, if the model predicts the location of \"$$$\", it means that the expected word is not appearing in the context sequence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Part of experimental results of baseline models are directly extracted from corresponding published papers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Learning End-to-end Goal-oriented Dialog", |
| "authors": [ |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Y-Lan", |
| "middle": [], |
| "last": "Boureau", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antoine Bordes, Y-Lan Boureau, and Jason Weston. 2017. Learning End-to-end Goal-oriented Dialog. International Conference on Learning Representa- tions.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Retrieve, Rerank and Rewrite: Soft Template Based Neural Summarization", |
| "authors": [ |
| { |
| "first": "Ziqiang", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenjie", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "152--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziqiang Cao, Wenjie Li, Sujian Li, and Furu Wei. 2018. Retrieve, Rerank and Rewrite: Soft Template Based Neural Summarization. In Proceedings of the 56th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), vol- ume 1, pages 152-161.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural natural language inference models enhanced with external knowledge", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1711.04289" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Diana Inkpen, and Si Wei. 2017. Neural natural language inference models enhanced with external knowledge. arXiv preprint arXiv:1711.04289.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A working memory model for task-oriented dialog response generation", |
| "authors": [ |
| { |
| "first": "Xiuyi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiaming", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2687--2693", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiuyi Chen, Jiaming Xu, and Bo Xu. 2019. A work- ing memory model for task-oriented dialog response generation. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 2687-2693.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling", |
| "authors": [ |
| { |
| "first": "Junyoung", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.3555" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling. arXiv preprint arXiv:1412.3555.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Bert: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. North American Chapter of the Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Event representation learning enhanced with external commonsense knowledge", |
| "authors": [ |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Kuo", |
| "middle": [], |
| "last": "Liao", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongyang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Junwen", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.05190" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiao Ding, Kuo Liao, Ting Liu, Zhongyang Li, and Junwen Duan. 2019. Event representation learning enhanced with external commonsense knowledge. arXiv preprint arXiv:1909.05190.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Key-value retrieval networks for task-oriented dialogue", |
| "authors": [ |
| { |
| "first": "Mihail", |
| "middle": [], |
| "last": "Eric", |
| "suffix": "" |
| }, |
| { |
| "first": "Lakshmi", |
| "middle": [], |
| "last": "Krishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Francois", |
| "middle": [], |
| "last": "Charette", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "37--49", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mihail Eric, Lakshmi Krishnan, Francois Charette, and Christopher D Manning. 2017. Key-value retrieval networks for task-oriented dialogue. In Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, pages 37-49.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A Copy-augmented Sequence-to-sequence Architecture Gives Good Performance on Task-oriented Dialogue", |
| "authors": [ |
| { |
| "first": "Mihail", |
| "middle": [], |
| "last": "Eric", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "European Association of Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mihail Eric and Christopher D Manning. 2017. A Copy-augmented Sequence-to-sequence Architec- ture Gives Good Performance on Task-oriented Dia- logue. European Association of Computational Lin- guistics, page 468.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A Knowledge-grounded Neural Conversation Model", |
| "authors": [ |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen-tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marjan Ghazvininejad, Chris Brockett, Ming-Wei Chang, Bill Dolan, Jianfeng Gao, Wen-tau Yih, and Michel Galley. 2018. A Knowledge-grounded Neu- ral Conversation Model. In Thirty-Second AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Pointing the unknown words", |
| "authors": [ |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungjin", |
| "middle": [], |
| "last": "Ahn", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramesh", |
| "middle": [], |
| "last": "Nallapati", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "140--149", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Caglar Gulcehre, Sungjin Ahn, Ramesh Nallapati, Bowen Zhou, and Yoshua Bengio. 2016. Pointing the unknown words. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 140-149.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Quantifying exposure bias for neural language generation", |
| "authors": [ |
| { |
| "first": "Tianxing", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingzhao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1905.10617" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianxing He, Jingzhao Zhang, Zhiming Zhou, and James Glass. 2019. Quantifying exposure bias for neural language generation. arXiv preprint arXiv:1905.10617.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "The Second Dialog State Tracking Challenge", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| }, |
| { |
| "first": "Blaise", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [ |
| "D" |
| ], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 15th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "263--272", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Henderson, Blaise Thomson, and Jason D Williams. 2014. The Second Dialog State Tracking Challenge. In Proceedings of the 15th Annual Meet- ing of the Special Interest Group on Discourse and Dialogue, pages 263-272.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Improving Neural Networks by Preventing Co-adaptation of Feature Detectors", |
| "authors": [ |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [ |
| "R" |
| ], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1207.0580" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geoffrey E Hinton, Nitish Srivastava, Alex Krizhevsky, Ilya Sutskever, and Ruslan R Salakhutdinov. 2012. Improving Neural Networks by Preventing Co- adaptation of Feature Detectors. arXiv preprint arXiv:1207.0580.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "ADAM: A Method for Stochastic Optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Lei Ba. 2015. ADAM: A Method for Stochastic Optimization. In Proceed- ings of the 3rd International Conference on Learn- ing Representations.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Six Challenges for Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Knowles", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.03872" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn and Rebecca Knowles. 2017. Six Challenges for Neural Machine Translation. arXiv preprint arXiv:1706.03872.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "An isu dialogue system exhibiting reinforcement learning of dialogue policies: generic slot-filling in the talk in-car system", |
| "authors": [ |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Lemon", |
| "suffix": "" |
| }, |
| { |
| "first": "Kallirroi", |
| "middle": [], |
| "last": "Georgila", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Stuttle", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Eleventh Conference of the European Chapter of the Association for Computational Linguistics: Posters & Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "119--122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oliver Lemon, Kallirroi Georgila, James Henderson, and Matthew Stuttle. 2006. An isu dialogue system exhibiting reinforcement learning of dialogue poli- cies: generic slot-filling in the talk in-car system. In Proceedings of the Eleventh Conference of the Euro- pean Chapter of the Association for Computational Linguistics: Posters & Demonstrations, pages 119- 122. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Effective Approaches to Attention-based Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1412--1421", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thang Luong, Hieu Pham, and Christopher D Manning. 2015. Effective Approaches to Attention-based Neu- ral Machine Translation. Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing, pages 1412-1421.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Mem2seq: Effectively incorporating knowledge bases into end-to-end task-oriented dialog systems", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Chien-Sheng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1468--1478", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Madotto, Chien-Sheng Wu, and Pascale Fung. 2018. Mem2seq: Effectively incorporating knowl- edge bases into end-to-end task-oriented dialog sys- tems. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 1468-1478.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1301.3781" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013a. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013b. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in neural information processing systems, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "BLEU: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th annual meeting on association for computational linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting on association for compu- tational linguistics, pages 311-318.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Entity-consistent end-to-end task-oriented dialogue system with kb retriever", |
| "authors": [ |
| { |
| "first": "Libo", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yijia", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Haoyang", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangming", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.06762" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Libo Qin, Yijia Liu, Wanxiang Che, Haoyang Wen, Yangming Li, and Ting Liu. 2019. Entity-consistent end-to-end task-oriented dialogue system with kb re- triever. arXiv preprint arXiv:1909.06762.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Disentangling language and knowledge in task-oriented dialogs", |
| "authors": [ |
| { |
| "first": "Dinesh", |
| "middle": [], |
| "last": "Raghu", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1239--1255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dinesh Raghu, Nikhil Gupta, et al. 2019. Disentan- gling language and knowledge in task-oriented di- alogs. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 1239-1255.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Sequence level training with recurrent neural networks. International Conference on Learning Representations", |
| "authors": [ |
| { |
| "first": "Marc'Aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc'Aurelio Ranzato, Sumit Chopra, Michael Auli, and Wojciech Zaremba. 2016. Sequence level train- ing with recurrent neural networks. International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "The probabilistic relevance framework: Bm25 and beyond", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Robertson", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Zaragoza", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Foundations and Trends\u00ae in Information Retrieval", |
| "volume": "3", |
| "issue": "4", |
| "pages": "333--389", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: Bm25 and be- yond. Foundations and Trends R in Information Re- trieval, 3(4):333-389.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "An Ensemble of Retrieval-Based and Generation-Based Human-Computer Conversation Systems", |
| "authors": [ |
| { |
| "first": "Yiping", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheng-Te", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian-Yun", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongyan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yiping Song, Rui Yan, Cheng-Te Li, Jian-Yun Nie, Ming Zhang, and Dongyan Zhao. 2018. An En- semble of Retrieval-Based and Generation-Based Human-Computer Conversation Systems.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "End-to-end memory networks", |
| "authors": [ |
| { |
| "first": "Sainbayar", |
| "middle": [], |
| "last": "Sukhbaatar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Fergus", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2440--2448", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sainbayar Sukhbaatar, Jason Weston, Rob Fergus, et al. 2015. End-to-end memory networks. In Advances in neural information processing systems, pages 2440-2448.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A simple and generic belief tracking mechanism for the dialog state tracking challenge: On the believability of observed information", |
| "authors": [ |
| { |
| "first": "Zhuoran", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Lemon", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the SIGDIAL 2013 Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "423--432", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuoran Wang and Oliver Lemon. 2013. A simple and generic belief tracking mechanism for the dialog state tracking challenge: On the believability of ob- served information. In Proceedings of the SIGDIAL 2013 Conference, pages 423-432.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Conditional generation and snapshot learning in neural dialogue systems", |
| "authors": [ |
| { |
| "first": "Tsung-Hsien", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Gasic", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Lina", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rojas Barahona", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Ultes", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2153--2162", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Hsien Wen, Milica Gasic, Nikola Mrk\u0161i\u0107, Lina M Rojas Barahona, Pei-Hao Su, Stefan Ultes, David Vandyke, and Steve Young. 2016. Conditional gen- eration and snapshot learning in neural dialogue sys- tems. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2153-2162.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "A network-based end-to-end trainable task-oriented dialogue system", |
| "authors": [ |
| { |
| "first": "Tsung-Hsien", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrksic", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Gasic", |
| "suffix": "" |
| }, |
| { |
| "first": "Lina", |
| "middle": [ |
| "Maria" |
| ], |
| "last": "Rojas-Barahona", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Ultes", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [ |
| "J" |
| ], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter", |
| "volume": "1", |
| "issue": "", |
| "pages": "438--449", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/e17-1042" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Hsien Wen, David Vandyke, Nikola Mrksic, Milica Gasic, Lina Maria Rojas-Barahona, Pei-Hao Su, Stefan Ultes, and Steve J. Young. 2017. A network-based end-to-end trainable task-oriented di- alogue system. In Proceedings of the 15th Confer- ence of the European Chapter of the Association for Computational Linguistics, EACL 2017, Valen- cia, Spain, April 3-7, 2017, Volume 1: Long Papers, pages 438-449. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Towards ai-complete question answering: A set of prerequisite toy tasks", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rush", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merri\u00ebnboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1502.05698" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Weston, Antoine Bordes, Sumit Chopra, Alexan- der M Rush, Bart van Merri\u00ebnboer, Armand Joulin, and Tomas Mikolov. 2015. Towards ai-complete question answering: A set of prerequisite toy tasks. arXiv preprint arXiv:1502.05698.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Partially observable markov decision processes for spoken dialog systems", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [ |
| "D" |
| ], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computer Speech & Language", |
| "volume": "21", |
| "issue": "2", |
| "pages": "393--422", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason D Williams and Steve Young. 2007. Partially observable markov decision processes for spoken dialog systems. Computer Speech & Language, 21(2):393-422.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Global-to-local memory pointer networks for task-oriented dialogue", |
| "authors": [ |
| { |
| "first": "Chien-Sheng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "7th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chien-Sheng Wu, Richard Socher, and Caiming Xiong. 2019. Global-to-local memory pointer networks for task-oriented dialogue. In 7th International Confer- ence on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Learning to Respond with Deep Neural Networks for Retrieval-based Human-computer Conversation System", |
| "authors": [ |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiping", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 39th International ACM SIGIR conference on Research and Development in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "55--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rui Yan, Yiping Song, and Hua Wu. 2016. Learning to Respond with Deep Neural Networks for Retrieval- based Human-computer Conversation System. In Proceedings of the 39th International ACM SIGIR conference on Research and Development in Infor- mation Retrieval, pages 55-64.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Enhancing topic-to-essay generation with external commonsense knowledge", |
| "authors": [ |
| { |
| "first": "Pengcheng", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Fuli", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianyu", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2002--2012", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengcheng Yang, Lei Li, Fuli Luo, Tianyu Liu, and Xu Sun. 2019. Enhancing topic-to-essay generation with external commonsense knowledge. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 2002-2012.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Pomdp-based statistical spoken dialog systems: A review", |
| "authors": [ |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Blaise", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [ |
| "D" |
| ], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the IEEE", |
| "volume": "101", |
| "issue": "5", |
| "pages": "1160--1179", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steve Young, Milica Ga\u0161i\u0107, Blaise Thomson, and Ja- son D Williams. 2013. Pomdp-based statistical spo- ken dialog systems: A review. Proceedings of the IEEE, 101(5):1160-1179.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Syntaxenhanced self-attention-based semantic role labeling", |
| "authors": [ |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Luo", |
| "middle": [], |
| "last": "Si", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1910.11204" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yue Zhang, Rui Wang, and Luo Si. 2019. Syntax- enhanced self-attention-based semantic role label- ing. arXiv preprint arXiv:1910.11204.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Retrieval-Enhanced Adversarial Training for Neural Response Generation", |
| "authors": [ |
| { |
| "first": "Qingfu", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Cui", |
| "suffix": "" |
| }, |
| { |
| "first": "Weinan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Yining", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1809.04276" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qingfu Zhu, Lei Cui, Weinan Zhang, Furu Wei, Yining Chen, and Ting Liu. 2018. Retrieval-Enhanced Ad- versarial Training for Neural Response Generation. arXiv preprint arXiv:1809.04276.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Figure 1: The overall structure of our model. During test time, given a user query q, we retrieve at most 3 similar questions to q using BERT from QA Paris repository, and the corresponding answers are used as our answer templates. The retrieved answers as well as the dialogue history and KB information are then utilized for the response generation. Especially, we utilize the gating mechanism to filter out noise from unrelated retrieval results. Finally, words are generated either from the vocabulary or directly copying from the multi-source information using a hybrid pointer network." |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "where m is the total number of the words. Moreover, by an interaction between c K and AN = {a 1 , a 2 , \u2022 \u2022 \u2022 , a m }, we obtain a dense vector h a as the representation of the retrieved answers," |
| }, |
| "TABREF0": { |
| "num": null, |
| "content": "<table><tr><td/><td>Navigation</td><td/><td/></tr><tr><td>KB</td><td/><td>KB</td><td>carson: tuesday low of 20f carson: tuesday high of 40f</td></tr><tr><td>Gold</td><td>no problem, I will be navigating you to</td><td>Gold</td><td>the temperature in carson on tuesday will be</td></tr><tr><td/><td>5677 spring street right now</td><td/><td>low of 20f and high of 40f</td></tr></table>", |
| "type_str": "table", |
| "text": "Two example conversations from real dialogues about navigation and weather. Weather User please give me directions to 5677 spring street User what is the temperature of carson on tuesday Retrieve q1: direct me to stanford children's health Retrieve q1: the temperature of new york on wednesday a1: no problem, I will be navigating you to stanford children's health right now a1: the temperature in new york on wednesday will be low of 80f and high of 90f", |
| "html": null |
| }, |
| "TABREF1": { |
| "num": null, |
| "content": "<table><tr><td>, we can</td></tr></table>", |
| "type_str": "table", |
| "text": "", |
| "html": null |
| }, |
| "TABREF2": { |
| "num": null, |
| "content": "<table><tr><td colspan=\"3\">R + h & R + r means that we simultaneously mask NEW</td></tr><tr><td colspan=\"3\">and EW in the history dialogue and retrieved answers.</td></tr><tr><td>\u03b8</td><td colspan=\"2\"># of RA BLEU</td></tr><tr><td>0.3</td><td>2.48</td><td>56.1</td></tr><tr><td>0.4</td><td>2.16</td><td>56.2</td></tr><tr><td>0.5</td><td>1.90</td><td>59.8</td></tr><tr><td>0.6</td><td>1.75</td><td>56.6</td></tr><tr><td>1.0</td><td>1.00</td><td>56.5</td></tr></table>", |
| "type_str": "table", |
| "text": "Masking comparison experiment on In-Car Assistant. + means with masking and \u2212 denotes without.", |
| "html": null |
| }, |
| "TABREF3": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Experimental results in terms of BLEU on DSTC2 by using different \u03b8. # of RA denotes the average number of retrieved answers.", |
| "html": null |
| }, |
| "TABREF5": { |
| "num": null, |
| "content": "<table><tr><td colspan=\"10\">Task Retrieval Attn MemNN PtrUnk Mem2Seq BossNet GLMP WMM2Seq THPN</td></tr><tr><td>Task1</td><td>74.8</td><td>100</td><td>99.9</td><td>100</td><td>100</td><td>100</td><td>100</td><td>100</td><td>100</td></tr><tr><td>Task2</td><td>93.7</td><td>100</td><td>100</td><td>100</td><td>100</td><td>100</td><td>100</td><td>100</td><td>100</td></tr><tr><td>Task3</td><td>80.3</td><td>74.8</td><td>74.9</td><td>85.1</td><td>94.5</td><td>95.2</td><td>96.3</td><td>94.9</td><td>95.8</td></tr><tr><td>Task4</td><td>87.5</td><td>57.2</td><td>59.5</td><td>100</td><td>100</td><td>100</td><td>100</td><td>100</td><td>100</td></tr><tr><td>Task5</td><td>83.8</td><td>98.4</td><td>96.1</td><td>99.4</td><td>98.2</td><td>97.3</td><td>99.2</td><td>97.9</td><td>99.6</td></tr></table>", |
| "type_str": "table", |
| "text": "Comparison of different matching methods.", |
| "html": null |
| }, |
| "TABREF6": { |
| "num": null, |
| "content": "<table><tr><td>Method</td><td colspan=\"5\">BLEU Ent.F1 Sch.F1 Wea.F1 Nav.F1</td></tr><tr><td>Retrieval</td><td>15.3</td><td>20.1</td><td>24.9</td><td>26.3</td><td>9.4</td></tr><tr><td>Attn</td><td>9.3</td><td>19.9</td><td>23.4</td><td>25.6</td><td>10.8</td></tr><tr><td colspan=\"2\">CASeq2Seq 8.7</td><td>13.3</td><td>13.4</td><td>15.6</td><td>11.0</td></tr><tr><td>MemNN</td><td>8.3</td><td>22.7</td><td>26.9</td><td>26.7</td><td>14.9</td></tr><tr><td>PtrUnk</td><td>8.3</td><td>22.7</td><td>26.9</td><td>26.7</td><td>14.9</td></tr><tr><td>Mem2Seq</td><td>12.6</td><td>33.4</td><td>49.3</td><td>32.8</td><td>20.0</td></tr><tr><td>BossNet</td><td>8.3</td><td>35.9</td><td>50.2</td><td>34.5</td><td>21.6</td></tr><tr><td>THPN</td><td>12.8</td><td>37.8</td><td>50.0</td><td>37.9</td><td>27.5</td></tr></table>", |
| "type_str": "table", |
| "text": "Per-response scores on the five tasks of the bAbI dataset with \u03b8 = 0.8.", |
| "html": null |
| }, |
| "TABREF7": { |
| "num": null, |
| "content": "<table><tr><td>: Evaluation results on the In-Car Assistant</td></tr><tr><td>dataset with \u03b8 = 0.3.</td></tr></table>", |
| "type_str": "table", |
| "text": "", |
| "html": null |
| }, |
| "TABREF10": { |
| "num": null, |
| "content": "<table><tr><td>Method</td><td colspan=\"2\">Ent.F1 BLEU</td></tr><tr><td>Retrieval</td><td>21.1</td><td>47.1</td></tr><tr><td>Attn</td><td>67.1</td><td>56.6</td></tr><tr><td>KV Net</td><td>71.6</td><td>55.4</td></tr><tr><td>Mem2Seq</td><td>75.3</td><td>55.3</td></tr><tr><td>GLMP</td><td>67.4</td><td>58.1</td></tr><tr><td>THPN</td><td>76.8</td><td>59.8</td></tr></table>", |
| "type_str": "table", |
| "text": "Ablation test results of our THPN model on bAbI and DSTC2 datasets.", |
| "html": null |
| }, |
| "TABREF11": { |
| "num": null, |
| "content": "<table><tr><td>Method</td><td colspan=\"2\">Ent.F1 BLEU</td></tr><tr><td>Retrieval</td><td>7.9</td><td>21.2</td></tr><tr><td>Attn</td><td>21.4</td><td>5.9</td></tr><tr><td>PtrUnk</td><td>16.4</td><td>2.1</td></tr><tr><td>KV Net</td><td>9.1</td><td>4.3</td></tr><tr><td>Mem2Seq</td><td>27.7</td><td>12.6</td></tr><tr><td>THPN</td><td>30.9</td><td>12.9</td></tr></table>", |
| "type_str": "table", |
| "text": "Evaluation on DSTC2(\u03b8 = 0.5).", |
| "html": null |
| }, |
| "TABREF12": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Evaluation on CamRest(\u03b8 = 0.4).", |
| "html": null |
| } |
| } |
| } |
| } |