| { |
| "paper_id": "P19-1003", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:23:33.123728Z" |
| }, |
| "title": "Improving Multi-turn Dialogue Modelling with Utterance ReWriter", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Su", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "MPI Informatics & Spoken Language Systems (LSV)", |
| "institution": "", |
| "location": {} |
| }, |
| "email": "xshen@mpi-inf.mpg.de" |
| }, |
| { |
| "first": "Rongzhi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Chinese Academy of Science", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Alibaba Group", |
| "institution": "", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Pengwei", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Cheng", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recent research has made impressive progress in single-turn dialogue modelling. In the multi-turn setting, however, current models are still far from satisfactory. One major challenge is the frequently occurred coreference and information omission in our daily conversation, making it hard for machines to understand the real intention. In this paper, we propose rewriting the human utterance as a pre-process to help multi-turn dialogue modelling. Each utterance is first rewritten to recover all coreferred and omitted information. The next processing steps are then performed based on the rewritten utterance. To properly train the utterance rewriter, we collect a new dataset with human annotations and introduce a Transformer-based utterance rewriting architecture using the pointer network. We show the proposed architecture achieves remarkably good performance on the utterance rewriting task. The trained utterance rewriter can be easily integrated into online chatbots and brings general improvement over different domains. 1", |
| "pdf_parse": { |
| "paper_id": "P19-1003", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recent research has made impressive progress in single-turn dialogue modelling. In the multi-turn setting, however, current models are still far from satisfactory. One major challenge is the frequently occurred coreference and information omission in our daily conversation, making it hard for machines to understand the real intention. In this paper, we propose rewriting the human utterance as a pre-process to help multi-turn dialogue modelling. Each utterance is first rewritten to recover all coreferred and omitted information. The next processing steps are then performed based on the rewritten utterance. To properly train the utterance rewriter, we collect a new dataset with human annotations and introduce a Transformer-based utterance rewriting architecture using the pointer network. We show the proposed architecture achieves remarkably good performance on the utterance rewriting task. The trained utterance rewriter can be easily integrated into online chatbots and brings general improvement over different domains. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Dialogue systems have made dramatic progress in recent years, especially in single-turn chit-chat and FAQ matching (Shang et al., 2015; Ghazvininejad et al., 2018; Molino et al., 2018; . Nonethless, multi-turn dialogue modelling still remains extremely challenging (Vinyals and Le, 2015; Serban et al., 2016 Serban et al., , 2017 Shen et al., 2018a,b) . The challenge is multi-sided. One most important difficulty is the frequently occurred coreference and information omission in our daily conversations, especially in pro-drop languages like Chinese or Japanese. From our preliminary study of 2,000 Chinese multi-turn con- Table 1 : An example of multi-turn dialogue. Each utterance 3 is rewritten into Utterance 3 . Green means coreference and blue means omission.", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 135, |
| "text": "(Shang et al., 2015;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 136, |
| "end": 163, |
| "text": "Ghazvininejad et al., 2018;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 164, |
| "end": 184, |
| "text": "Molino et al., 2018;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 265, |
| "end": 287, |
| "text": "(Vinyals and Le, 2015;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 288, |
| "end": 307, |
| "text": "Serban et al., 2016", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 308, |
| "end": 329, |
| "text": "Serban et al., , 2017", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 330, |
| "end": 351, |
| "text": "Shen et al., 2018a,b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 625, |
| "end": 632, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "versations, different degrees of coreference and omission exist in more than 70% of the utterances. Capturing the hidden intention beneath them requires deeper understanding of the dialogue context, which is difficult for current neural networkbased systems. Table 1 shows two typical examples in multi-turn dialogues. \"\u4ed6\"(he) from Context 1 is a coreference to \"\u6885\u897f\"(Messi) and \"\u4e3a\u4ec0 \u4e48\"(Why) from Context 2 omits the further question of \"\u4e3a\u4ec0\u4e48\u6700\u559c\u6b22\u6cf0\u5766\u5c3c\u514b\"(Why do you like Titanic most)?. Without expanding the coreference or omission to recover the full information, the chatbot has no idea how to continue the talk.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 259, |
| "end": 266, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To address this concern, we propose simplifying the multi-turn dialogue modelling into a singleturn problem by rewriting the current utterance. The utterance rewriter is expected to perform (1) coreference resolution and (2) information completion to recover all coreferred and omitted mentions. In the two examples from Table 1, each utterance 3 will be rewritten into utterance 3 . Afterwards, the system will generate a reply by only looking into the utterance 3 without considering the previous turns utterance 1 and 2. This simplification shortens the length of dialogue con-text while still maintaining necessary information needed to provide proper responses, which we believe will help ease the difficulty of multi-turn dialogue modelling. Compared with other methods like memory networks (Sukhbaatar et al., 2015) or explicit belief tracking (Mrk\u0161i\u0107 et al., 2017) , the trained utterance rewriter is model-agnostic and can be easily integrated into other black-box dialogue systems. It is also more memory-efficient because the dialogue history information is reflected in a single rewritten utterance.", |
| "cite_spans": [ |
| { |
| "start": 797, |
| "end": 822, |
| "text": "(Sukhbaatar et al., 2015)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 851, |
| "end": 872, |
| "text": "(Mrk\u0161i\u0107 et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To get supervised training data for the utterance rewriting, we construct a Chinese dialogue dataset containing 20k multi-turn dialogues. Each utterance is paired with corresponding manually annotated rewritings. We model this problem as an extractive generation problem using the Pointer Network . The rewritten utterance is generated by copying words from either the dialogue history or the current utterance based on the attention mechanism (Bahdanau et al., 2014) . Inspired by the recently proposed Transformer architecture (Vaswani et al., 2017) in machine translation which can capture better intra-sentence word dependencies, we modify the Transformer architecture to include the pointer network mechanism. The resulting model outperforms the recurrent neural network (RNN) and original Transformer models, achieving an F1 score of over 0.85 for both the coreference resolution and information completion. Furthermore, we integrate our trained utterance rewriter into two online chatbot platforms and find it leads to more accurate intention detection and improves the user engagement. In summary, our contributions are: 1. We collect a high-quality annotated dataset for coreference resolution and information completion in multi-turn dialogues, which might benefit future related research.", |
| "cite_spans": [ |
| { |
| "start": 444, |
| "end": 467, |
| "text": "(Bahdanau et al., 2014)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 529, |
| "end": 551, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2. We propose a highly effective Transformerbased utterance rewriter outperforming several strong baselines.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "3. The trained utterance rewriter, when integrated into two real-life online chatbots, is shown to bring significant improvement over the original system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the next section, we will first go over some related work. Afterwards, in Section 3 and 4, our collected dataset and proposed model are introduced. The experiment results and analysis are presented in Section 5. Finally, some conclusions are drawn in Section 6.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2 Related Work", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Sentence rewriting has been widely adopted in various NLP tasks. In machine translation, people have used it to refine the output generations from seq2seq models (Niehues et al., 2016; Junczys-Dowmunt and Grundkiewicz, 2017; Grangier and Auli, 2017; Gu et al., 2017) . In text summarization, reediting the retrieved candidates can provide more accurate and abstractive summaries (See et al., 2017; Chen and Bansal, 2018; Cao et al., 2018) . In dialogue modelling, Weston et al. (2018) applied it to rewrite outputs from a retrieval model, but they pay no attention to recovering the hidden information under the coreference and omission. Concurrent with our work, Rastogi et al. (2019) adopts a similar idea on English conversations to simplify the downstream SLU task by reformulating the original utterance. Rewriting the source input into some easy-to-process standard format has also gained significant improvements in information retrieval (Riezler and Liu, 2010) , semantic parsing (Chen et al., 2016) or question answering (Abujabal et al., 2018) , but most of them adopt a simple dictionary or template based rewriting strategy. For multi-turn dialogues, due to the complexity of human languages, designing suitable template-based rewriting rules would be timeconsuming.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 184, |
| "text": "(Niehues et al., 2016;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 185, |
| "end": 224, |
| "text": "Junczys-Dowmunt and Grundkiewicz, 2017;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 225, |
| "end": 249, |
| "text": "Grangier and Auli, 2017;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 250, |
| "end": 266, |
| "text": "Gu et al., 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 379, |
| "end": 397, |
| "text": "(See et al., 2017;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 398, |
| "end": 420, |
| "text": "Chen and Bansal, 2018;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 421, |
| "end": 438, |
| "text": "Cao et al., 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 464, |
| "end": 484, |
| "text": "Weston et al. (2018)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 664, |
| "end": 685, |
| "text": "Rastogi et al. (2019)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 945, |
| "end": 968, |
| "text": "(Riezler and Liu, 2010)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 988, |
| "end": 1007, |
| "text": "(Chen et al., 2016)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1030, |
| "end": 1053, |
| "text": "(Abujabal et al., 2018)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Rewriting", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Coreference resolution aims to link an antecedent for each possible mention. Traditional approaches often adopt a pipeline structure which first identify all pronouns and entities then run clustering algorithms (Haghighi and Klein, 2009; Lee et al., 2011; Durrett and Klein, 2013; Bj\u00f6rkelund and Kuhn, 2014) . At both stages, they rely heavily on complicated, fine-grained features. Recently, several neural coreference resolution systems (Clark and Manning, 2016a,b) utilize distributed representations to reduce human labors. Lee et al. (2017) reported state-of-the-art results with an end-to-end neural coreference resolution system. However, it requires computing the scores for all possible spans, which is computationally inefficient on online dialogue systems. The recently proposed Transformer adopted the self-attention mechanism which could implicitly capture inter-word dependencies in an unsupervised way (Vaswani et al., 2017) . However, when multiple coreferences occur, it has problems properly distinguishing them. Our proposed architecture is built upon the Transformer architecture, but perform coreference resolution in a supervised setting to help deal with ambiguous mentions.", |
| "cite_spans": [ |
| { |
| "start": 211, |
| "end": 237, |
| "text": "(Haghighi and Klein, 2009;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 238, |
| "end": 255, |
| "text": "Lee et al., 2011;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 256, |
| "end": 280, |
| "text": "Durrett and Klein, 2013;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 281, |
| "end": 307, |
| "text": "Bj\u00f6rkelund and Kuhn, 2014)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 439, |
| "end": 467, |
| "text": "(Clark and Manning, 2016a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 528, |
| "end": 545, |
| "text": "Lee et al. (2017)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 917, |
| "end": 939, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreference Resolution", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "To get parallel training data for the sentence rewriting, we crawled 200k candidate multi-turn conversational data from several popular Chinese social media platforms for human annotators to work on. Sensitive information is filtered beforehand for later processing. Before starting the annotation, we randomly sample 2,000 conversational data and analyze how often coreference and omission occurs in multi-turn dialogues. Table 2 lists the statistics. As can be seen, only less than 30% utterances have neither coreference nor omission and quite a few utterances have both. This further validates the importance of addressing the these situations in multi-turn dialogues. In the annotation process, human annotators need to identify these two situations then rewrite the utterance to cover all hidden information. An example is shown in Table 1 . Annotators are required to provide the rewritten utterance 3 given the original conversation [utterance 1,2 and 3]. To ensure the annotation quality, 10% of the annotations from each annotator are daily examined by a project manager and feedbacks are provided. The annotation is considered valid only when the accuracy of examined results surpasses 95%. Apart from the accuracy examination, the project manage is also required to (1) select topics that are more likely to be talked about in daily conversations, (2) try to cover broader domains and (3) balance the proportion of different coreference and omission patterns. The whole annotation takes 4 months to finish. In the end, we get 40k high-quality parallel samples. Half of them are negative samples which do not need any rewriting. The other half are positive samples where rewriting is needed. Table 3 lists the statistics. The rewritten utterance contains 10.5 tokens in average, reducing the context length by 80%.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 423, |
| "end": 430, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 838, |
| "end": 845, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1703, |
| "end": 1710, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "40,000 Avg. length of original conversation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset size:", |
| "sec_num": null |
| }, |
| { |
| "text": "48.8 Avg. length of rewritten utterance:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset size:", |
| "sec_num": null |
| }, |
| { |
| "text": "10.5 ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset size:", |
| "sec_num": null |
| }, |
| { |
| "text": "We denote each training sample as (H, U n \u2192 R). H = {U 1 , U 2 , . . . , U n\u22121 } represents the dialogue history containing the first n \u2212 1 turn of utterances. U n is the nth turn of utterance, the one that needs to be rewritten. R is the rewritten utterance after recovering all coreferred and omitted information in U n . R could be identical to U n if no coreference or omission is detected (negative sample). Our goal is to learn a mapping function p(R|(H, U n )) that can automatically rewrite U n based on the history information H. The process is to first encode (H, U n ) into a sequence of vectors, then decode R using the pointer network.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model 4.1 Problem Formalization", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The next section will explain the steps in order.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model 4.1 Problem Formalization", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We unfold all tokens in (H, U n ) into (w 1 , w 2 , . . . , w m ). m is the number of tokens in the whole dialogue. An end-of-turn delimiter is inserted between each two turns. The unfolded sequence of tokens are then encoded with Transformer. We concatenate all tokens in (H, U n ) as the input, in hope that the Transformer can learn rudimentary coreference information within them by means of the self-attention mechanism. For each token w i , the input embedding is the sum of its word embedding, position embedding and turn embedding:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "I(w i ) = W E(w i ) + P E(w i ) + T E(w i )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The word embedding W E(w i ) and position embedding P E(w i ) are the same as in normal Transformer architectures (Vaswani et al., 2017) . We Figure 1 : Architecture of our proposed model. Green box is the Transformer encoder and pink box is the decoder. The decoder computes the probability \u03bb at each step to decide whether to copy from the context or utterance.", |
| "cite_spans": [ |
| { |
| "start": 114, |
| "end": 136, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 142, |
| "end": 150, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "add an additional turn embedding T E(w i ) to indicate which turn each token belongs to. Tokens from the same turn will share the same turn embedding. The input embeddings are then forwarded into L stacked encoders to get the final encoding representations. Each encoder contains a self-attention layer followed by a feedforward neural network:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "E (0) = I(w 1 ), I(w 2 ), . . . , I(w m ) E (l) = FNN(MultiHead(E (l\u22121) , E (l\u22121) , E (l\u22121) ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "FNN is the feedforward neural network and MultiHead(Q, K, V) is a multi-head attention function taking a query matrix Q, a key matrix K, and a value matrix V as inputs. Each self-attention and feedforward component comes with a residual connection and layer-normalization step, which we refer to Vaswani et al. (2017) for more details. The final encodings are the output from the Lth encoder E (L) .", |
| "cite_spans": [ |
| { |
| "start": 296, |
| "end": 317, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 394, |
| "end": 397, |
| "text": "(L)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The decoder also contains L layers, each layer is composed of three sub-layers. The first sub-layer is a multi-head self-attention:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "M l = MultiHead(D (l\u22121) , D (l\u22121) , D (l\u22121) ) D (0) = R.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The second sub-layer is encoderdecoder attention that integrates E (L) into the decoder. In our task, as H and U n serve different purposes, we use separate key-value matrix for tokens coming from the dialogue history H and those coming from U n . The encoded sequence E (L) obtained from the last section is split into E Un (encodings of tokens from U n ) then processed separately. The encoder-decoder vectors are computed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "C(H) l = MultiHead(M (l) , E (L) H , E (L) H ) C(U n ) l = MultiHead(M (l) , E (L) Un , E (L)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Un )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The third sub-layer is a position-wise fully connected feed-forward neural network:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "D (l) = FNN([C(H) l \u2022 C(U n ) l ])", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where \u2022 denotes vector concatenation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In the decoding process, we hope our model could learn whether to copy words from H or U n at different steps. Therefore, we impose a soft gating weight \u03bb to make the decision. The decoding probability is computed by combining the atten-tion distribution from the last decoding layer:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Output Distribution", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "p(R t =w|H, U n , R <t )=\u03bb i:(w i =w)\u2227(w i \u2208H) a t,i +(1\u2212\u03bb) j:(w j =w)\u2227(w j \u2208Un) a t,j a = Attention(M (L) , E (L) Un ) a = Attention(M (L) , E (L) H ) \u03bb = \u03c3 w d D L t + w H C(H) L t + w U C(U n ) L t", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Output Distribution", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "a and a are the attention distribution over tokens in H and U n respectively. w d , w H , and w U are parameters to be learned, \u03c3 is the sigmoid function to output a value between 0 and 1. The gating weight \u03bb works like a sentinel to inform the decoder whether to extract information from the dialogue history H or directly copy from U n . If U n contains neither coreference nor information omission. \u03bb would be always 1 to copy the original U n as the output. Otherwise \u03bb becomes 0 when a coreference or omission is detected. The attention mechanism is then responsible of finding the proper coreferred or omitted information from the dialogue history. The whole model is trained endto-end by maximizing p(R|H, U n ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Output Distribution", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We train our model to perform the utterance rewriting task on our collected dataset. In this section, we focus on answering the following two questions: (1) How accurately our proposed model can perform coreference resolution and information completion respectively and (2) How good the trained utterance rewriter is at helping off-the-shelf dialogue systems provide more appropriate responses. To answer the first question, we compare our models with several strong baselines and test them by both automatic evaluation and human judgement. For the second question, we integrate our rewriting model to two online dialogue systems and analyze how it affects the human-computer interactions. The following section will first introduce the compared models and basic settings, then report our evaluation results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "When choosing compared models, we are mainly curious to see (1) whether the self-attention based Transformer architecture is superior to other networks like LSTMs, (2) whether the pointer-based generator is better than pure generation-based models and (3) whether it is preferred to split the attention by a coefficient \u03bb as in our model. With these intentions, we implement the following four types of models for comparison:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "1. (L/T)-Gen: Pure generation-based model. Words are generated from a fixed vocabulary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "2. (L/T)-Ptr-Net: Pure pointer-based model as in . Words can only be copied from the input.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "3. (L/T)-Ptr-Gen: Hybrid pointer+generation model as in See et al. (2017) . Words can be either copied from the input or generated from a fixed vocabulary.", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 73, |
| "text": "See et al. (2017)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "4. (L/T)-Ptr-\u03bb: Our proposed model which split the attention by a coefficient \u03bb.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "(L/T) denotes the encoder-decoder structure is the LSTM or Transformer. For the first three types of models, we unfold all tokens from the dialogue as the input. No difference is made between the dialogue history and the utterance to be rewritten.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Transformer-based models We set the hidden size as 512. The attention has 8 individual heads and the encoder/decoder have 6 individual stacked layers. Models are optimized with the Adam optimizer. The initial learning rate is 0.0001 and batch size is 64. All hyperparameters are tuned base on the performance on the validation data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Settings", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We encode words with a single-layer bidirectional LSTM and decode with a uni-directional LSTM. We use 128-dimensional word embeddings and 256-dimensional hidden states for both the encoder and decoder. 2 The batch size is set as 128. Models are trained using Adagrad with learning rate 0.15 and initial accumulator value 0.1, same as in See et al. (2017) .", |
| "cite_spans": [ |
| { |
| "start": 202, |
| "end": 203, |
| "text": "2", |
| "ref_id": null |
| }, |
| { |
| "start": 337, |
| "end": 354, |
| "text": "See et al. (2017)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "LSTM-based Models", |
| "sec_num": null |
| }, |
| { |
| "text": "We built our vocabulary based on character-based segmentation for Chinese scripts. For non-Chinese characters, like frequently mentioned entity names \"Kobe\" and \"NBA\", we split them by space and keep all unique tokens which appear more than twice. characters and 816 other tokens), including the end-of-turn delimiter and a special UNK token for all unknown words. In the testing stage, all models decode words by beam search with beam size set to 4. Accuracy of Generation We first evaluate the accuracy of generation leveraging three metrics: BLEU, ROUGE, and the exact match score(EM) (the percentage of decoded sequences that exactly match the human references). For the EM score, we report separately on the positive and negative samples to see the difference. We report BLEU-1, 2, 4 scores and the F1 scores of ROUGE-1, 2, L.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "General Setup", |
| "sec_num": null |
| }, |
| { |
| "text": "The results are listed in Table 4 . We can have several observations in response to the three questions proposed in the beginning of Section 5.1:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 26, |
| "end": 33, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Quality of Sentence ReWriting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "1. Transformer-based models lead to signif-icant improvement compare with LSTMbased counterparts. This implies the selfattention mechanism is helpful in identifying coreferred and omitted information. More analysis on how it helps coreference resolution can be seen in the next section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quality of Sentence ReWriting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "2. The generation mode does not work well in our setting since all words can be retrieved from either H or U n . Pointer-based models outperform the more complex generationbased and hybrid ones.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quality of Sentence ReWriting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "3. Separately processing H and U n then combine their attention with a learned \u03bb performs better than treating the whole dialogue tokens as s single input, though the improvement is less significant compared with previous two mentions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quality of Sentence ReWriting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Overall our proposed model achieves remarkably good performance, with 55.84% of its generations exactly matches the human reference on the positive samples. For negative samples, our model properly copied the the original utterances in 98.14% of the cases. It suggests our model is already able to identify the utterances that do not need rewriting. Future work should work on improving the rewriting ability on positive samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quality of Sentence ReWriting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Coreference Resolution Apart from the standard metrics for text generation, we specifically test the precision, recall and F1 score of coreference resolution on our task. A pronoun or a noun is considered as properly coreferred if the rewritten utterance contains the correct mention in the corresponding referent. The result is shown in Table 5 . To compare with current state-of-the- art models. We train the model from Lee et al. (2017) on our task and report the results on the first row. The result is quite consistent with the findings from the last section. Our final model outperforms the others by a large margin, reaching a precision score of 93% and recall score of 90%. It implies our model is already quite good at finding the proper coreference. Future challenges would be more about information completion. Figure 2 further provides an examples of how the Transformer can help implicitly learn the coreference resolution through the self-attention mechanism. The same example is also shown in Table 1 . The pronoun \"\u4ed6\"(he) in the utterance is properly aligned to the mention \"\u6885\u897f\"(Messi) in the dialogue history, also partially to \"\u7403\u5458\"(player) which is the occupation of him. The implicitly learned coreference relation should be part of the reason that Transformers outperform LSTM models on the coreference resolution task. Information Completion Similar as coreference resolution, we evaluate the quality of information completeness separately. One omitted information is considered as properly completed if the rewritten utterance recovers the omitted words.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 338, |
| "end": 345, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 822, |
| "end": 830, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 1008, |
| "end": 1015, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Quality of Sentence ReWriting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Since it inserts new words to the original utterance, we further conduct a human evaluation to measure the fluency of rewritten utterances. We randomly sample 600 samples from our positive test set. Three participants were asked to judge whether the rewritten utterance is a fluent sentence with the score 1(not fluent)-5(fluent). The fluency score for each model is averaged over all human evaluated scores. The results are shown in Table 7 . Basically the condition is similar as in Table 5 . T-Ptr-\u03bb achieves the best performance, with the F1 score of 0.86. The performance is slightly worse than coreference resolution since information omission is more implicit. Retrieving all hidden information is sometimes difficult even for humans. Moreover, the fluency of our model's generations is very good, only slightly worse than the human ref- ples of rewritten utterances are shown in Table 6 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 434, |
| "end": 441, |
| "text": "Table 7", |
| "ref_id": "TABREF9" |
| }, |
| { |
| "start": 485, |
| "end": 492, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 887, |
| "end": 894, |
| "text": "Table 6", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Quality of Sentence ReWriting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "In this section, we study how the proposed utterance rewriter can be integrated into off-the-shelf online chatbots to improve the quality of generated responses. We use our best model T-Ptr-\u03bb to rewrite each utterance based on the dialogue context. The rewritten utterance is then forwarded to the system for response generation. We apply on both a task-oriented and chitchat setting. The results are compared with the original system having no utterance rewriter.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integration Testing", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Task-oriented Our task-oriented dialogue system contains an intention classifier built on Fast-Text (Bojanowski et al., 2017) and a set of templates that perform policy decision and slot-value filling sequentially. Intention detection is a most important component in task-oriented dialogues and its accuracy will affect all the following steps. We define 30 intention classes like weather, hotel booking and shopping. The training data contains 35,447 human annotations. With the combination of our rewriter, the intention classier is able to achieve a precision of 89.91%, outperforming the original system by over 9%. The improved intention classification further lead to better conversations. An example is shown in Table 8 , a multiturn conversation about the weather. The user first asks \"How is the weather in Beijing\", then follows with a further question about \"Then what clothes are suitable to wear\". The original system wrongly classified the user intention as shopping since this is a common conversational pattern in shopping. In contrast, our utterance rewriter is able to recover the omitted information \"under the weather in Beijing\". Based on the rewritten utterance, the classifier is able to correctly detect the intention and provide proper responses.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 125, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 720, |
| "end": 727, |
| "text": "Table 8", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Integration Testing", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Chitchat Our social chatbot contains two separate engines for multi-turn and single-turn dialogues. Each engine is a hybrid retrieval and generation model. In real-life applications, a user query would be simultaneously distributed to these two engines. The returned candidate responses are then reranked to provide the final response. Generally the model is already able to provide rather high-quality responses under the single-turn condition, but under multi-turn conversations, the complex context dependency makes the generation difficult. We integrate our utterance rewriter into the single-turn engine and compare with the original model by conducting the online A/B test. Specifically, we randomly split the users into two groups. One talks with the original system and the other talks with the system integrated with the utterance rewriter. All users are unconscious of the details about our system. The whole test lasted one month. Table 9 shows the Conversation-turns Per Session (CPS), which is the average number of conversation-turns between the chatbot and the user in a session. The utterance rewriter increases the average CPS from 6.3 to 7.7, indicating the user is more engaged with the integrated model. Table 8 shows an example of how the utterance rewriter helps with the generation. After the rewriting, the model can better understand the dialogue is about the NBA team Warriors, but the original model feels confused and only provides a generic response.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 942, |
| "end": 949, |
| "text": "Table 9", |
| "ref_id": "TABREF12" |
| }, |
| { |
| "start": 1224, |
| "end": 1231, |
| "text": "Table 8", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Integration Testing", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "In this paper, we propose improving multi-turn dialogue modelling by imposing a separate utterance rewriter. The rewriter is trained to recover the coreferred and omitted information of user utterances. We collect a high-quality manually annotated dataset and designed a Transformer-pointer based architecture to train the utterance rewriter. The trained utterance rewriter performs remarkably well and, when integrated into two online chatbot applications, significantly improves the intention detection and user engagement. We hope the collected dataset and proposed model can benefit future related research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We tried increasing the dimension but find it degrades the performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank all anonymous reviewers and the dialogue system team of Wechat AI for valuable comments. Xiaoyu Shen is supported by IMPRS-CS fellowship.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Never-ending learning for open-domain question answering over knowledge bases", |
| "authors": [ |
| { |
| "first": "Abdalghani", |
| "middle": [], |
| "last": "Abujabal", |
| "suffix": "" |
| }, |
| { |
| "first": "Rishiraj", |
| "middle": [], |
| "last": "Saha Roy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohamed", |
| "middle": [], |
| "last": "Yahya", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerhard", |
| "middle": [], |
| "last": "Weikum", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 World Wide Web Conference on World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "1053--1062", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abdalghani Abujabal, Rishiraj Saha Roy, Mohamed Yahya, and Gerhard Weikum. 2018. Never-ending learning for open-domain question answering over knowledge bases. In Proceedings of the 2018 World Wide Web Conference on World Wide Web, pages 1053-1062. International World Wide Web Confer- ences Steering Committee.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1409.0473" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Learning structured perceptrons for coreference resolution with latent antecedents and non-local features", |
| "authors": [ |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "Bj\u00f6rkelund", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "47--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anders Bj\u00f6rkelund and Jonas Kuhn. 2014. Learn- ing structured perceptrons for coreference resolution with latent antecedents and non-local features. In Proceedings of the 52nd Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 47-57.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Retrieve, rerank and rewrite: Soft template based neural summarization", |
| "authors": [ |
| { |
| "first": "Ziqiang", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenjie", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "152--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziqiang Cao, Wenjie Li, Sujian Li, and Furu Wei. 2018. Retrieve, rerank and rewrite: Soft template based neural summarization. In Proceedings of the 56th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), vol- ume 1, pages 152-161.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Sentence rewriting for semantic parsing", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Le", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Xianpei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "An", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Chen, Le Sun, Xianpei Han, and Bo An. 2016. Sentence rewriting for semantic parsing. CoRR, abs/1901.02998.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Driven answer generation for product-related questions in e-commerce", |
| "authors": [ |
| { |
| "first": "Shiqian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenliang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Feng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Haiqing", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Twelfth ACM International Conference on Web Search and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "411--419", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiqian Chen, Chenliang Li, Feng Ji, Wei Zhou, and Haiqing Chen. 2019. Driven answer generation for product-related questions in e-commerce. In Pro- ceedings of the Twelfth ACM International Confer- ence on Web Search and Data Mining, pages 411- 419. ACM.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Fast abstractive summarization with reinforce-selected sentence rewriting", |
| "authors": [ |
| { |
| "first": "Yen-Chun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1805.11080" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yen-Chun Chen and Mohit Bansal. 2018. Fast abstrac- tive summarization with reinforce-selected sentence rewriting. arXiv preprint arXiv:1805.11080.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Deep reinforcement learning for mention-ranking coreference models", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.08667" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Clark and Christopher D Manning. 2016a. Deep reinforcement learning for mention-ranking corefer- ence models. arXiv preprint arXiv:1609.08667.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Improving coreference resolution by learning entitylevel distributed representations", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.01323" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Clark and Christopher D Manning. 2016b. Im- proving coreference resolution by learning entity- level distributed representations. arXiv preprint arXiv:1606.01323.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Easy victories and uphill battles in coreference resolution", |
| "authors": [ |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Durrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1971--1982", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Greg Durrett and Dan Klein. 2013. Easy victories and uphill battles in coreference resolution. In Proceed- ings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1971-1982.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A knowledge-grounded neural conversation model", |
| "authors": [ |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yih", |
| "middle": [], |
| "last": "Wen-Tau", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marjan Ghazvininejad, Chris Brockett, Ming-Wei Chang, Bill Dolan, Jianfeng Gao, Wen-tau Yih, and Michel Galley. 2018. A knowledge-grounded neural conversation model. In Thirty-Second AAAI Confer- ence on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Quickedit: Editing text & translations via simple delete actions", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1711.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Grangier and Michael Auli. 2017. Quickedit: Editing text & translations via simple delete actions. arXiv preprint arXiv:1711.04805.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Search engine guided nonparametric neural machine translation", |
| "authors": [ |
| { |
| "first": "Jiatao", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [ |
| "K" |
| ], |
| "last": "Victor", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1705.07267" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiatao Gu, Yong Wang, Kyunghyun Cho, and Vic- tor OK Li. 2017. Search engine guided non- parametric neural machine translation. arXiv preprint arXiv:1705.07267.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Simple coreference resolution with rich syntactic and semantic features", |
| "authors": [ |
| { |
| "first": "Aria", |
| "middle": [], |
| "last": "Haghighi", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "3", |
| "issue": "", |
| "pages": "1152--1161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aria Haghighi and Dan Klein. 2009. Simple corefer- ence resolution with rich syntactic and semantic fea- tures. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Process- ing: Volume 3-Volume 3, pages 1152-1161. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "An exploration of neural sequence-tosequence architectures for automatic post-editing", |
| "authors": [ |
| { |
| "first": "Marcin", |
| "middle": [], |
| "last": "Junczys", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Dowmunt", |
| "suffix": "" |
| }, |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Grundkiewicz", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.04138" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcin Junczys-Dowmunt and Roman Grundkiewicz. 2017. An exploration of neural sequence-to- sequence architectures for automatic post-editing. arXiv preprint arXiv:1706.04138.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Stanford's multi-pass sieve coreference resolution system at the conll-2011 shared task", |
| "authors": [ |
| { |
| "first": "Heeyoung", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Peirsman", |
| "suffix": "" |
| }, |
| { |
| "first": "Angel", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathanael", |
| "middle": [], |
| "last": "Chambers", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the fifteenth conference on computational natural language learning: Shared task", |
| "volume": "", |
| "issue": "", |
| "pages": "28--34", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heeyoung Lee, Yves Peirsman, Angel Chang, Nathanael Chambers, Mihai Surdeanu, and Dan Ju- rafsky. 2011. Stanford's multi-pass sieve corefer- ence resolution system at the conll-2011 shared task. In Proceedings of the fifteenth conference on com- putational natural language learning: Shared task, pages 28-34. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "End-to-end neural coreference resolution", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luheng", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1707.07045" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Luheng He, Mike Lewis, and Luke Zettle- moyer. 2017. End-to-end neural coreference resolu- tion. arXiv preprint arXiv:1707.07045.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Cota: Improving the speed and accuracy of customer support through ranking and deep networks", |
| "authors": [ |
| { |
| "first": "Piero", |
| "middle": [], |
| "last": "Molino", |
| "suffix": "" |
| }, |
| { |
| "first": "Huaixiu", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi-Chia", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "586--595", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piero Molino, Huaixiu Zheng, and Yi-Chia Wang. 2018. Cota: Improving the speed and accuracy of customer support through ranking and deep net- works. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 586-595. ACM.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Neural belief tracker: Data-driven dialogue state tracking", |
| "authors": [ |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Diarmuid\u00f3", |
| "middle": [], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsung-Hsien", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Blaise", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1777--1788", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikola Mrk\u0161i\u0107, Diarmuid\u00d3 S\u00e9aghdha, Tsung-Hsien Wen, Blaise Thomson, and Steve Young. 2017. Neural belief tracker: Data-driven dialogue state tracking. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1777-1788.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Pre-translation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Niehues", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunah", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Thanh-Le", |
| "middle": [], |
| "last": "Ha", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Waibel", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1610.05243" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jan Niehues, Eunah Cho, Thanh-Le Ha, and Alex Waibel. 2016. Pre-translation for neural machine translation. arXiv preprint arXiv:1610.05243.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Scaling multi-domain dialogue state tracking via query reformulation", |
| "authors": [ |
| { |
| "first": "Pushpendre", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| }, |
| { |
| "first": "Arpit", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Tongfei", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Lambert", |
| "middle": [], |
| "last": "Mathias", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pushpendre Rastogi, Arpit Gupta, Tongfei Chen, and Lambert Mathias. 2019. Scaling multi-domain dialogue state tracking via query reformulation. NAACL.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Query rewriting using monolingual statistical machine translation", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Riezler", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Computational Linguistics", |
| "volume": "36", |
| "issue": "3", |
| "pages": "569--582", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan Riezler and Yi Liu. 2010. Query rewriting using monolingual statistical machine translation. Com- putational Linguistics, 36(3):569-582.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Get to the point: Summarization with pointer-generator networks", |
| "authors": [ |
| { |
| "first": "Abigail", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "J" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1704.04368" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abigail See, Peter J Liu, and Christopher D Man- ning. 2017. Get to the point: Summarization with pointer-generator networks. arXiv preprint arXiv:1704.04368.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Building end-to-end dialogue systems using generative hierarchical neural network models", |
| "authors": [ |
| { |
| "first": "Iulian", |
| "middle": [ |
| "V" |
| ], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iulian V Serban, Alessandro Sordoni, Yoshua Bengio, Aaron Courville, and Joelle Pineau. 2016. Building end-to-end dialogue systems using generative hier- archical neural network models. AAAI.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A hierarchical latent variable encoder-decoder model for generating dialogues", |
| "authors": [ |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Vlad Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Charlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iulian Vlad Serban, Alessandro Sordoni, Ryan Lowe, Laurent Charlin, Joelle Pineau, Aaron Courville, and Yoshua Bengio. 2017. A hierarchical latent variable encoder-decoder model for generating di- alogues. AAAI.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Neural responding machine for short-text conversation", |
| "authors": [ |
| { |
| "first": "Lifeng", |
| "middle": [], |
| "last": "Shang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengdong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1503.02364" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lifeng Shang, Zhengdong Lu, and Hang Li. 2015. Neural responding machine for short-text conversa- tion. arXiv preprint arXiv:1503.02364.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Nexus network: Connecting the preceding and the following in dialogue generation", |
| "authors": [ |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenjie", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Dietrich", |
| "middle": [], |
| "last": "Klakow", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "4316--4327", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaoyu Shen, Hui Su, Wenjie Li, and Dietrich Klakow. 2018a. Nexus network: Connecting the preceding and the following in dialogue generation. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods in Natural Language Processing, pages 4316- 4327.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Improving variational encoder-decoders in dialogue generation", |
| "authors": [ |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuzi", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| }, |
| { |
| "first": "Vera", |
| "middle": [], |
| "last": "Demberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaoyu Shen, Hui Su, Shuzi Niu, and Vera Demberg. 2018b. Improving variational encoder-decoders in dialogue generation. In Thirty-Second AAAI Con- ference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "End-to-end memory networks", |
| "authors": [ |
| { |
| "first": "Sainbayar", |
| "middle": [], |
| "last": "Sukhbaatar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Fergus", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2440--2448", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sainbayar Sukhbaatar, Jason Weston, Rob Fergus, et al. 2015. End-to-end memory networks. In Advances in neural information processing systems, pages 2440-2448.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Pointer networks", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Meire", |
| "middle": [], |
| "last": "Fortunato", |
| "suffix": "" |
| }, |
| { |
| "first": "Navdeep", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2692--2700", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Meire Fortunato, and Navdeep Jaitly. 2015. Pointer networks. In Advances in Neural In- formation Processing Systems, pages 2692-2700.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "A neural conversational model", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.05869" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals and Quoc Le. 2015. A neural conversa- tional model. arXiv preprint arXiv:1506.05869.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Retrieve and refine: Improved sequence generation models for dialogue", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Dinan", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop SCAI: The 2nd International Workshop on Search-Oriented Conversational AI", |
| "volume": "", |
| "issue": "", |
| "pages": "87--92", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Weston, Emily Dinan, and Alexander Miller. 2018. Retrieve and refine: Improved sequence gen- eration models for dialogue. In Proceedings of the 2018 EMNLP Workshop SCAI: The 2nd Interna- tional Workshop on Search-Oriented Conversational AI, pages 87-92.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "codings of tokens from H) and E (L)", |
| "num": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Visualization of the self-attention weights in Transformer. \"\u4ed6\"(he) is properly aligned to \"\u6885 \u897f\"(Messi).", |
| "num": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "num": null, |
| "text": "", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "num": null, |
| "text": "Statistics of dataset. Length is counted in the unit of Chinese characters.", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "num": null, |
| "text": "BLEU, ROUGE (F 1 ), and EM scores on the test set. EM score is split into the results on the positive (left) and negative (right) test samples. The first half is LSTM-based models and the second half is Transformer-based. Bold denotes best results.", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "num": null, |
| "text": "Precision, recall and F1 score of coreference resolution. First row is the current state-of-the-art coreference resolution model", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "num": null, |
| "text": "Do you read Shakespeare U2: I especially like Romeo and Juliet U1: Do you play League of Legends U2: Yes. \u4ec0\u4e48 \u4e48 \u4e48\u65f6 \u65f6 \u65f6\u5019 \u5019 \u5019\u5f00 \u5f00 \u5f00\u59cb \u59cb \u59cb\u73a9 \u73a9 \u73a9\u82f1 \u82f1 \u82f1\u96c4 \u96c4 \u96c4\u8054 \u8054 \u8054\u76df \u76df \u76df\u7684 \u7684 \u7684 // When did you start to play League of Legends \u4f60\u559c \u559c \u559c\u6b22 \u6b22 \u6b22\u7f57 \u7f57 \u7f57\u5bc6 \u5bc6 \u5bc6\u6b27 \u6b27 \u6b27\u4e0e \u4e0e \u4e0e\u6731 \u6731 \u6731\u4e3d \u4e3d \u4e3d\u53f6 \u53f6 \u53f6\u54ea \u54ea \u54ea\u4e2a \u4e2a \u4e2a\u89d2 \u89d2 \u89d2\u8272 \u8272 \u8272 // Which character do you like Romeo and Juliet \u4ec0 \u4ec0 \u4ec0\u4e48 \u4e48 \u4e48\u65f6 \u65f6 \u65f6\u5019 \u5019 \u5019\u5f00 \u5f00 \u5f00\u59cb \u59cb \u59cb\u73a9 \u73a9 \u73a9\u82f1 \u82f1 \u82f1\u96c4 \u96c4 \u96c4\u8054 \u8054 \u8054\u76df \u76df \u76df\u7684 \u7684 \u7684 // When did you start to play League of Legends", |
| "html": null, |
| "content": "<table><tr><td>History</td><td>U1: \u4f60\u770b\u838e\u58eb\u6bd4\u4e9a\u5417 U2: \u7279\u522b\u559c\u6b22\u7f57\u5bc6\u6b27\u4e0e\u6731\u4e3d\u53f6</td><td>U1: \u4f60\u73a9\u82f1\u96c4\u8054\u76df\u5417 U2: \u662f\u7684</td></tr><tr><td colspan=\"2\">(Translation) U1: Utterance U3:\u559c\u6b22\u54ea\u4e2a\u89d2\u8272</td><td>U3: \u4ec0\u4e48\u65f6\u5019\u5f00\u59cb\u7684</td></tr><tr><td/><td>U3: Which character do you like</td><td>U3: When did it start</td></tr><tr><td colspan=\"2\">Ground Truth \u4f60\u559c\u6b22\u7f57\u5bc6\u6b27\u4e0e\u6731\u4e3d\u53f6\u54ea\u4e2a\u89d2\u8272</td><td>\u4ec0\u4e48\u65f6\u5019\u5f00\u59cb\u73a9\u82f1\u96c4\u8054\u76df\u7684</td></tr><tr><td/><td>Which character do you like in Romeo and Juliet</td><td>When did you start to play League of Legends</td></tr><tr><td>L-Gen</td><td>\u4f60\u559c\u6b22\u838e\u58eb\u6bd4\u4e9a\u5417 // Do you like Shakespeare</td><td>\u4ec0\u4e48\u65f6\u5019\u5f00\u59cb\u5f00\u59cb\u5f00\u59cb // When start start start</td></tr><tr><td>L-Ptr-Gen</td><td>\u4f60\u559c\u6b22\u7f57\u5bc6\u6b27\u89d2\u8272\u89d2\u8272 // You like Romeo character character</td><td>\u4ec0\u4e48\u65f6\u5019\u5f00\u59cb\u7684 // When did it start</td></tr><tr><td>L-Ptr-Net</td><td>\u4f60\u559c\u6b22\u7f57\u5bc6\u6b27\u4e0e\u6731\u4e3d\u53f6 // You like Romeo and Juliet</td><td>\u4ec0\u4e48\u65f6\u5019\u82f1\u96c4\u8054\u76df\u5f00\u59cb\u7684 // When did League of Legends start</td></tr><tr><td colspan=\"3\">L-Ptr-\u03bb \u4ec0 \u4ec0 T-Gen \u4f60\u559c\u6b22\u7f57\u5bc6\u6b27\u4e0e\u6731\u4e3d\u53f6\u89d2\u8272 // You like Romeo and Juliet character \u4f60\u559c\u6b22\u7f57\u5bc6\u6b27\u4e0e\u6731\u4e3d\u53f6 // You like Romeo and Juliet \u662f\u7684\u4ec0\u4e48\u65f6\u5019\u5f00\u59cb\u73a9\u7684 // Yes When start to play</td></tr><tr><td>T-Ptr-Gen</td><td>\u4f60\u559c\u6b22\u7f57\u5bc6\u6b27\u4e0e\u6731\u4e3d\u53f6\u54ea\u4e2a // Which do you like in Romeo and 
Juliet</td><td>\u4ec0\u4e48\u65f6\u5019\u5f00\u59cb\u7684 // When did it start</td></tr><tr><td>T-Ptr-Net</td><td>\u4f60\u559c\u6b22\u7f57\u5bc6\u6b27\u4e0e\u6731\u4e3d\u53f6\u89d2\u8272 // Character you like Romeo and Juliet</td><td>\u82f1\u96c4\u8054\u76df\u4ec0\u4e48\u65f6\u5019\u5f00\u59cb\u73a9\u7684 // League of Legends When did you start to play</td></tr><tr><td>T-Ptr-\u03bb</td><td>\u4f60 \u4f60</td><td/></tr></table>" |
| }, |
| "TABREF7": { |
| "type_str": "table", |
| "num": null, |
| "text": "Examples of rewritten utterances. Highlighted utterances are exactly the same as the ground truth.", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF9": { |
| "type_str": "table", |
| "num": null, |
| "text": "Recall, Precision, F1 score on information completion and Human evaluation results on fluency.", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF10": { |
| "type_str": "table", |
| "num": null, |
| "text": "How is the weather in Beijing U2: The weather is fine and the temperature is suitable Utterance U3: \u90a3\u7a7f\u4ec0\u4e48\u8863\u670d\u5408\u9002 U3: \u5317\u4eac\u5929\u6c14\u7a7f\u4ec0\u4e48\u5408\u9002 U3: Then what clothes are suitable to wear U3: What clothes are suitable for weather in Beijing Intention \u751f\u6d3b\u8d2d\u7269 \u57ce\u5e02\u5929\u6c14", |
| "html": null, |
| "content": "<table><tr><td colspan=\"2\">Task-Oriented Chatbot</td><td/></tr><tr><td>Context</td><td colspan=\"2\">U1: \u5317\u4eac\u5929\u6c14\u600e\u4e48\u6837 U2: \u5929\u6c14\u6674\u6717\uff0c\u6e29\u5ea6\u9002\u5b9c</td></tr><tr><td>(Translation)</td><td>U1: Life Shopping</td><td>City Weather</td></tr><tr><td colspan=\"2\">Chatbot Answer \u60a8\u60f3\u8981\u8d2d\u4e70\u4ec0\u4e48\u7c7b\u578b\u7684\u8863\u670d</td><td>\u6839\u636e\u5929\u6c14\u63a8\u8350\u7a7f\u4e00\u4ef6\u5916\u5957</td></tr><tr><td/><td>What type of clothes do you want to buy</td><td>You'd better wear a coat according to the weather</td></tr><tr><td>Chit-Chat Chatbot</td><td/><td/></tr><tr><td>Context</td><td colspan=\"2\">U1: \u5e93\u91cc\u7684\u4e09\u5206\u771f\u51c6\u554a U2: \u52c7\u58eb\u4eca\u5e74\u53c8\u662f\u51a0\u519b</td></tr><tr><td/><td colspan=\"2\">U1: Curry's 3-pointer is really good U2: The Warriors are the champion again this year</td></tr><tr><td>Utterance</td><td>U3: \u6211\u4e5f\u89c9\u5f97</td><td>U3: \u6211\u4e5f\u89c9\u5f97\u52c7\u58eb\u4eca\u5e74\u53c8\u662f\u51a0\u519b</td></tr><tr><td/><td>U3: I agree</td><td>U3: I agree that the Warriors are the champion again this year</td></tr><tr><td colspan=\"2\">Chatbot Answer \u89c9\u5f97\u4ec0\u4e48</td><td>\u52c7\u58eb\u771f\u7684\u5389\u5bb3\u554a</td></tr><tr><td/><td>agree what</td><td>The Warriors are so strong</td></tr><tr><td/><td/><td>erence (4.90 vs 4.97). Information completeness</td></tr><tr><td/><td/><td>does not have much effects on the fluency. Exam-</td></tr></table>" |
| }, |
| "TABREF11": { |
| "type_str": "table", |
| "num": null, |
| "text": "Examples of integrated test. Left column is the original system and right is the one with utterance rewriter. Blue words denote completed information by the utterance rewriter.", |
| "html": null, |
| "content": "<table><tr><td>Model</td><td colspan=\"2\">Intention Precision CPS</td></tr><tr><td>Original</td><td>80.77</td><td>6.3</td></tr><tr><td>With Rewrite</td><td>89.91</td><td>7.7</td></tr></table>" |
| }, |
| "TABREF12": { |
| "type_str": "table", |
| "num": null, |
| "text": "Results of integrated testing. Intention precision for task-oriented and conversation-turns-persession (CPS) for chitchat.", |
| "html": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |