| { |
| "paper_id": "P19-1001", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:28:00.332758Z" |
| }, |
| "title": "One Time of Interaction May Not Be Enough: Go Deep with an Interaction-over-Interaction Network for Response Selection in Dialogues", |
| "authors": [ |
| { |
| "first": "Chongyang", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Peking University", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "chongyangtao@pku.edu.cn" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Corporation", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "wuwei@microsoft.com" |
| }, |
| { |
| "first": "Can", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Corporation", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "caxu@microsoft.com" |
| }, |
| { |
| "first": "Wenpeng", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Peking University", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "wenpeng.hu@pku.edu.cn" |
| }, |
| { |
| "first": "Dongyan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Peking University", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "zhaody@pku.edu.cn" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Peking University", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "ruiyan@pku.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Currently, researchers have paid great attention to retrieval-based dialogues in opendomain. In particular, people study the problem by investigating context-response matching for multi-turn response selection based on publicly recognized benchmark data sets. State-of-the-art methods require a response to interact with each utterance in a context from the beginning, but the interaction is performed in a shallow way. In this work, we let utterance-response interaction go deep by proposing an interaction-over-interaction network (IoI). The model performs matching by stacking multiple interaction blocks in which residual information from one time of interaction initiates the interaction process again. Thus, matching information within an utterance-response pair is extracted from the interaction of the pair in an iterative fashion, and the information flows along the chain of the blocks via representations. Evaluation results on three benchmark data sets indicate that IoI can significantly outperform state-of-theart methods in terms of various matching metrics. Through further analysis, we also unveil how the depth of interaction affects the performance of IoI.", |
| "pdf_parse": { |
| "paper_id": "P19-1001", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Currently, researchers have paid great attention to retrieval-based dialogues in opendomain. In particular, people study the problem by investigating context-response matching for multi-turn response selection based on publicly recognized benchmark data sets. State-of-the-art methods require a response to interact with each utterance in a context from the beginning, but the interaction is performed in a shallow way. In this work, we let utterance-response interaction go deep by proposing an interaction-over-interaction network (IoI). The model performs matching by stacking multiple interaction blocks in which residual information from one time of interaction initiates the interaction process again. Thus, matching information within an utterance-response pair is extracted from the interaction of the pair in an iterative fashion, and the information flows along the chain of the blocks via representations. Evaluation results on three benchmark data sets indicate that IoI can significantly outperform state-of-theart methods in terms of various matching metrics. Through further analysis, we also unveil how the depth of interaction affects the performance of IoI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Building a chitchat style dialogue systems in opendomain for human-machine conversations has attracted increasing attention in the conversational artificial intelligence (AI) community. Generally speaking, there are two approaches to implementing such a conversational system. The first approach leverages techniques of information retrieval (Lowe et al., 2015; , and selects a proper response from an index; while the second approach directly synthesizes a response with a natural lan-guage generation model estimated from a largescale conversation corpus (Serban et al., 2016; Li et al., 2017b) . In this work, we study the problem of multi-turn response selection for retrievalbased dialogue systems where the input is a conversation context consisting of a sequence of utterances. Compared with generation-based methods, retrieval-based methods are superior in terms of response fluency and diversity, and thus have been widely applied in commercial chatbots such as the social bot XiaoIce (Shum et al., 2018) from Microsoft, and the e-commerce assistant AliMe Assist from Alibaba Group (Li et al., 2017a) .", |
| "cite_spans": [ |
| { |
| "start": 342, |
| "end": 361, |
| "text": "(Lowe et al., 2015;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 557, |
| "end": 578, |
| "text": "(Serban et al., 2016;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 579, |
| "end": 596, |
| "text": "Li et al., 2017b)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 994, |
| "end": 1013, |
| "text": "(Shum et al., 2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1091, |
| "end": 1109, |
| "text": "(Li et al., 2017a)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A key step in multi-turn response selection is to measure the matching degree between a conversation context and a response candidate. Stateof-the-art methods perform matching within a representationinteraction-aggregation framework where matching signals in each utteranceresponse pair are distilled from their interaction based on their representations, and then are aggregated as a matching score. Although utteranceresponse interaction has proven to be crucial to the performance of the matching models , it is executed in a rather shallow manner where matching between an utterance and a response candidate is determined only by one step of interaction on each type or each layer of representations. In this paper, we attempt to move from shallow interaction to deep interaction, and consider context-response matching with multiple steps of interaction where residual information from one time of interaction, which is generally ignored by existing methods, is leveraged for additional interactions. The underlying motivation is that if a model extracts some matching information from utterance-response pairs in one step of interaction, then by stacking multiple such steps, the model can gradually accumulate useful signals for matching and finally capture the semantic relationship between a context and a response candidate in a more comprehensive way.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We propose an interaction-over-interaction network (IoI) for context-response matching, through which we aim to investigate: (1) how to make interaction go deep in a matching model; and (2) if the depth of interaction really matters in terms of matching performance. A key component in IoI is an interaction block. Taking a pair of utteranceresponse as input, the block first lets the utterance and the response attend to themselves, and then measures interaction of the pair by an attentionbased interaction function. The results of the interaction are concatenated with the self-attention representations and then compressed to new representations of the utterance-response pair as the output of the block. Built on top of the interaction block, IoI initializes each utterance-response pair via pre-trained word embeddings, and then passes the initial representations through a chain of interaction blocks which conduct several rounds of representation-interaction-representation operations and let the utterance and the response interact with each other in an iterative way. Different blocks could distill different levels of matching information in an utterance-response pair. To sufficiently leverage the information, a matching score is first calculated in each block through aggregating matching vectors of all utterance-response pairs, and then the block-wise matching scores are combined as the final matching degree of the context and the response candidate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We conduct experiments on three benchmark data sets: the Ubuntu Dialogue Corpus (Lowe et al., 2015) , the Douban Conversation Corpus , and the E-commerce Dialogue Corpus (Zhang et al., 2018b) . Evaluation results indicate that IoI can significantly outperform stateof-the-art methods with 7 interaction blocks over all metrics on all the three benchmarks. Compared with deep attention matching network (DAM), the best performing baseline on all the three data sets, IoI achieves 2.9% absolute improvement on R 10 @1 on the Ubuntu data, 2.3% absolute improvement on MAP on the Douban data, and 3.7% absolute improvement on R 10 @1 on the Ecommerce data. Through more quantitative analysis, we also show that depth indeed brings improvement to the performance of IoI, as IoI with 1 interaction block performs worse than DAM on the Douban data and the E-commerce data, and on the Ubuntu data, the gap on R 10 @1 between IoI and DAM is only 1.1%. Moreover, the improvement brought by depth mainly comes from short contexts.", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 99, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 170, |
| "end": 191, |
| "text": "(Zhang et al., 2018b)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our contributions in this paper are three-folds: (1) proposal of a novel interaction-over-interaction network which enables deep-level matching with carefully designed interaction block chains; (2) empirical verification of the effectiveness of the model on three benchmarks; and (3) empirical study on the relationship between interaction depth and model performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Existing methods for building an open-domain dialogue system can be categorized into two groups. The first group learns response generation models under an encoder-decoder framework. On top of the basic sequence-to-sequence with attention architecture (Vinyals and Le, 2015; Shang et al., 2015; Tao et al., 2018) , various extensions have been made to tackle the \"safe response\" problem (Li et al., 2015; Mou et al., 2016; Zhao et al., 2017; Song et al., 2018) ; to generate responses with specific personas or emotions (Li et al., 2016a; ; and to pursue better optimization strategies (Li et al., 2017b (Li et al., , 2016b .", |
| "cite_spans": [ |
| { |
| "start": 252, |
| "end": 274, |
| "text": "(Vinyals and Le, 2015;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 275, |
| "end": 294, |
| "text": "Shang et al., 2015;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 295, |
| "end": 312, |
| "text": "Tao et al., 2018)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 387, |
| "end": 404, |
| "text": "(Li et al., 2015;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 405, |
| "end": 422, |
| "text": "Mou et al., 2016;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 423, |
| "end": 441, |
| "text": "Zhao et al., 2017;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 442, |
| "end": 460, |
| "text": "Song et al., 2018)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 520, |
| "end": 538, |
| "text": "(Li et al., 2016a;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 586, |
| "end": 603, |
| "text": "(Li et al., 2017b", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 604, |
| "end": 623, |
| "text": "(Li et al., , 2016b", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The second group learns a matching model of a human input and a response candidate for response selection. Along this line, the focus of research starts from single-turn response selection by setting the human input as a single message (Wang et al., 2013; Hu et al., 2014; Wang et al., 2015) , and moves to context-response matching for multi-turn response selection recently. Representative methods include the dual LSTM model (Lowe et al., 2015) , the deep learning to respond architecture , the multi-view matching model (Zhou et al., 2016) , the sequential matching network , and the deep attention matching network . Besides model design, some attention is also paid to the learning problem of matching models (Wu et al., 2018a) . Our work belongs to the second group. The proposed interaction-over-interaction network is unique in that it performs matching by stacking multiple interaction blocks, and thus extends the shallow interaction in state-of-the-art methods to a deep ", |
| "cite_spans": [ |
| { |
| "start": 236, |
| "end": 255, |
| "text": "(Wang et al., 2013;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 256, |
| "end": 272, |
| "text": "Hu et al., 2014;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 273, |
| "end": 291, |
| "text": "Wang et al., 2015)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 428, |
| "end": 447, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 524, |
| "end": 543, |
| "text": "(Zhou et al., 2016)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 715, |
| "end": 733, |
| "text": "(Wu et al., 2018a)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GRU", |
| "sec_num": null |
| }, |
| { |
| "text": "...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GRU", |
| "sec_num": null |
| }, |
| { |
| "text": "g(c,r)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block 1 Interaction Block 2 Interaction Block L GRU GRU", |
| "sec_num": null |
| }, |
| { |
| "text": ": Self-attention : Interaction Operation : Add Operation T1 1 v1 1 Tn 1 vn 1 T1 2 v1 2 Tn 2 vn 2 Tn L vn L T1 L v1 L", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block 1 Interaction Block 2 Interaction Block L GRU GRU", |
| "sec_num": null |
| }, |
| { |
| "text": "Figure 1: Architecture of interaction-over-interaction network.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block 1 Interaction Block 2 Interaction Block L GRU GRU", |
| "sec_num": null |
| }, |
| { |
| "text": "form. As far as we know, this is the first architecture that realizes deep interaction for multi-turn response selection. Encouraged by the big success of deep neural architectures such as Resnet (He et al., 2016) and inception (Szegedy et al., 2015) in computer vision, researchers have studied if they can achieve similar results with deep neural networks on NLP tasks. Although deep models have not yet brought breakthroughs to NLP as they do to computer vision, they have proven effective in a few tasks such as text classification (Conneau et al., 2017) , natural language inference (Kim et al., 2018; Tay et al., 2018) , and question answering (Tay et al., 2018; Kim et al., 2018) , etc. In this work, we attempt to improve the accuracy of multi-turn response selection in retrieval-based dialogue systems by increasing the depth of context-response interaction in matching. Through extensive studies on benchmarks, we show that depth can bring significant improvement to model performance on the task.", |
| "cite_spans": [ |
| { |
| "start": 196, |
| "end": 213, |
| "text": "(He et al., 2016)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 228, |
| "end": 250, |
| "text": "(Szegedy et al., 2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 536, |
| "end": 558, |
| "text": "(Conneau et al., 2017)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 588, |
| "end": 606, |
| "text": "(Kim et al., 2018;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 607, |
| "end": 624, |
| "text": "Tay et al., 2018)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 650, |
| "end": 668, |
| "text": "(Tay et al., 2018;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 669, |
| "end": 686, |
| "text": "Kim et al., 2018)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block 1 Interaction Block 2 Interaction Block L GRU GRU", |
| "sec_num": null |
| }, |
| { |
| "text": "Suppose that there is a conversation data set", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formalization", |
| "sec_num": "3" |
| }, |
| { |
| "text": "D = {(y i , c i , r i )} N i=1 . \u2200i \u2208 {1, . . . , N }, c i = {u i,1 , . . . , u i,l i } represents a conversation context", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formalization", |
| "sec_num": "3" |
| }, |
| { |
| "text": "with u i,k the k-th turn, r i is a response candidate, and y i \u2208 {0, 1} denotes a label with y i = 1 indicating r i a proper response for c i , otherwise y i = 0. The task is to learn a matching model g(\u2022, \u2022) from D, and thus for a new context-response pair (c, r), g(c, r) measures the matching degree between c and r.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formalization", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In the following sections, we will elaborate how to define g(\u2022, \u2022) to achieve deep interaction between c and r, and how to learn such a deep model from D.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formalization", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We define g(\u2022, \u2022) as an interaction-over-interaction network (IoI). Figure 1 illustrates the architecture of IoI. The model pairs each utterance in a context with a response candidate, and then aggregates matching information from all the pairs as a matching score of the context and the response candidate. For each pair, IoI starts from initial representations of the utterance and the response, and then feeds the pair to stacked interaction blocks. Each block represents the utterance and the response by letting them interact with each other based on the interactions before. Matching signals are first accumulated along the sequence of the utterances in each block, and then combined along the chain of blocks as the final matching score. Below we will describe details of components of IoI and how to learn the model with D.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 68, |
| "end": 76, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Interaction-over-Interaction Network", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Given an utterance u in a context c and a response candidate r, u and r are initialized as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Representations", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "E u = [e u,1 , \u2022 \u2022 \u2022 , e u,m ] and E r = [e r,1 , \u2022 \u2022 \u2022 , e r,n ] respec- tively. \u2200i \u2208 {1, .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Representations", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": ". . , m} and \u2200j \u2208 {1, . . . , n}, e u,i and e r,j are representations of the i-th word of u and the j-th word of r respectively which are obtained by pre-training Word2vec (Mikolov et al., 2013) on D. E u and E r are then processed by stacked interaction blocks that model different levels of interaction between u and r and generate matching signals.", |
| "cite_spans": [ |
| { |
| "start": 172, |
| "end": 194, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Representations", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The stacked interaction blocks share the same internal structure. In a nutshell, each block is composed of a self-attention module that captures long-term dependencies within an utterance and a response, an interaction module that models the interaction between the utterance and the response, and a compression module that condenses the results of the first two modules into representations of the utterance and the response as output of the block. The output is then utilized as the input of the next block.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Before diving to details of the block, we first generally describe an attention mechanism that lays a foundation for the self-attention module and the interaction module. Let Q \u2208 R nq\u00d7d and K \u2208 R n k \u00d7d be a query and a key respectively, where n q and n k denote numbers of words and d is the embedding size, then attention from Q to K is defined asQ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "= S(Q, K) \u2022 K,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where S(\u2022, \u2022) is a function for attention weight calculation. Here, we exploit the symmetric function in (Huang et al., 2017b) as S(\u2022, \u2022) which is given by:", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 126, |
| "text": "(Huang et al., 2017b)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "S(Q, K) = softmax(f (QW)Df (KW) ). (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In Equation 2, f is a ReLU activation function, D is a diagonal matrix, and both D \u2208 R d\u00d7d and W \u2208 R d\u00d7d are parameters to estimate from training data. Intuitively, in Equation 1, each entry of K is weighted by an importance score defined by the similarity of an entry of Q and an entry of K. The entries of K are then linearly combined with the weights to form a new representation of Q. A residual connection (He et al., 2016 ) and a layer normalization (Ba et al., 2016) are then applied toQ asQ. After that,Q is fed to a feed forward network which is formulated as", |
| "cite_spans": [ |
| { |
| "start": 411, |
| "end": 427, |
| "text": "(He et al., 2016", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 456, |
| "end": 473, |
| "text": "(Ba et al., 2016)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "ReLU(QW 1 + b 1 )W 2 + b 2 ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where W {1,2} \u2208 R d\u00d7d and b {1,2} are parameters. The output of the attention mechanism is defined with the result of Equation (3) after another round of residual connection and layer normalization. For ease of presentation, we denote the entire attention mechanism as f AT T (Q, K). Let U k\u22121 and R k\u22121 be the input of the k-th block where U 0 = E u and R 0 = E r , then the self-attention module is defined a\u015d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "U k = f ATT (U k\u22121 , U k\u22121 ),", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "R k = f ATT (R k\u22121 , R k\u22121 ).", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The interaction module first lets U k\u22121 and R k\u22121 attend to each other by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "U k = f ATT (U k\u22121 , R k\u22121 ),", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "R k = f ATT (R k\u22121 , U k\u22121 ).", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Then U k\u22121 and R k\u22121 further interact with U k and R k respectively, which can be formulated as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "U k = U k\u22121 U k ,", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "R k = R k\u22121 R k ,", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where denotes element-wise multiplication. Finally, the compression module updates U k\u22121 and R k\u22121 to U k and R k as the output of the block.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Suppose that e k u,i and e k r,i are the i-th entries of U k and R k respectively, then e k u,i and e k r,i are calculated by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e k u,i = ReLU(w p \uf8ee \uf8ef \uf8ef \uf8f0 e k\u22121 u,\u00ee e k u,i e k u,\u0129 e k u,i \uf8f9 \uf8fa \uf8fa \uf8fb + b p ) + e k\u22121 u,i , (10) e k r,i = ReLU(w p \uf8ee \uf8ef \uf8ef \uf8f0 e k\u22121 r,\u00ee e k r,i e k r,\u0129 e k r,i \uf8f9 \uf8fa \uf8fa \uf8fb + b p ) + e k\u22121 r,i ,", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where w p \u2208 R 4d\u00d7d and b p are learnable projection weights and biases,\u00ea k {u,r},i , e k {u,r},i ,\u1ebd k {u,r},i , and e k\u22121 {u,r},i are the i-th entries of {\u00db,R} k , {U, R} k , {\u0168,R} k , and {U, R} k\u22121 , respectively. Inspired by , we also introduce direct connections from initial representations to all their corresponding subsequent blocks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Block", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Suppose that c = (u 1 , . . . , u l ) is a conversation context with u i the i-th utterance, then in the kth interaction block, we construct three similarity matrices by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "M k i,1 = U k\u22121 i \u2022 (R k\u22121 ) \u221a d , M k i,2 =\u00db k i \u2022 (R k ) \u221a d , M k i,3 = U k i \u2022 (R k ) \u221a d ,", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where U k\u22121 i and R k\u22121 are the input of the k-th block,\u00db k i andR k are defined by Equations (4-5), and U k i and R k are calculated by Equations (6-7).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The three matrices are then concatenated into a 3-D matching tensor T k i \u2208 R m i \u00d7n\u00d73 which can be written as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "T k i = M k i,1 \u2295 M k i,2 \u2295 M k i,3 ,", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where \u2295 denotes a concatenation operation, and m i and n refer to numbers of words in u i and r respectively. We exploit a convolutional neural network (Krizhevsky et al., 2012) to extract matching features from T k i . The output of the final feature maps are flattened and mapped to a d-dimensional matching vector v k i with a linear transformation.", |
| "cite_spans": [ |
| { |
| "start": 152, |
| "end": 177, |
| "text": "(Krizhevsky et al., 2012)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "(v k 1 , \u2022 \u2022 \u2022 , v k l )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "is then fed to a GRU (Chung et al., 2014) to capture the temporal relationship among (u 1 , . . . , u l ). \u2200i \u2208 {1, . . . , l}, the i-th hidden state of the GRU model is given by", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 41, |
| "text": "(Chung et al., 2014)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h k i = GRU(v k i , h k i\u22121 ),", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where h k 0 is randomly initialized. A matching score for context c and response candidate r in the k-th block is defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "g k (c, r) = \u03c3(h k l \u2022 w o + b o ),", |
| "eq_num": "(15)" |
| } |
| ], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where w o and b o are parameters, and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03c3(\u2022) is a sigmoid function. Finally, g(c, r) is defined by g(c, r) = L k=1 g k (c, r),", |
| "eq_num": "(16)" |
| } |
| ], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where L is the number of interaction blocks in IoI. Note that we define g(c, r) with all blocks rather than only with the last block. This is motivated by (1) only using the last block will make training of IoI difficult due to the gradient vanishing/exploding problem; and (2) different blocks may capture different levels of matching information in (c, r), and thus leveraging all of them could enhance matching accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Aggregation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We consider two strategies to learn an IoI model from the training data D. The first strategy estimates the parameters of IoI (denoted as \u0398) by minimizing a global loss function that is formulated as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Methods", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2212 N i=1 y i log(g(c i , r i ))+(1\u2212y i ) log(1\u2212g(c i , r i )) .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Methods", |
| "sec_num": "5" |
| }, |
| { |
| "text": "(17) In the second strategy, we construct a local loss function for each block and minimize the summation of the local loss functions. By this means, each block can be directly supervised by the labels in D during learning. The learning objective is then defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Methods", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2212 L k=1 N i=1 y i log(g k (c i , r i )) + (1 \u2212 y i ) log(1 \u2212 g k (c i , r i )) .", |
| "eq_num": "(18)" |
| } |
| ], |
| "section": "Learning Methods", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We compare the two learning strategies through empirical studies, as will be reported in the next section. In both strategies, \u0398 are optimized using back-propagation with the Adam algorithm (Kingma and Ba, 2015).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Methods", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We test the proposed IoI on three benchmark data sets for multi-turn response selection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The first data we use is the Ubuntu Dialogue Corpus (Lowe et al., 2015) which is a multi-turn English conversation data set constructed from chat logs of the Ubuntu forum. We use the version provided by Xu et al. (2017) . The data contains 1 million context-response pairs for training, and 0.5 million pairs for validation and test. In all the three sets, positive responses are human responses, while negative ones are randomly sampled. The ratio of the positive and the negative is 1:1 in the training set, and 1:9 in both the validation set and the test set. Following Lowe et al. (2015), we employ recall at position k in n candidates (R n @k) as evaluation metrics.", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 71, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 203, |
| "end": 219, |
| "text": "Xu et al. (2017)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "The second data set is the Douban Conversation Corpus that consists of multi-turn Chinese conversations collected from Douban group 1 . There are 1 million context-response pairs for training, 50 thousand pairs for validation, and 6,670 pairs for testing. In the training set and the validation set, the last turn of each conversation is taken as a positive response and a negative response is randomly sampled. For each context in the test set, 10 response candidates are retrieved from an index and their appropriateness with regard to the context is annotated by human labelers. Following Wu et al. 2017, we employ R n @ks, mean average precision (MAP), mean reciprocal rank (MRR) and precision at position 1 (P@1) as evaluation metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Finally, we choose the E-commerce Dialogue Corpus (Zhang et al., 2018b) as an experimental data set. The data consists of multi-turn real-world conversations between customers and customer service staff in Taobao 2 , which is the largest e-commerce platform in China. It contains 1 million context-response pairs for training, and 10 thousand pairs for validation and test. Positive responses in this data are real human responses, and negative candidates are automatically constructed by ranking the response corpus based on conversation history augmented messages using Apache Lucene 3 . The ratio of the positive and the negative is 1:1 in training and validation, and 1:9 in test. Following (Zhang et al., 2018b) , we employ R 10 @1, R 10 @2, and R 10 @5 as evaluation metrics.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 71, |
| "text": "(Zhang et al., 2018b)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 695, |
| "end": 716, |
| "text": "(Zhang et al., 2018b)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "We compare IoI with the following models:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Single-turn Matching Models: these models, including RNN (Lowe et al., 2015) , CNN (Lowe et al., 2015) , LSTM (Lowe et al., 2015) , BiL-STM (Kadlec et al., 2015) , MV-LSTM (Wan et al., 2016) and Match-LSTM (Wang and Jiang, 2016) , perform context-response matching by concatenating all utterances in a context into a single long document and calculating a matching score between the document and a response candidate.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 76, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 83, |
| "end": 102, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 110, |
| "end": 129, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 140, |
| "end": 161, |
| "text": "(Kadlec et al., 2015)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 164, |
| "end": 190, |
| "text": "MV-LSTM (Wan et al., 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 206, |
| "end": 228, |
| "text": "(Wang and Jiang, 2016)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Multi-View (Zhou et al., 2016) : the model calculates matching degree between a context and a response candidate from both a word sequence view and an utterance sequence view.", |
| "cite_spans": [ |
| { |
| "start": 11, |
| "end": 30, |
| "text": "(Zhou et al., 2016)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "DL2R : the model first reformulates the last utterance with previous turns in a context with different approaches. A response candidate and the reformulated message are then represented by a composition of RNN and CNN.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Finally, a matching score is computed with the concatenation of the representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "SMN : the model lets each utterance in a context interact with a response candidate at the beginning, and then transforms interaction matrices into a matching vector with CNN. The matching vectors are finally accumulated with an RNN as a matching score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "DUA (Zhang et al., 2018b) : the model considers the relationship among utterances within a context by exploiting deep utterance aggregation to form a fine-grained context representation. Each refined utterance then matches with a response candidate, and their matching degree is finally calculated through an aggregation on turns.", |
| "cite_spans": [ |
| { |
| "start": 4, |
| "end": 25, |
| "text": "(Zhang et al., 2018b)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "DAM : the model lets each utterance in a context interact with a response candidate at different levels of representations obtained by a stacked self-attention module and a cross-attention module.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "For the Ubuntu data and the Douban data, since results of all baselines under fine-tuning are available in , we directly copy the numbers from the paper. For the E-commerce data, Zhang et al. (2018b) report performance of all baselines except DAM. Thus, we copy all available numbers from the paper and implement DAM with the published code 4 . In order to conduct statistical tests, we also run the code of DAM on the Ubuntu data and the Douban data.", |
| "cite_spans": [ |
| { |
| "start": 179, |
| "end": 199, |
| "text": "Zhang et al. (2018b)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "In IoI, we set the size of word embedding as 200. For the CNN in matching aggregation, we set the window size of convolution and pooling kernels as (3, 3), and the strides as (1, 1) and (3, 3) respectively. The number of convolution kernels is 32 in the first layer and 16 in the second layer. The dimension of the hidden states of GRU is set as 200. Following Wu et al. 2017, we limit the length of a context to 10 turns and the length of an utterance (either from a context or from a response candidate) to 50 words. Truncation or zero-padding is applied to a context or a response candidate when necessary. We gradually increase the number of interaction blocks (i.e., L) in IoI, and finally set L = 7 in comparison with the baseline models. In optimization, we choose 0.2 as a dropout rate, and 50 as the size of mini-batches. The learning rate is initialized as 0.0005, and exponentially decayed", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "Ubuntu Corpus Douban Corpus R 2 @1 R 10 @1 R 10 @2 R 10 @5 MAP MRR P@1 R 10 @1 R 10 @2 R 10 @5 RNN (Lowe et al., 2015) 0.768 0.403 0.547 0.819 0.390 0.422 0.208 0.118 0.223 0.589 CNN (Lowe et al., 2015) 0.848 0.549 0.684 0.896 0.417 0.440 0.226 0.121 0.252 0.647 LSTM (Lowe et al., 2015) 0.901 0.638 0.784 0.949 0.485 0.527 0.320 0.187 0.343 0.720 BiLSTM (Kadlec et al., 2015) 0.895 0.630 0.780 0.944 0.479 0.514 0.313 0.184 0.330 0.716 DL2R 0 Table 1 : Evaluation results on the Ubuntu data and the Douban data. Numbers in bold mean that the improvement to the best performing baseline is statistically significant (t-test with p-value < 0.05).", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 118, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 183, |
| "end": 202, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 268, |
| "end": 287, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 355, |
| "end": 376, |
| "text": "(Kadlec et al., 2015)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 444, |
| "end": 451, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
| { |
| "text": "Models Metrics R 10 @1 R 10 @2 R 10 @5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
| { |
| "text": "RNN (Lowe et al., 2015) 0.325 0.463 0.775 CNN (Lowe et al., 2015) 0.328 0.515 0.792 LSTM (Lowe et al., 2015) 0.365 0.536 0.828 BiLSTM (Kadlec et al., 2015) 0.355 0.525 0.825 DL2R 0.399 0.571 0.842 MV-LSTM (Wan et al., 2016) 0.412 0.591 0.857 Match-LSTM (Wang and Jiang, 2016) 0.410 0.590 0.858 Multi-View (Zhou et al., 2016) 0.421 0.601 0.861 SMN 0.453 0.654 0.886 DUA (Zhang et al., 2018b) 0.501 0.700 0.921 DAM 0 during training. Table 1 and Table 2 report evaluation results on the three data sets where IoI-global and IoI-local represent models learned with Objective (17) and Objective (18) respectively. We can see that both IoIlocal and IoI-global outperform the best performing baseline, and improvements from IoI-local on all metrics and from IoI-global on a few metrics are statistically significant (t-test with p-value < 0.05). IoI-local is consistently better than IoIglobal over all metrics on all the three data sets, demonstrating that directly supervising each block in learning can lead to a more optimal deep structure than optimizing the final matching model.", |
| "cite_spans": [ |
| { |
| "start": 4, |
| "end": 23, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 46, |
| "end": 65, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 89, |
| "end": 108, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 134, |
| "end": 155, |
| "text": "(Kadlec et al., 2015)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 197, |
| "end": 223, |
| "text": "MV-LSTM (Wan et al., 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 253, |
| "end": 275, |
| "text": "(Wang and Jiang, 2016)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 305, |
| "end": 324, |
| "text": "(Zhou et al., 2016)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 369, |
| "end": 390, |
| "text": "(Zhang et al., 2018b)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 432, |
| "end": 451, |
| "text": "Table 1 and Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
| { |
| "text": "In this section, we make some further analysis with IoI-local to understand (1) how depth of in- teraction affects the performance of IoI;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "(2) how context length affects the performance of IoI; and (3) importance of different components of IoI with respect to matching accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "Impact of interaction depth. Figure 2 illustrates how the performance of IoI changes with respect to the number of interaction blocks on test sets of the three data. From the chart, we observe a consistent trend over the three data sets: there is significant improvement during the first few blocks, and then the performance of the model becomes stable. The results indicate that depth of interaction indeed matters in terms of matching accuracy. With shallow interaction (L = 1), IoI performs worse than DAM on the Douban data and the E-commerce data. Only after the interaction goes deep (L \u2265 5), improvement from IoI Models Metrics Ubuntu data Douban data E-commerce data R 2 @1 R 10 @1 R 10 @2 MAP MRR P@1 R 10 @1 R 10 @2 R 10 @ to DAM on the two data becomes significant. On the Ubuntu data, improvement to DAM from the deep model (L = 7) is more than twice as much as that from the shallow model (L = 1). The performance of IoI becomes stable earlier on the Ubuntu data than it does on the other two data. This may stem from the different nature of test sets of the three data. The test set of the Ubuntu data is in large size and built by random sampling, while the test sets of the other two data are smaller and constructed through response retrieval.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 29, |
| "end": 37, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "Impact of context length. Context length is measured by (1) number of turns in a context and (2) average length of utterances in a context. Figure 3 shows how the performance of IoI varies across contexts with different lengths, where we bin test examples of the Ubuntu data into buckets and compare IoI (L = 7) with its shallow version (L = 1) and DAM. We find that (1) IoI, either in a deep form or in a shallow form, is good at dealing with contexts with long utterances, as the model achieves better performance on longer utterances;", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 140, |
| "end": 148, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "(2) overall, IoI performs well on contexts with more turns, although too many turns (e.g., \u2265 8) is still challenging; (3) a deep form of our model is always better than its shallow form, no matter how we measure context length, and the gap between the two forms is bigger on short contexts than it is on long contexts, indicating that depth mainly improves matching accuracy on short contexts; and (4) trends of DAM in both charts are consistent with those reported in , and on both short contexts and long contexts, IoI is superior to DAM.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "Ablation study. Finally, we examine how different components of IoI affect its performance. First, we remove e k\u22121 u,i (e k\u22121 r,i ), \u00ea k u,i (\u00ea k r,i ), e k u,i (e k r,i ), and \u1ebd k u,i (\u1ebd k r,i ) one by one from Equation (10) and Equation (11), and denote the models as IoI-E, IoI-\u00ca, IoI-E, and IoI-\u1ebc respectively. Then, we keep all representations in Equation (10) and Equation (11), and remove M k i,1 , M k i,2 , and M k i,3 one by one from Equation (13). The models are named IoI-M 1 , IoI-M 2 , and IoI-M 3 respectively. Table 3 reports the ablation results 5 . We conclude that (1) all representations are useful in representing the information flow along the chain of interaction blocks and capturing the matching information between an utterance-response pair within the blocks, as removing any component generally causes performance drop on all the three data sets; and (2) in terms of component importance, E > E > E >\u1ebc and M 2 > M 1 \u2248 M 3 , meaning that self-attention (i.e., \u00ca) and cross-attention (i.e., E) are more important than others in information flow representation, and self-attention (i.e., those used for calculating M 2 ) convey more matching signals. Note that these results are obtained with IoI (L = 7). We also check the ablation results of IoI (L = 1) and do not see much difference on overall trends and relative gaps among different ablated models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 526, |
| "end": 533, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "We present an interaction-over-interaction network (IoI) that lets utterance-response interaction in context-response matching go deep. Depth of the model comes from stacking multiple interaction blocks that execute representation-interaction-representation in an iterative manner. Evaluation results on three benchmarks indicate that IoI can significantly outperform baseline methods with moderate depth. In the future, we plan to integrate our IoI model with models like ELMo (Peters et al., 2018) and BERT (Devlin et al., 2018) to study if the performance of IoI can be further improved.", |
| "cite_spans": [ |
| { |
| "start": 509, |
| "end": 530, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "https://www.douban.com/group", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.taobao.com 3 http://lucene.apache.org/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/baidu/Dialogue", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Due to space limitation, we only report results on main metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the anonymous reviewers for their constructive comments. This work was supported by the National Key Research and Development Program of China (No. 2017YFC0804001), the National Science Foundation of China (NSFC Nos. 61672058 and 61876196).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgement", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Empirical evaluation of gated recurrent neural networks on sequence modeling", |
| "authors": [ |
| { |
| "first": "Junyoung", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.3555" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence model- ing. arXiv preprint arXiv:1412.3555.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Very deep convolutional networks for text classification", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1107--1116", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Holger Schwenk, Lo\u00efc Barrault, and Yann Lecun. 2017. Very deep convolutional net- works for text classification. In Proceedings of the 15th Conference of the European Chapter of the As- sociation for Computational Linguistics: Volume 1, Long Papers, pages 1107-1116.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of deep bidirectional transformers for language under- standing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Deep residual learning for image recognition", |
| "authors": [ |
| { |
| "first": "Kaiming", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaoqing", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "770--778", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recog- nition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770- 778.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Convolutional neural network architectures for matching natural language sentences", |
| "authors": [ |
| { |
| "first": "Baotian", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengdong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Qingcai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2042--2050", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Baotian Hu, Zhengdong Lu, Hang Li, and Qingcai Chen. 2014. Convolutional neural network archi- tectures for matching natural language sentences. In Advances in Neural Information Processing Sys- tems, pages 2042-2050.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Densely connected convolutional networks", |
| "authors": [ |
| { |
| "first": "Gao", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhuang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurens", |
| "middle": [], |
| "last": "Van Der Maaten", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian Q", |
| "middle": [], |
| "last": "Weinberger", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "4700--4708", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. 2017a. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recogni- tion, pages 4700-4708.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "FusionNet: Fusing via fullyaware attention with application to machine comprehension", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hsin-Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenguang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yelong", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Weizhu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hsin-Yuan Huang, Chenguang Zhu, Yelong Shen, and Weizhu Chen. 2017b. FusionNet: Fusing via fully- aware attention with application to machine compre- hension. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Improved deep learning baselines for ubuntu corpus dialogs", |
| "authors": [ |
| { |
| "first": "Rudolf", |
| "middle": [], |
| "last": "Kadlec", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Schmid", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1510.03753" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rudolf Kadlec, Martin Schmid, and Jan Kleindienst. 2015. Improved deep learning baselines for ubuntu corpus dialogs. arXiv preprint arXiv:1510.03753.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Semantic sentence matching with densely-connected recurrent and co-attentive information", |
| "authors": [ |
| { |
| "first": "Seonhoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Jin-Hyuk", |
| "middle": [], |
| "last": "Hong", |
| "suffix": "" |
| }, |
| { |
| "first": "Inho", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nojun", |
| "middle": [], |
| "last": "Kwak", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1805.11360" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seonhoon Kim, Jin-Hyuk Hong, Inho Kang, and No- jun Kwak. 2018. Semantic sentence matching with densely-connected recurrent and co-attentive infor- mation. arXiv preprint arXiv:1805.11360.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Imagenet classification with deep convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1097--1105", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hin- ton. 2012. Imagenet classification with deep con- volutional neural networks. In Advances in neural information processing systems, pages 1097-1105.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "AliMe assist: An intelligent assistant for creating an innovative e-commerce experience", |
| "authors": [ |
| { |
| "first": "Feng-Lin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Minghui", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haiqing", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiongwei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xing", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Juwei", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongzhou", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Weipeng", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 ACM on Conference on Information and Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "2495--2498", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Feng-Lin Li, Minghui Qiu, Haiqing Chen, Xiong- wei Wang, Xing Gao, Jun Huang, Juwei Ren, Zhongzhou Zhao, Weipeng Zhao, Lei Wang, et al. 2017a. AliMe assist: An intelligent assistant for cre- ating an innovative e-commerce experience. In Pro- ceedings of the 2017 ACM on Conference on Infor- mation and Knowledge Management, pages 2495- 2498.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A diversity-promoting objective function for neural conversation models", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "110--119", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-1014" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2015. A diversity-promoting objec- tive function for neural conversation models. Pro- ceedings of the 2016 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 110-119.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A persona-based neural conversation model", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgios", |
| "middle": [], |
| "last": "Spithourakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "994--1003", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1094" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Georgios Sp- ithourakis, Jianfeng Gao, and Bill Dolan. 2016a. A persona-based neural conversation model. In Asso- ciation for Computational Linguistics, pages 994- 1003.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Deep reinforcement learning for dialogue generation", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1192--1202", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1127" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Will Monroe, Alan Ritter, Dan Jurafsky, Michel Galley, and Jianfeng Gao. 2016b. Deep rein- forcement learning for dialogue generation. In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing, pages 1192- 1202.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Adversarial learning for neural dialogue generation", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianlin", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00e9bastien", |
| "middle": [], |
| "last": "Jean", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2157--2169", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Will Monroe, Tianlin Shi, S\u00e9bastien Jean, Alan Ritter, and Dan Jurafsky. 2017b. Adversarial learning for neural dialogue generation. In Proceed- ings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2157-2169.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The ubuntu dialogue corpus: A large dataset for research in unstructured multi-turn dialogue systems", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "Nissan", |
| "middle": [], |
| "last": "Pow", |
| "suffix": "" |
| }, |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "285--294", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W15-4640" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Lowe, Nissan Pow, Iulian Serban, and Joelle Pineau. 2015. The ubuntu dialogue corpus: A large dataset for research in unstructured multi-turn dia- logue systems. In Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 285-294.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in neural information processing systems, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Sequence to backward and forward sequences: A content-introducing approach to generative short-text conversation", |
| "authors": [ |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Mou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiping", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ge", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhi", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "3349--3358", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lili Mou, Yiping Song, Rui Yan, Ge Li, Lu Zhang, and Zhi Jin. 2016. Sequence to backward and for- ward sequences: A content-introducing approach to generative short-text conversation. In Proceedings of COLING 2016, the 26th International Confer- ence on Computational Linguistics: Technical Pa- pers, pages 3349-3358.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1202" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "End-to-end dialogue systems using generative hierarchical neural network models", |
| "authors": [ |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Vlad Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [ |
| "C" |
| ], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "3776--3784", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iulian Vlad Serban, Alessandro Sordoni, Yoshua Ben- gio, Aaron C. Courville, and Joelle Pineau. 2016. End-to-end dialogue systems using generative hier- archical neural network models. In AAAI, pages 3776-3784.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Neural responding machine for short-text conversation", |
| "authors": [ |
| { |
| "first": "Lifeng", |
| "middle": [], |
| "last": "Shang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengdong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1577--1586", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P15-1152" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lifeng Shang, Zhengdong Lu, and Hang Li. 2015. Neural responding machine for short-text conversa- tion. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing, pages 1577-1586.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "From Eliza to XiaoIce: Challenges and opportunities with social chatbots", |
| "authors": [ |
| { |
| "first": "Heung-Yeung", |
| "middle": [], |
| "last": "Shum", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Frontiers of IT & EE", |
| "volume": "19", |
| "issue": "1", |
| "pages": "10--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heung-Yeung Shum, Xiaodong He, and Di Li. 2018. From Eliza to XiaoIce: Challenges and opportuni- ties with social chatbots. Frontiers of IT & EE, 19(1):10-26.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "An ensemble of retrieval-based and generation-based humancomputer conversation systems", |
| "authors": [ |
| { |
| "first": "Yiping", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheng-Te", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian-Yun", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongyan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "In IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "4382--4388", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yiping Song, Rui Yan, Cheng-Te Li, Jian-Yun Nie, Ming Zhang, and Dongyan Zhao. 2018. An ensem- ble of retrieval-based and generation-based human- computer conversation systems. In IJCAI, pages 4382-4388.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Going deeper with convolutions", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Szegedy", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangqing", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Sermanet", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Reed", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Anguelov", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru", |
| "middle": [], |
| "last": "Erhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Vanhoucke", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Rabinovich", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Du- mitru Erhan, Vincent Vanhoucke, and Andrew Ra- binovich. 2015. Going deeper with convolutions. In Proceedings of the IEEE conference on computer vi- sion and pattern recognition, pages 1-9.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Get the point of my utterance! learning towards effective responses with multi-head attention mechanism", |
| "authors": [ |
| { |
| "first": "Chongyang", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "" |
| }, |
| { |
| "first": "Shen", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingyue", |
| "middle": [], |
| "last": "Shang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongyan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "4418--4424", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chongyang Tao, Shen Gao, Mingyue Shang, Wei Wu, Dongyan Zhao, and Rui Yan. 2018. Get the point of my utterance! learning towards effective responses with multi-head attention mechanism. In IJCAI, pages 4418-4424.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Co-stack residual affinity networks with multi-level attention refinement for matching text sequences", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Tay", |
| "suffix": "" |
| }, |
| { |
| "first": "Luu", |
| "middle": [ |
| "Anh" |
| ], |
| "last": "Tuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Siu Cheung", |
| "middle": [], |
| "last": "Hui", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "4492--4502", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Tay, Luu Anh Tuan, and Siu Cheung Hui. 2018. Co-stack residual affinity networks with multi-level attention refinement for matching text sequences. In Proceedings of the 2018 Conference on Empiri- cal Methods in Natural Language Processing, pages 4492-4502.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A neural conversational model", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.05869" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals and Quoc Le. 2015. A neural conversa- tional model. arXiv preprint arXiv:1506.05869.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Match-srnn: Modeling the recursive matching structure with spatial rnn", |
| "authors": [ |
| { |
| "first": "Shengxian", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanyan", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiafeng", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xueqi", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "2922--2928", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shengxian Wan, Yanyan Lan, Jun Xu, Jiafeng Guo, Liang Pang, and Xueqi Cheng. 2016. Match-srnn: Modeling the recursive matching structure with spa- tial rnn. In IJCAI, pages 2922-2928.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "A dataset for research on short-text conversations", |
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengdong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Enhong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "935--945", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Wang, Zhengdong Lu, Hang Li, and Enhong Chen. 2013. A dataset for research on short-text conversations. In Proceedings of the 2013 Confer- ence on Empirical Methods in Natural Language Processing, pages 935-945.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Syntax-based deep matching of short texts", |
| "authors": [ |
| { |
| "first": "Mingxuan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengdong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Twenty-Fourth International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "1354--1361", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mingxuan Wang, Zhengdong Lu, Hang Li, and Qun Liu. 2015. Syntax-based deep matching of short texts. In Proceedings of the Twenty-Fourth Inter- national Joint Conference on Artificial Intelligence, pages 1354-1361.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Learning natural language inference with LSTM", |
| "authors": [ |
| { |
| "first": "Shuohang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1442--1451", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-1170" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuohang Wang and Jing Jiang. 2016. Learning nat- ural language inference with LSTM. In Proceed- ings of the 2016 Conference of the North Ameri- can Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1442-1451.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Learning matching models with weak supervision for response selection in retrieval-based chatbots", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhoujun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "420--425", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Wu, Wei Wu, Zhoujun Li, and Ming Zhou. 2018a. Learning matching models with weak supervision for response selection in retrieval-based chatbots. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 2: Short Papers), pages 420-425.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "A sequential matching framework for multi-turn response selection in retrieval-based chatbots", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Xing", |
| "suffix": "" |
| }, |
| { |
| "first": "Can", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhoujun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Computational Linguistics", |
| "volume": "45", |
| "issue": "1", |
| "pages": "163--197", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/coli_a_00345" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Wu, Wei Wu, Chen Xing, Can Xu, Zhoujun Li, and Ming Zhou. 2018b. A sequential match- ing framework for multi-turn response selection in retrieval-based chatbots. Computational Linguis- tics, 45(1):163-197.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Sequential matching network: A new architecture for multi-turn response selection in retrieval-based chatbots", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Xing", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhoujun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "496--505", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1046" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Wu, Wei Wu, Chen Xing, Ming Zhou, and Zhou- jun Li. 2017. Sequential matching network: A new architecture for multi-turn response selection in retrieval-based chatbots. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics, volume 1, pages 496-505.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Topic aware neural response generation", |
| "authors": [ |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Xing", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yalou", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Ying", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "3351--3357", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen Xing, Wei Wu, Yu Wu, Jie Liu, Yalou Huang, Ming Zhou, and Wei-Ying Ma. 2017. Topic aware neural response generation. In AAAI, pages 3351- 3357.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Incorporating loosestructured knowledge into LSTM with recall gate for conversation modeling", |
| "authors": [ |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bingquan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Baoxun", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengjie", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaolong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 International Joint Conference on Neural Networks", |
| "volume": "", |
| "issue": "", |
| "pages": "3506--3513", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhen Xu, Bingquan Liu, Baoxun Wang, Chengjie Sun, and Xiaolong Wang. 2017. Incorporating loose- structured knowledge into LSTM with recall gate for conversation modeling. In Proceedings of the 2017 International Joint Conference on Neural Networks, pages 3506-3513.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Learning to respond with deep neural networks for retrieval-based human-computer conversation system",
| "authors": [ |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiping", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "SIGIR",
| "volume": "", |
| "issue": "", |
| "pages": "55--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rui Yan, Yiping Song, and Hua Wu. 2016. Learning to respond with deep neural networks for retrieval- based human-computer conversation system. In SI- GIR, pages 55-64.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Coupled context modeling for deep chit-chat: towards conversations between human and computer", |
| "authors": [ |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongyan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "2574--2583", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rui Yan and Dongyan Zhao. 2018. Coupled context modeling for deep chit-chat: towards conversations between human and computer. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 2574- 2583. ACM.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Personalizing dialogue agents: I have a dog, do you have pets too?", |
| "authors": [ |
| { |
| "first": "Saizheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Dinan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jack", |
| "middle": [], |
| "last": "Urbanek", |
| "suffix": "" |
| }, |
| { |
| "first": "Arthur", |
| "middle": [], |
| "last": "Szlam", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2204--2213", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur Szlam, Douwe Kiela, and Jason Weston. 2018a. Personalizing dialogue agents: I have a dog, do you have pets too? In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2204- 2213.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Modeling multi-turn conversation with deep utterance aggregation",
| "authors": [ |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiangtong", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengfei", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Gongshen", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3740--3752", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuosheng Zhang, Jiangtong Li, Pengfei Zhu, Hai Zhao, and Gongshen Liu. 2018b. Modeling multi- turn conversation with deep utterance aggregation. In Proceedings of the 27th International Conference on Computational Linguistics, pages 3740-3752. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Learning discourse-level diversity for neural dialog models using conditional variational autoencoders", |
| "authors": [ |
| { |
| "first": "Tiancheng", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Ran", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxine", |
| "middle": [], |
| "last": "Eskenazi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "654--664", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tiancheng Zhao, Ran Zhao, and Maxine Eskenazi. 2017. Learning discourse-level diversity for neural dialog models using conditional variational autoen- coders. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 654-664.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Emotional chatting machine: Emotional conversation generation with internal and external memory", |
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Minlie", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianyang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "The Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "730--738", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Zhou, Minlie Huang, Tianyang Zhang, Xiaoyan Zhu, and Bing Liu. 2018a. Emotional chatting ma- chine: Emotional conversation generation with in- ternal and external memory. In The Thirty-Second AAAI Conference on Artificial Intelligence, pages 730-738.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Multi-view response selection for human-computer conversation", |
| "authors": [ |
| { |
| "first": "Xiangyang", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Daxiang", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiqi", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Dianhai", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "372--381", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1036" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiangyang Zhou, Daxiang Dong, Hua Wu, Shiqi Zhao, Dianhai Yu, Hao Tian, Xuan Liu, and Rui Yan. 2016. Multi-view response selection for human-computer conversation. In Proceedings of the 2016 Confer- ence on Empirical Methods in Natural Language Processing, pages 372-381.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Multi-turn response selection for chatbots with deep attention matching network", |
| "authors": [ |
| { |
| "first": "Xiangyang", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Daxiang", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ying", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wayne", |
| "middle": [ |
| "Xin" |
| ], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Dianhai", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1118--1127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiangyang Zhou, Lu Li, Daxiang Dong, Yi Liu, Ying Chen, Wayne Xin Zhao, Dianhai Yu, and Hua Wu. 2018b. Multi-turn response selection for chatbots with deep attention matching network. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), volume 1, pages 1118-1127.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Performance of IoI under different numbers of the interaction blocks." |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "(b) R10@1 vs. Number of turns" |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Performance of IoI across contexts with different lengths on the Ubuntu data." |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Numbers in bold mean that the improvement to the</td></tr><tr><td>best performing baseline is statistically significant (t-</td></tr><tr><td>test with p-value < 0.05).</td></tr></table>", |
| "html": null, |
| "text": "Evaluation results on the E-commerce data." |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td/><td/><td/><td/><td/><td>0.81</td><td/><td/></tr><tr><td/><td>0.850</td><td/><td/><td/><td>0.80</td><td/><td/></tr><tr><td/><td>0.825</td><td/><td/><td/><td>0.79</td><td/><td/></tr><tr><td>R10@1</td><td>0.775 0.800</td><td/><td/><td>R10@1</td><td>0.78</td><td/><td/></tr><tr><td/><td/><td/><td/><td/><td>0.77</td><td/><td/></tr><tr><td/><td>0.750 0.725</td><td/><td/><td>DAM IoI-1L IoI-7L</td><td>0.76</td><td/><td/><td>DAM IoI-1L IoI-7L</td></tr><tr><td/><td>(0, 10]</td><td>(10, 20]</td><td>(20, 30]</td><td>(30, 50]</td><td>0.75</td><td>[2, 4]</td><td>[5, 7]</td><td>[8, 10]</td></tr><tr><td/><td/><td colspan=\"3\">Average utterance length (words)</td><td/><td/><td>Context length (turns)</td></tr><tr><td/><td colspan=\"4\">(a) R10@1 vs. Average utterance length</td><td/><td/><td/></tr></table>", |
| "html": null, |
| "text": "Evaluation results of the ablation study on the three data sets." |
| } |
| } |
| } |
| } |