| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:31:28.537279Z" |
| }, |
| "title": "Conversation-and Tree-Structure Losses for Dialogue Disentanglement", |
| "authors": [ |
| { |
| "first": "Tianda", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Nankai University", |
| "location": { |
| "settlement": "Tianjin", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Jia-Chen", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Science and Technology of China", |
| "location": { |
| "settlement": "Hefei", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Science and Technology of China", |
| "location": { |
| "settlement": "Hefei", |
| "country": "China" |
| } |
| }, |
| "email": "zhling@ustc.edu.cn" |
| }, |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "State Key Laboratory of Cognitive Intelligence", |
| "institution": "iFLYTEK Research", |
| "location": { |
| "settlement": "Hefei", |
| "country": "China" |
| } |
| }, |
| "email": "quanliu@iflytek.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "When multiple conversations occur simultaneously, a listener must decide which conversation each utterance is part of in order to interpret and respond to it appropriately. This task is referred as dialogue disentanglement. A significant drawback of previous studies on disentanglement lies in that they only focus on pair-wise relationships between utterances while neglecting the conversation structure which is important for conversation structure modeling. In this paper, we propose a hierarchical model, named Dialogue BERT (DIALBERT), which integrates the local and global semantics in the context range by using BERT to encode each message-pair and using BiLSTM to aggregate the chronological context information into the output of BERT. In order to integrate the conversation structure information into the model, two types of loss of conversation-structure loss and tree-structure loss are designed. In this way, our model can implicitly learn and leverage the conversation structures without being restricted to the lack of explicit access to such structures during the inference stage. Experimental results on two large datasets show that our method outperforms previous methods by substantial margins, achieving great performance on dialogue disentanglement.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "When multiple conversations occur simultaneously, a listener must decide which conversation each utterance is part of in order to interpret and respond to it appropriately. This task is referred as dialogue disentanglement. A significant drawback of previous studies on disentanglement lies in that they only focus on pair-wise relationships between utterances while neglecting the conversation structure which is important for conversation structure modeling. In this paper, we propose a hierarchical model, named Dialogue BERT (DIALBERT), which integrates the local and global semantics in the context range by using BERT to encode each message-pair and using BiLSTM to aggregate the chronological context information into the output of BERT. In order to integrate the conversation structure information into the model, two types of loss of conversation-structure loss and tree-structure loss are designed. In this way, our model can implicitly learn and leverage the conversation structures without being restricted to the lack of explicit access to such structures during the inference stage. Experimental results on two large datasets show that our method outperforms previous methods by substantial margins, achieving great performance on dialogue disentanglement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In a multi-party chat stream (Traum, 2004; Uthus and Aha, 2013; Ouchi and Tsuboi, 2016; Gu et al., 2021) , messages related to different topics are entangled with each other, which makes it difficult for a new user to understand the context of the discussion in the chat room. Dialogue disentanglement (Kummerfeld et al., 2019; Gu et al., 2020b; Yu and Joty, 2020; Liu et al., 2021a,b) aims at disentangling a whole conversation into several threads from a data stream so that each thread is about a specific topic. Early research either did not release their datasets (Adams and In this example, conversations marked with different colours are entangled together. This task aims to separate this chat stream by conversations. Martell, 2008; Wang et al., 2008) or used small datasets (Elsner and Charniak, 2008; Elsner and Schudy, 2009; Wang and Oard, 2009; Charniak, 2010, 2011; Jiang et al., 2018) . Kummerfeld et al. (2019) released a new largescale dataset that made it possible to train a more complex model and to fairly compare different models. Figure 1 shows an example of dialogue disentanglement in this dataset.", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 42, |
| "text": "(Traum, 2004;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 43, |
| "end": 63, |
| "text": "Uthus and Aha, 2013;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 64, |
| "end": 87, |
| "text": "Ouchi and Tsuboi, 2016;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 88, |
| "end": 104, |
| "text": "Gu et al., 2021)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 302, |
| "end": 327, |
| "text": "(Kummerfeld et al., 2019;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 328, |
| "end": 345, |
| "text": "Gu et al., 2020b;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 346, |
| "end": 364, |
| "text": "Yu and Joty, 2020;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 365, |
| "end": 385, |
| "text": "Liu et al., 2021a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 727, |
| "end": 741, |
| "text": "Martell, 2008;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 742, |
| "end": 760, |
| "text": "Wang et al., 2008)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 784, |
| "end": 811, |
| "text": "(Elsner and Charniak, 2008;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 812, |
| "end": 836, |
| "text": "Elsner and Schudy, 2009;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 837, |
| "end": 857, |
| "text": "Wang and Oard, 2009;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 858, |
| "end": 879, |
| "text": "Charniak, 2010, 2011;", |
| "ref_id": null |
| }, |
| { |
| "start": 880, |
| "end": 899, |
| "text": "Jiang et al., 2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 902, |
| "end": 926, |
| "text": "Kummerfeld et al. (2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1053, |
| "end": 1061, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Currently, most of the existing methods for dialogue disentanglement employ a two-step approach framework. Firstly, a model is employed to determine the relation between two messages. Then a clustering algorithm is employed to separate these messages into different conversation clusters. Following this framework, proposed a BERT-based model named Masked Hierarchical Transformer (MHT), which aims at making use of the conversation structures. This method uses a mask mechanism to explicitly build connections between context messages and their corresponding ancestors in a conversation. However, the main drawback of their approach is that the designed mask is computed based on the parents' relation of each message given the whole conversation, which is only available during the training stage. In order to deal with the lack of masks during the inference stage, they construct the pseudo mask label based on the predicted relations between any message-pair. However, the pseudo mask label cannot introduce reliable conversation structure information, especially when models cannot achieve a perfect prediction performance on relevant datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we follow this two-step approach framework and propose a hierarchical BERT-based model, named Dialogue BERT (DIALBERT) for dialogue disentanglement. DIALBERT first use BERT (Devlin et al., 2019) to capture the matching information in each message pair. Then, a contextlevel BiLSTM is employed to aggregate and incorporate the context information. The semantics similarity of each message pair is measured by calculating their matching scores, and the message that has the highest matching score with the target message is regarded as the parent message of it. In addition, we aim at introducing and making use of conversation structures to help DIALBERT to make decision by training DIALBERT with two extra types of loss of conversation-structure loss and tree-structure loss. In this way, the model can implicitly learn and leverage conversation structures without being restricted to the lack of explicit access to such structures during inference.", |
| "cite_spans": [ |
| { |
| "start": 187, |
| "end": 208, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We evaluate our method on two large datasets releasaed by Kummerfeld et al. (2019) and respectively. Experimental results show our proposed method outperforms previous methods in terms of various evaluation metrics.", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 82, |
| "text": "Kummerfeld et al. (2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In summary, our contributions in this paper are three-fold: (1) A hierarchical model named DIAL-BERT is proposed for dialogue disentanglement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(2) Two losses of conversation-and tree-structure losses are introduced to make use of the structures of the conversation history. (3) The performance of the proposed method is evaluated on two large datasets, and the ablation studies further verified the effectiveness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The research for dialogue disentanglement dates back to Aoki et al. (2003) which conducted a study of voice conversations among 8-10 people with an average of 1.76 activate conversations at any given time. In recent studies, the mainstream method for dialogue disentanglement is the two-step approach: firstly, a neural network is used to determine the relation between two messages. Then a clustering algorithm is adopted to separate messages into different conversations. In the first step, Mehri and Carenini (2017) used recurrent neural networks(RNNs) to model adjacent messages. Jiang et al. (2018) was the first work that used convolutional neural networks to estimate the conversationlevel similarity between closely posted messages. proposed a Masked Hierarchical Transformer based on BERT to calculate the matching score by using conversation structures. In addition to neural networks, statistical (Du et al., 2017) and linguistic features (Elsner and Charniak, 2008 , 2010 , 2011 Mayfield et al., 2012) have also been used in the existing research. In the clustering stage, some research proposed the clustering algorithm by using threshold such as Jiang et al. (2018) . Most studies grouped two messages with the highest matching score into the same conversation. In our study, we follow this mainstream setting. (N ) represents a list of messages and each message belongs to a specific conversation. Following the setting of previous studies (Elsner and Charniak, 2008 , 2010 , 2011 Mayfield et al., 2012; Jiang et al., 2018) , in order to find the parent message of a target message, T \u2212 1 messages occurring before this target message and itself form the context message set of this target message. The target message is a word sequence that can be represented by", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 74, |
| "text": "Aoki et al. (2003)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 493, |
| "end": 518, |
| "text": "Mehri and Carenini (2017)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 584, |
| "end": 603, |
| "text": "Jiang et al. (2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 908, |
| "end": 925, |
| "text": "(Du et al., 2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 950, |
| "end": 976, |
| "text": "(Elsner and Charniak, 2008", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 977, |
| "end": 983, |
| "text": ", 2010", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 984, |
| "end": 990, |
| "text": ", 2011", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 991, |
| "end": 1013, |
| "text": "Mayfield et al., 2012)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1160, |
| "end": 1179, |
| "text": "Jiang et al. (2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1325, |
| "end": 1329, |
| "text": "(N )", |
| "ref_id": null |
| }, |
| { |
| "start": 1455, |
| "end": 1481, |
| "text": "(Elsner and Charniak, 2008", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1482, |
| "end": 1488, |
| "text": ", 2010", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1489, |
| "end": 1495, |
| "text": ", 2011", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1496, |
| "end": 1518, |
| "text": "Mayfield et al., 2012;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1519, |
| "end": 1538, |
| "text": "Jiang et al., 2018)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Given a dataset D, M (1) , M (2) , ..., M", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "M T = m T 1 , m T 2 , .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": ".., m T n T , and each context message is a word sequence that can be represented by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "M i = m i 1 , m i 2 , .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": ".., m i n i , where n T and n i are the sequence length of messages and i \u2208 {1, 2, ..., T }. Every target message has a label Y \u2208 {1, 2, ..., T } indicating which message in context range is the parent message of the target message (each message has and only has one parent message). Our goal is to learn a prediction model to predict which message in M 1 , M 2 , ..., M T is the parent message of the target message M T for T \u2208 {1, 2, .., N }. Note that if the target message is the first message of a conversation, the parent of the target message is itself.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "DIALBERT calculates the matching scores between the target message and its context messages. The overall architecture is shown in Figure 2 . The context message that has the largest matching score with the target message will be regarded as the parent message. For the second step, after we get the parent message of each target message, we group messages into different conversations based on the parental relations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 130, |
| "end": 138, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "DIALBERT", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In order to take context semantics in a chat into consideration, T \u2212 1 preceding messages of the target message are used along with the target message to form the context message set. Specifically, every context message will be concatenated with the target message to form a message pair. Then, all the message pairs will be combined together as a single input to predict the parent message of each target message. The input u i can be formulated as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-Aware Input", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "u i = cls, m T 1 , ..., m T n T , sep, m i 1 , ..., m i n i , sep , where U = {u i } T i=1 . i \u2208 [1, 2, ..., T ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-Aware Input", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "is the index of the context message. cls and sep are the start and separation tokens predefined in BERT, respectively. Note that u T is composed of two target messages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-Aware Input", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "A strategy to consider context is to concatenate the context messages with the target message. But this strategy weakens the relationships between each context message as they are organized in chronological order in the chat stream.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context BERT Module", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "In order to better consider the chronological order information of context messages, we propose a context BERT module to encode the history context by using both BERT and a BiLSTM model. Specifically, we encode input U by adopting BERT, and the output of the reserved cls will be used as feature vectors E = {e i } T i=1 . Each feature vector e i contains the semantics in its corresponding message pair. In addition, we further encode the feature vectors E with a single layer Bi-LSTM to obtain the high-order feature vectors F, which have captured the semantics of history context and can be represented as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context BERT Module", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "{f i } T i=1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context BERT Module", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "The formulae of the calculation are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context BERT Module", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "e i = BERT(u i ), \u2200i \u2208 [1, 2, ..., T ],", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context BERT Module", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "(1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context BERT Module", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f i = BiLSTM(e i ), \u2200i \u2208 [1, 2, ..., T ], (2) m = Softmax(Linear(F)),", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Context BERT Module", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "where the dimension of the hidden units in a BiLSTM layer is k. m = {m i } T i=1 are matching degrees that will be used to calculate the treestructure loss in Section 4.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context BERT Module", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "To model the higher-order interaction between the target message and its context messages, a heuristic classifier which has proved to be effective in different studies (Yoon et al., 2018; Chen et al., 2017 , is employed. Specifically, the interaction vectors G = {g i } T i=1 will be fed into a single layer classifier to get matching scores, with the following formulae:", |
| "cite_spans": [ |
| { |
| "start": 168, |
| "end": 187, |
| "text": "(Yoon et al., 2018;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 188, |
| "end": 205, |
| "text": "Chen et al., 2017", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "g i = [f i , f T , f i \u2022 f T , f i \u2212 f T ],", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p = Softmax(Linear(tanh(GW T 3 + b 3 ))),", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "where W 3 \u2208 R 4k\u00d78k is weight matrix and b 3 \u2208 R 4k is the bias.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "\u2022 is element-wise product and \u2212 is element-wise subtraction. p = {p i } T i=1 are the matching scores, and will be used to calculate crossentropy loss L CE (shown below) and conversationstructure loss L CV .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L CE = \u2212 1 T T i=1 y i log (p i ),", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "{y i } T i=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "is the one-hot embedding of golden label Y . T is the context range. The overall loss for DIALBERT model can be formalized as :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L overall = L CE + \u03b1L CV + \u03b2L T S ,", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "where \u03b1 and \u03b2 are hyperparameters. The conversation-structure loss L CV and tree-structure loss L T S will be introduced in Section 4.2. Finally, the context message with the largest matching score is regarded as the parent message of target message, and we group these two messages into the same conversation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heuristic Classifier", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "In the list of messages, different conversations are entangled together, and each conversation has its own semantic coherence and cohesion. Most previous studies failed to use the structure of each conversation when the parent message of a target message in the context is determined. In order to encourage our model to find the parent message of the target message based on the context coherence of the conversation, we introduce conversation-structure loss and tree-structure loss in addition to the cross-entropy loss. In this way, our model can learn and leverage the structure of the conversation implicitly and will not suffer from a lack of conversation structure information during the inference/testing stage. Intuitively, both conversation-structure loss and tree-structure loss can encourage the model to select most relevant message as the parent message.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conversation-and Tree-Structure Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The conversation-structure loss is computed based on the matching score:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conversation-Structure Loss", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L CV = \u2212 1 T T i=1 y c i log(p i ),", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Conversation-Structure Loss", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "where {y c i } T i=1 are the conversation labels and each y c i is a binary label indicating whether the i-th context message is in the conversation same as the target message. {p i } T i=1 are matching scores of A chat stream consists of multiple these structures. Conversation-structure loss will help the model distinguish which conversation structure does target message belong to and Tree-Structure loss will help the model further distinguish ancestor messages of target message in the structure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conversation-Structure Loss", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "message pairs. T is context range. The intention of the conversation-structure loss is to encourage the model to choose the parent message for a target message from the messages in the same conversation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conversation-Structure Loss", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "In order to further make use of the structure of conversation, we propose tree-structure loss. Intuitively, in a structure of a conversation (shown in Figure 3 ), ancestors of the target message (i.e., message 0, message 1 and message 4) are most relevant to the target message. Because the target message can be regarded as the response to its ancestor messages or as an extension of the topic discussed in the ancestor messages, the intention of the tree-structure loss is to help the model further narrow down the candidates. The tree-structure loss encourages the model to choose the parent message for a target message from all ancestor messages in the same conversation. The tree-structure loss has two terms that are designed for ancestor nodes and other nodes, respectively. The first term of the treestructure loss can be computed with the following formulae:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 151, |
| "end": 159, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y a i = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 0.5 if d = 0, 1.2-0.2*d if 0 < d \u2264 5 0.1 if 5 < d, ,", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L F irstT erm = \u2212 1 T T i=1 y a i log(m i ),", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "where d is the distance between the specific context message and target message in the structure of a conversation. For example, in Figure 3 , d of message 1 and the target message is 2. Note that d = 0 is the distance for the special message pair in which the target message is paired with itself. Because our target is to find the parent message. In order to add the penalty to the model, if nonancestor messages in the conversation are selected as the parent of the target message, we designed three strategies for calculating the second term of the tree-structure loss: uniform-penalty, penaltyby-distance, and penalty-by-layer-difference. For uniform strategy, y b i = 0.1 if the i-th context message is not an ancestor message of the target message. For penalty-by-distance, the strategy is formalized as follows:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 132, |
| "end": 140, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y b i = \uf8f1 \uf8f2 \uf8f3 1- d 20 if 0 \u2264 d < 20 0.1 if 20 \u2264 d ,", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "where d is the distance between the target message and the corresponding message in the structure of the conversation; e.g., in Figure 3 , d between message 3 and target message is 3. For penalty-bylayer-difference, the strategy can be formalized as:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 128, |
| "end": 136, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y b i = \uf8f1 \uf8f2 \uf8f3 1- l i 10 if 0 \u2264 l i < 10 0.1 if 10 \u2264 l i ,", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "l i =| layer target \u2212 layer i |,", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "where layer target is the layer number of the target message in the structure of the conversation. layer i is the layer number of message i; e.g., the layer difference between message 2 and target message is | 4 \u2212 2 |= 2. The tree-structure loss L T S can be formulated as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L SecondT erm = \u2212 1 T T i=1 y b i log(m i ),", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "L", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "T S = L F irstT erm \u2212 L SecondT erm .", |
| "eq_num": "(15)" |
| } |
| ], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "Note that if the i-th context message is not in the same conversation as the target message, then y a i = 0, y b i = 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tree-Structure Loss", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "Our proposed method was evaluated on the Ubuntu IRC dataset (Kummerfeld et al., 2019), which is manually annotated with reply-to relationship between messages. The statistics of distances between the target and its parent message is shown in Figure 4 . In addition, we also evaluated our proposed method on the Reddit-large dataset proposed in . 1 We followed the settings in to further filter the Reddit-large dataset: if a comment or the user who posted the comment is deleted, the comment itself and all its descendants are not included in the dataset. These conversations were splitted into train/validation/testing sets in a ratio of 8:1:1. The overall statistics of the two datasets are shown in Table 1 and data examples from these two datasets are shown in Table 2 .", |
| "cite_spans": [ |
| { |
| "start": 346, |
| "end": 347, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 242, |
| "end": 250, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 702, |
| "end": 709, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 765, |
| "end": 772, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "For the Ubuntu IRC dataset, we follow the setting in Kummerfeld et al. et al., 2019), Adjusted Rand Index (ARI), Oneto-One Overlap (1-1) of the cluster (Elsner and Charniak, 2008) , as well as the precision, recall, and F1 score between the cluster prediction and ground truth. Note that the precision, recall, and F1 score are calculated using the number of perfectly matching conversations, excluding conversations that have only one message (mostly system messages). We take VI as the main metric. For the Reddit dataset, we follow the setting of . Specifically, the graph accuracy and the conversation accuracy are adopted. The graph accuracy is used to measure the average agreement between the ground truth and predicted parent for each utterance. The conversation accuracy is used to measure the average agreement between conversation structures and predicted structures. Specifically, only if all messages in a conversation are predicted correctly, the predicted structure is regarded as correct. We take graph accuracy as the main metric.", |
| "cite_spans": [ |
| { |
| "start": 152, |
| "end": 179, |
| "text": "(Elsner and Charniak, 2008)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The base version of BERT was used in our experiments. The initial learning rate was set to 2e-5. The maximum sequence length was set to 100. The number of hidden unit k was 384. For the two extra losses, \u03b1 = 0.15 and \u03b2 = 1 achieved the best performance. The value of \u03b1 was selected from [0.1, 0.15, 0.2], and that of \u03b2 was selected from [0.5, 1]. Dropout was applied on the output layer of the ConBERT and heuristic classifier with a ratio of 0.1. For the IRC dataset, batch size was set to 4 and the context range T was set to 50. For the Reddit dataset, batch size was set to 3 and the context range T was set to 16. All experiments were conducted on a 24G RTX TITAN GPU. All codes were implemented in the TensorFlow framework (Abadi et al., 2016) and are published to help replicate our results. 2", |
| "cite_spans": [ |
| { |
| "start": 729, |
| "end": 749, |
| "text": "(Abadi et al., 2016)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We compare our models with those reported in Kummerfeld et al. (2019) and , which are shown in the Table 3 . Below we list variants of our models, which are also shown in the bottom part of Table 3 . DIALBERT: Domain adaptation has shown great effectiveness to improve dialogue performance (Gu et al., 2020a; Whang et al., 2020) .In this setting, DIALBERT with adaptation 3 will be used to find parent message according to the ranking scores. Table 3 : Results on the Ubuntu IRC development and test sets. Note that feature was introduced along with the original dataset (Kummerfeld et al., 2019), so the \"feature\" used with different models was the same. The results marked with * were copied from their corresponding publications. Dec. Att. denoted the decomposable attention model (Parikh et al., 2016) , ESIM denoted the enhanced sequential inference model (Chen et al., 2017) , and MHT denoted masked hierarchical Transformer . Numbers in bold denoted the best performance without comparing with Ptr-Net (Yu and Joty, 2020) and structural characterization (Ma et al., 2022) , which are the latest proposed methods for dialogue disentanglement and are included for reference.", |
| "cite_spans": [ |
| { |
| "start": 45, |
| "end": 69, |
| "text": "Kummerfeld et al. (2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 290, |
| "end": 308, |
| "text": "(Gu et al., 2020a;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 309, |
| "end": 328, |
| "text": "Whang et al., 2020)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 784, |
| "end": 805, |
| "text": "(Parikh et al., 2016)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 861, |
| "end": 880, |
| "text": "(Chen et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1009, |
| "end": 1028, |
| "text": "(Yu and Joty, 2020)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1061, |
| "end": 1078, |
| "text": "(Ma et al., 2022)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 99, |
| "end": 106, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 190, |
| "end": 197, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 443, |
| "end": 450, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison Baselines", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "DIALBERT + feature: The same setting as DIALBERT, but also combined with the features used in Kummerfeld et al. (2019) . The features consist of three parts: (1) Global-level features, including year and frequency of the conversation.", |
| "cite_spans": [ |
| { |
| "start": 94, |
| "end": 118, |
| "text": "Kummerfeld et al. (2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Baselines", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "(2) Utterance level features, including types of message, targeted or not, time difference between the last message, etc. (3) Utterance pair features including how far apart in position and the time between the messages, whether one message targets another, etc. Specifically, we concatenate these external features with high-order feature vectors F in our model. These features are same as those used in other baseline models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Baselines", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "DIALBERT + ensemble: In this setting, the weights of the model prediction probability were averaged for each sample across 8 DIALBERT models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Baselines", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "DIALBERT w/o. adaptation: In this setting, the adaptation process was ablated. DIALBERT was finetuned on the IRC dataset directly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Baselines", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "DIALBERT + cov: The conversation-structure loss was employed in addition to the cross-entropy loss.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Baselines", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "DIALBERT + cov + (uni or dis or layer): Three results of using different tree-structure losses were reported.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Baselines", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "The performances of different models on the IRC test set are shown in Table 3 . Our model outperforms all of the previous models in all evaluation metrics. Specifically, on the test set, the previous work using an ensemble of 10 feedforward models obtained through a vote is capable of reaching the previous best performance. We can see that our best model (DIALBERT+cov+dis) achieves better performance by a large margin. To compare our results with those reported in , we report the performances of DIALBERT and DIALBERT w/o. adaptation on the development set as well. 4 We can see even without domain Table 6 : An example that DIALBERT cannot predict correctly, but DIALBERT + extra losses does. In this table, Parent is the golden label; DIALBERT and DIALBERT + extra losses is the the perdiction of different models; Index is the message index.", |
| "cite_spans": [ |
| { |
| "start": 571, |
| "end": 572, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 70, |
| "end": 77, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 604, |
| "end": 611, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "adaptation and extra losses, DIALBERT already outperforms MHT+feature. All our other models perform even better on the development set, but due to the space limit, we only report the above two models on the development set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "The same observation can be seen on the Reddit dataset as shown in Table 4 . Note that the values of conversation accuracy (Conv. Acc.) are small, due to the definition of the metric itself.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 67, |
| "end": 74, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "Different from other NLP tasks, according to the results, BERT does not have much advantages over other models, which indicates semantic knowledge learned from pre-training is not a direct indicator of improvement for disentanglement. The result that DIALBERT outperforms BERT on all six evaluation metrics could be explained by the vital importance of context in conversations disentanglement, and DIALBERT makes better use of pre-trained knowledge. The substantial margin between DIALBERT and DIALBERT w/o. adaptation demonstrates adaptation does give further improvement of DIALBERT. It is also notable that DIALBERT+feature does not have much performance improvement compared with DIALBERT, which means the information contained in feature has been implicitly learned during the domain adaptation process. As the result, we further report the ensemble results and external loss results based on DIALBERT with adaptation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "The results that DIALBERT+cov outperforms DIALBERT shows that the conversation-structure loss does help. Among the three strategies of tree-structure losses, only the penalty-by-distance strategy can further improve the performance of DI-ALBERT+cov. The reason might be both uniformpenalty strategy and penalty-by-layer-difference strategy ignore the distance between each message and target message in tree structures, and distance information is of vital importance to understand the conversation structures. That explains why penaltyby-distance strategy can further improve the result in both the IRC test set and in Reddit test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "It can be seen that the results of DIALBERT and DIALBERT with conversation-structure loss doesn't show a substantial margin in the Reddit test set. The reason might be the differences in data collection. For the IRC dataset, data are collected from Linux IRC channel which means different conversations can happen at the same time and messages in context range are not necessary within the same conversation with the target message. But for the Reddit dataset, data are crawled by a list of all posts in a conversation which means messages of each conversation are together in the dataset. As a result, the conversation information can not give as much improvement as in IRC dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "The selection of d and l is based on the statistic of both datasets that we used. For equation 9, d = 5 will cover most of samples. Because our target is to find the parent message of target message. So we set d = 0 a smaller value to give the \"real\" parent message more \"credit\". For the same reason, we set threshold d and l to be 20 and 10 in equation 11 and equation 12 respectively. Please note that the d in equation 9 is designed for ancestor messages. The d in equation 11, however, is designed for non-ancestor messages which are generally further away from the target message. The d in equation 11 will not be 0. As the result, we set different threshold d value. The intention that we designed descending y b i based on distance (or layer-difference) is the assumption that the nearer a message and the target message is the more semantic relevant it could be. We designed the uniform-penalty strategy to verify the correctness of the assumption (as shown in Table 3 ), and results show that penalty-by-distance and penaltyby-layer-difference do reach better performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 970, |
| "end": 977, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Value Design for Tree-Structure Loss", |
| "sec_num": "5.6" |
| }, |
| { |
| "text": "To find out how each component contributes to the final results, we display the ablation analysis of different component based on our best system DIALBERT+cov+dis (as shown in Table 5 ). The performance of the model drops in all of 6 evaluation metrics after the removal of extra losses, which demonstrates the effectiveness of integrating conversation structure information into the losses.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 176, |
| "end": 183, |
| "text": "Table 5", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation", |
| "sec_num": "5.7" |
| }, |
| { |
| "text": "Moreover, the performance of the model drops in 5 out of 6 evaluation metrics after the removal of adaptation process, which indicates adaptation learns useful semantic information, especially under the condition that the dataset is in a specific domain. After the removal of BiLSTM, in which the model has to make a prediction without any context consideration, results fall remarkably according to all evaluation metrics. As we discussed before, context is very important for disentangling a conversation. We can see from the ablation results, every component added on BERT in our model contributes to the final result.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ablation", |
| "sec_num": "5.7" |
| }, |
| { |
| "text": "Our model can not only introduce global and local conversation semantics but also introduce the conversation structures implicitly, resulting in achieving a new state-of-the-art results by outperforming other models substantially.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ablation", |
| "sec_num": "5.7" |
| }, |
| { |
| "text": "As shown in Table 6 , there are three conversations involve in this example, i.e., {1232, 1234, 1236, 1237 }, {1233, 1235, 1238, 1240, 1241, 1242, 1243} and {1249} , where these numbers denote the index for each message. For the messages 1236 and 1242, DIALBERT + extra losses can find the correct parent message, which indicates that extra losses do help the DIALBERT in dialogue disentanglement. Specifically, for the message 1236, conversation-structure loss plays a more important role, because the preceding messages after parent message are from two conversation. For the message 1242, tree-structure loss plays a more important role, because the preceding messages after parent message are from the same conversation. For message 1239, both DIALBERT and DIALBERT + extra losses cannot predict correctly, the reason might be that the distance from parent message is too far in this case, which demonstrates that dialogue disentanglement is still hard and extra losses can not handle all the cases.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 163, |
| "text": "{1232, 1234, 1236, 1237 }, {1233, 1235, 1238, 1240, 1241, 1242, 1243} and {1249}", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Case Study", |
| "sec_num": "5.8" |
| }, |
| { |
| "text": "In this paper, we propose a novel framework for dialogue disentanglement. Different from previous work, we integrate both local and global semantics by proposing an adapted hierarchical BERT-based model (DIALBERT) to disentangle conversations. Moreover, in order to make use of conversation structures, we finetune our model with two losses (i.e., conversation-structure loss and tree-structure loss). We evaluate our method on two large datasets. Results show that our method achieves a new state-of-the-art performances on both datasets and outperforms models from previous work with a substantial margin. In the future, we will design non-heuristic methods for modeling the conversation structure with less hyperparameters which is a challenge worth exploring.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "only provide the comment IDs and crawling scripts. The data collected in our paper is crawled on March 23, 2020 using the provided scripts and IDs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/TeddLi/Disentangle 3 The Ubuntu forum data published in Dialog System Technology Challenges 8 (DSTC 8) -Track 2 as external data was adopted to perform domain adaptation. The input was constructed as {[CLS], title, question, [SEP], answer, [SEP]}. Both tasks of masked language model (MLM) and next sentence prediction (NSP) were employed during domain adaptation. Note that domain adaptation was only employed in Ubuntu IRC dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "did not include results on the test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank anonymous reviewers for their valuable comments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "@e-sin then it's xscreensaver and xscreensave-gl for opengl ones", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [ |
| ". . . ." |
| ], |
| "last": "Parent Index Message", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Amaranth, hahahaha 1003 1003 === welshbyte has joined #ubuntu", |
| "volume": "996", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Parent Index Message ... ... ... 996 1000 [03:04] Amaranth: @cliche American 992 1001 [03:04] Xenguy: @Amaranth I thought you were -welcome mortal ;-) 1000 1002 [03:04] cliche: @ Amaranth, hahahaha 1003 1003 === welshbyte has joined #ubuntu 997 1004 [03:04] e-sin: no i just want the normal screensavers 995 1005 [03:04] Amaranth: @benoy Do you have cygwinx installed and running? 1006 1006 [03:04] babelfishi: can anyone help me install my Netgear MA111 USB adapter? 1004 1007 [03:04] e-sin: i have a 16mb video card 1008 1008 === regeya has joined #ubuntu 1007 1009 [03:04] e-sin: TNT2 :) 1001 1010 [03:05] Amaranth: @Xenguy hehe, i do side development 1007 1011 [03:05] jobezone: @e-sin then it's xscreensaver and xscreensave-gl for opengl ones. 1005 1012 [03:05] benoy: how do i install that? I couldn't find that in the list of things 1010 1013 [03:05] Amaranth: @Xenguy things like alacarte and easyubuntu ... ...", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "EriC: paste grep Prompt /etc/update-manager/release-upgrades 1232 1232 1232 1234 [19:15] franendar: im getting this: sudo apt-get install", |
| "authors": [ |
| { |
| "first": "Dialbert", |
| "middle": [], |
| "last": "Parent", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [ |
| ". . . ." |
| ], |
| "last": "Dialbert Index Message Extra Losses", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "1234", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Parent DIALBERT DIALBERT Index Message extra losses ... ... ... 1232 1232 1232 1232 [19:15] franendar: how can I install a specific glibc version? 1226 1226 1226 1233 [19:15] EriC: paste grep Prompt /etc/update-manager/release-upgrades 1232 1232 1232 1234 [19:15] franendar: im getting this: sudo apt-get install build-essential 1233 1233 1233 1235 [19:15] EriC: empty 1232 1236 1232 1236 [19:15] MonkeyDust: many glibc questions these days, i wonder how come 1234 1234 1234 1237 [19:15] franendar: im getting this: version 'GLIBCXX_3.4.21' not found 1235 1235 1235 1238 [19:15] EriC: cat /etc/update-manager/release-upgrades 1223 1231 1231 1239 [19:15] nick420: Unable to locate package java8-installer 1238 1238 1238 1240 [19:16] EriC: prompt=never 1240 1240 1240 1241 [19:16] EriC: So, prompt=lts? 1241 1240 1241 1242 [19:16] EriC: yeah 1242 1242 1242 1243 [19:16] EriC: Thanks ... ...", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Tensorflow: A system for large-scale machine learning", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "References Mart\u00edn Abadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianmin", |
| "middle": [], |
| "last": "Barham", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Davis", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjay", |
| "middle": [], |
| "last": "Devin", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Ghemawat", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Irving", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Isard", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "12th {USENIX} Symposium on Operating Systems Design and Implementation ({OSDI} 16)", |
| "volume": "", |
| "issue": "", |
| "pages": "265--283", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "References Mart\u00edn Abadi, Paul Barham, Jianmin Chen, Zhifeng Chen, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Geoffrey Irving, Michael Isard, et al. 2016. Tensorflow: A system for large-scale machine learning. In 12th {USENIX} Symposium on Operating Systems Design and Implementation ({OSDI} 16), pages 265-283.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Topic detection and extraction in chat", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Paige", |
| "suffix": "" |
| }, |
| { |
| "first": "Craig H", |
| "middle": [], |
| "last": "Adams", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Martell", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "IEEE international conference on Semantic computing", |
| "volume": "", |
| "issue": "", |
| "pages": "581--588", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paige H Adams and Craig H Martell. 2008. Topic detection and extraction in chat. In 2008 IEEE international conference on Semantic computing, pages 581-588. IEEE.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The mad hatter's cocktail party: a social mobile audio space supporting multiple simultaneous conversations", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Aoki", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Romaine", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Margaret", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "D" |
| ], |
| "last": "Szymanski", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Thornton", |
| "suffix": "" |
| }, |
| { |
| "first": "Allison", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Woodruff", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul M Aoki, Matthew Romaine, Margaret H Szymanski, James D Thornton, Daniel Wilson, and Allison Woodruff. 2003. The mad hatter's cocktail party: a social mobile audio space supporting multiple simultaneous conversations. In Proceedings", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Enhancing sentence embedding with generalized pooling", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1815--1826", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Chen, Zhen-Hua Ling, and Xiaodan Zhu. 2018. Enhancing sentence embedding with generalized pooling. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1815-1826.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Enhanced lstm for natural language inference", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1657--1668", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. 2017. Enhanced lstm for natural language inference. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1657-1668.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Discovering conversational dependencies between messages in dialogs", |
| "authors": [ |
| { |
| "first": "Wenchao", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Poupart", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Thirty-First AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenchao Du, Pascal Poupart, and Wei Xu. 2017. Discovering conversational dependencies between messages in dialogs. In Thirty-First AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "You talking to me? a corpus and algorithm for conversation disentanglement", |
| "authors": [ |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08: HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "834--842", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Micha Elsner and Eugene Charniak. 2008. You talking to me? a corpus and algorithm for conversation disentanglement. In Proceedings of ACL-08: HLT, pages 834-842.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Disentangling chat", |
| "authors": [ |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Computational Linguistics", |
| "volume": "36", |
| "issue": "3", |
| "pages": "389--409", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Micha Elsner and Eugene Charniak. 2010. Disentan- gling chat. Computational Linguistics, 36(3):389- 409.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Disentangling chat with local coherence models", |
| "authors": [ |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1179--1189", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Micha Elsner and Eugene Charniak. 2011. Disen- tangling chat with local coherence models. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 1179-1189.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Bounding and comparing methods for correlation clustering beyond ilp", |
| "authors": [ |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| }, |
| { |
| "first": "Warren", |
| "middle": [], |
| "last": "Schudy", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Workshop on Integer Linear Programming for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "19--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Micha Elsner and Warren Schudy. 2009. Bounding and comparing methods for correlation clustering beyond ilp. In Proceedings of the Workshop on Integer Linear Programming for Natural Language Processing, pages 19-27.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Speaker-aware BERT for multi-turn response selection in retrieval-based chatbots", |
| "authors": [ |
| { |
| "first": "Jia-Chen", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianda", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiming", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 29th ACM International Conference on Information & Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "2041--2044", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jia-Chen Gu, Tianda Li, Quan Liu, Zhen-Hua Ling, Zhiming Su, Si Wei, and Xiaodan Zhu. 2020a. Speaker-aware BERT for multi-turn response selection in retrieval-based chatbots. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, pages 2041- 2044.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Pre-trained and attention-based neural networks for building noetic task-oriented dialogue systems", |
| "authors": [ |
| { |
| "first": "Jia-Chen", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianda", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu-Ping", |
| "middle": [], |
| "last": "Ruan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, Workshop on the Eighth Dialog System Technology Challenge", |
| "volume": "8", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jia-Chen Gu, Tianda Li, Quan Liu, Xiaodan Zhu, Zhen- Hua Ling, and Yu-Ping Ruan. 2020b. Pre-trained and attention-based neural networks for building noetic task-oriented dialogue systems. In Proceedings of the Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, Workshop on the Eighth Dialog System Technology Challenge, DSTC8, New York, NY, USA, February 7-12, 2020.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "MPC-BERT: A pre-trained language model for multiparty conversation understanding", |
| "authors": [ |
| { |
| "first": "Jia-Chen", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chongyang", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhenhua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Can", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiubo", |
| "middle": [], |
| "last": "Geng", |
| "suffix": "" |
| }, |
| { |
| "first": "Daxin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "3682--3692", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jia-Chen Gu, Chongyang Tao, Zhenhua Ling, Can Xu, Xiubo Geng, and Daxin Jiang. 2021. MPC- BERT: A pre-trained language model for multi- party conversation understanding. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3682-3692.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Learning to disentangle interleaved conversational threads with a siamese hierarchical network and similarity ranking", |
| "authors": [ |
| { |
| "first": "Jyun-Yu", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Francine", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan-Ying", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1812--1822", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jyun-Yu Jiang, Francine Chen, Yan-Ying Chen, and Wei Wang. 2018. Learning to disentangle interleaved conversational threads with a siamese hierarchical network and similarity ranking. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1812-1822.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A largescale corpus for conversation disentanglement", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "K" |
| ], |
| "last": "Kummerfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "Sai", |
| "middle": [ |
| "R" |
| ], |
| "last": "Gouravajhala", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [ |
| "J" |
| ], |
| "last": "Peper", |
| "suffix": "" |
| }, |
| { |
| "first": "Vignesh", |
| "middle": [], |
| "last": "Athreya", |
| "suffix": "" |
| }, |
| { |
| "first": "Chulaka", |
| "middle": [], |
| "last": "Gunasekara", |
| "suffix": "" |
| }, |
| { |
| "first": "Jatin", |
| "middle": [], |
| "last": "Ganhotra", |
| "suffix": "" |
| }, |
| { |
| "first": "Siva", |
| "middle": [ |
| "Sankalp" |
| ], |
| "last": "Patel", |
| "suffix": "" |
| }, |
| { |
| "first": "Lazaros", |
| "middle": [ |
| "C" |
| ], |
| "last": "Polymenakos", |
| "suffix": "" |
| }, |
| { |
| "first": "Walter", |
| "middle": [], |
| "last": "Lasecki", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3846--3856", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan K Kummerfeld, Sai R Gouravajhala, Joseph J Peper, Vignesh Athreya, Chulaka Gunasekara, Jatin Ganhotra, Siva Sankalp Patel, Lazaros C Polymenakos, and Walter Lasecki. 2019. A large- scale corpus for conversation disentanglement. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3846-3856.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "End-to-end transition-based online dialogue disentanglement", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhan", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jia-Chen", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the Twenty-Ninth International Conference on International Joint Conferences on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "3868--3874", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Liu, Zhan Shi, Jia-Chen Gu, Quan Liu, Si Wei, and Xiaodan Zhu. 2021a. End-to-end transition-based online dialogue disentanglement. In Proceedings of the Twenty-Ninth International Conference on Inter- national Joint Conferences on Artificial Intelligence, pages 3868-3874.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Unsupervised conversation disentanglement through cotraining", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhan", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2345--2356", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Liu, Zhan Shi, and Xiaodan Zhu. 2021b. Unsu- pervised conversation disentanglement through co- training. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 2345-2356.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Structural characterization for dialogue disentanglement", |
| "authors": [ |
| { |
| "first": "Xinbei", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2022, |
| "venue": "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xinbei Ma, Zhuosheng Zhang, and Hai Zhao. 2022. Structural characterization for dialogue disentangle- ment. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Hierarchical conversation structure prediction in multi-party chat", |
| "authors": [ |
| { |
| "first": "Elijah", |
| "middle": [], |
| "last": "Mayfield", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Adamson", |
| "suffix": "" |
| }, |
| { |
| "first": "Carolyn", |
| "middle": [], |
| "last": "Rose", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "60--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elijah Mayfield, David Adamson, and Carolyn Rose. 2012. Hierarchical conversation structure prediction in multi-party chat. In Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 60-69.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Chat disentanglement: Identifying semantic reply relationships with random forests and recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Shikib", |
| "middle": [], |
| "last": "Mehri", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Carenini", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "615--623", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shikib Mehri and Giuseppe Carenini. 2017. Chat disentanglement: Identifying semantic reply rela- tionships with random forests and recurrent neural networks. In Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 615-623.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Addressee and response selection for multi-party conversation", |
| "authors": [ |
| { |
| "first": "Hiroki", |
| "middle": [], |
| "last": "Ouchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuta", |
| "middle": [], |
| "last": "Tsuboi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2133--2143", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiroki Ouchi and Yuta Tsuboi. 2016. Addressee and response selection for multi-party conversation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2133-2143.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A decomposable attention model for natural language inference", |
| "authors": [ |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Oscar", |
| "middle": [], |
| "last": "T\u00e4ckstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2249--2255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankur Parikh, Oscar T\u00e4ckstr\u00f6m, Dipanjan Das, and Jakob Uszkoreit. 2016. A decomposable attention model for natural language inference. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2249-2255.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Issues in multiparty dialogues", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Traum", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Workshop on Agent Communication Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "201--211", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Traum. 2004. Issues in multiparty dialogues. In Workshop on Agent Communication Languages, pages 201-211. Springer.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Multiparticipant chat analysis: A survey", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Uthus", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "W" |
| ], |
| "last": "Aha", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Artificial Intelligence", |
| "volume": "199", |
| "issue": "", |
| "pages": "106--121", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David C Uthus and David W Aha. 2013. Multipartici- pant chat analysis: A survey. Artificial Intelligence, 199:106-121.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Contextbased message expansion for disentanglement of interleaved text conversations", |
| "authors": [ |
| { |
| "first": "Lidan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Douglas", |
| "middle": [ |
| "W" |
| ], |
| "last": "Oard", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of human language technologies: The 2009 annual conference of the North American chapter of the association for computational linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "200--208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lidan Wang and Douglas W Oard. 2009. Context- based message expansion for disentanglement of interleaved text conversations. In Proceedings of human language technologies: The 2009 annual conference of the North American chapter of the association for computational linguistics, pages 200- 208.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Recovering implicit thread structure in newsgroup style conversations", |
| "authors": [ |
| { |
| "first": "Yi-Chia", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahesh", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "W" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Carolyn", |
| "middle": [ |
| "Penstein" |
| ], |
| "last": "Ros\u00e9", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Second International Conference on Weblogs and Social Media, ICWSM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi-Chia Wang, Mahesh Joshi, William W Cohen, and Carolyn Penstein Ros\u00e9. 2008. Recovering implicit thread structure in newsgroup style conversations. In Proceedings of the Second International Conference on Weblogs and Social Media, ICWSM 2008.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "An effective domain adaptive post-training method for BERT in response selection", |
| "authors": [ |
| { |
| "first": "Taesun", |
| "middle": [], |
| "last": "Whang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongyub", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Chanhee", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kisu", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongsuk", |
| "middle": [], |
| "last": "Oh", |
| "suffix": "" |
| }, |
| { |
| "first": "Heuiseok", |
| "middle": [], |
| "last": "Lim", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Annual Conference of the International Speech Communication Association, INTERSPEECH", |
| "volume": "2020", |
| "issue": "", |
| "pages": "1585--1589", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taesun Whang, Dongyub Lee, Chanhee Lee, Kisu Yang, Dongsuk Oh, and Heuiseok Lim. 2020. An effective domain adaptive post-training method for BERT in response selection. In Proceedings of the Annual Conference of the International Speech Communi- cation Association, INTERSPEECH, volume 2020, pages 1585-1589.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Dynamic self-attention: Computing attention over words dynamically for sentence embedding", |
| "authors": [ |
| { |
| "first": "Deunsol", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongbok", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sangkeun", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1808.07383" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deunsol Yoon, Dongbok Lee, and SangKeun Lee. 2018. Dynamic self-attention: Computing attention over words dynamically for sentence embedding. arXiv preprint arXiv:1808.07383.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Online conversation disentanglement with pointer networks", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shafiq", |
| "middle": [], |
| "last": "Joty", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "6321--6330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Yu and Shafiq Joty. 2020. Online conversation disentanglement with pointer networks. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6321-6330.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Who did they respond to? conversation structure modeling using masked hierarchical transformer", |
| "authors": [ |
| { |
| "first": "Henghui", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Feng", |
| "middle": [], |
| "last": "Nan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiguo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramesh", |
| "middle": [], |
| "last": "Nallapati", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "34", |
| "issue": "", |
| "pages": "9741--9748", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Henghui Zhu, Feng Nan, Zhiguo Wang, Ramesh Nallapati, and Bing Xiang. 2020. Who did they respond to? conversation structure modeling using masked hierarchical transformer. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 9741-9748.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "An example of dialogue disentanglement.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "The overall architecture of DIALBERT. CLS T is the [CLS] hidden state of the T-th message pair. Note that the hand-craft features designed before the heuristic classifier is introduced in Kummerfeld et al. (2019). These features are not used on the Reddit dataset.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "text": "An example of the conversation structure.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "text": "The percentage of distances between the target message and its parent message in the Ubuntu IRC dataset.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF2": { |
| "text": "", |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "text": "", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td colspan=\"3\">: Results of different models on the Reddit test set in terms of the accuracy (%).</td></tr><tr><td>VI ARI 1-1 F1</td><td>P</td><td>R</td></tr><tr><td colspan=\"3\">Our model -extra losses 92.7 69.2 78.5 44.3 42.1 46.7 93.9 76.3 81.2 46.5 43.3 50.1</td></tr><tr><td colspan=\"3\">-adaptation 92.5 67.8 78.6 41.0 37.6 45.1</td></tr><tr><td colspan=\"3\">-BiLSTM 90.8 62.9 75.0 32.5 29.3 36.6</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "text": "Ablation analysis on different components using the Ubuntu IRC dataset.", |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |