| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:40:14.542026Z" |
| }, |
| "title": "Disentangling Online Chats with DAG-Structured LSTMs", |
| "authors": [ |
| { |
| "first": "Duccio", |
| "middle": [], |
| "last": "Pappadopulo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "UNC Chapel Hill", |
| "location": {} |
| }, |
| "email": "dpappadopulo@bloomberg.net" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "UNC Chapel Hill", |
| "location": {} |
| }, |
| "email": "lbauer6@cs.unc.edu" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Farina", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "UNC Chapel Hill", |
| "location": {} |
| }, |
| "email": "mfarina19@bloomberg.net" |
| }, |
| { |
| "first": "Ozan", |
| "middle": [], |
| "last": "\u0130rsoy", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "UNC Chapel Hill", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "UNC Chapel Hill", |
| "location": {} |
| }, |
| "email": "mbansal@cs.unc.edu" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bloomberg", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "UNC Chapel Hill", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Many modern messaging systems allow fast and synchronous textual communication among many users. The resulting sequence of messages hides a more complicated structure in which independent sub-conversations are interwoven with one another. This poses a challenge for any task aiming to understand the content of the chat logs or gather information from them. The ability to disentangle these conversations is then tantamount to the success of many downstream tasks such as summarization and question answering. Structured information accompanying the text such as user turn, user mentions, timestamps, is used as a cue by the participants themselves who need to follow the conversation and has been shown to be important for disentanglement. DAG-LSTMs, a generalization of Tree-LSTMs that can handle directed acyclic dependencies, are a natural way to incorporate such information and its non-sequential nature. In this paper, we apply DAG-LSTMs to the conversation disentanglement task. We perform our experiments on the Ubuntu IRC dataset. We show that the novel model we propose achieves state of the art status on the task of recovering reply-to relations and it is competitive on other disentanglement metrics.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Many modern messaging systems allow fast and synchronous textual communication among many users. The resulting sequence of messages hides a more complicated structure in which independent sub-conversations are interwoven with one another. This poses a challenge for any task aiming to understand the content of the chat logs or gather information from them. The ability to disentangle these conversations is then tantamount to the success of many downstream tasks such as summarization and question answering. Structured information accompanying the text such as user turn, user mentions, timestamps, is used as a cue by the participants themselves who need to follow the conversation and has been shown to be important for disentanglement. DAG-LSTMs, a generalization of Tree-LSTMs that can handle directed acyclic dependencies, are a natural way to incorporate such information and its non-sequential nature. In this paper, we apply DAG-LSTMs to the conversation disentanglement task. We perform our experiments on the Ubuntu IRC dataset. We show that the novel model we propose achieves state of the art status on the task of recovering reply-to relations and it is competitive on other disentanglement metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Online chat and text messaging systems like Facebook Messenger, Slack, WeChat, WhatsApp, are common tools used by people to communicate in groups and in real time. In these venues multiple independent conversations often occur simultaneously with their individual utterances interspersed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "It is reasonable to assume the existence of an underlying thread structure partitioning the full conversation into disjoint sets of utterances, which ideally represent independent sub-conversations. * Equal contribution [12:19] Figure 1 : Excerpt from the IRC dataset (left) and our reply-to classifier architecture (right). Blue dots represent a unidirectional DAG-LSTM unit processing the states coming from the children of the current node. Red dots represent the GRU units performing thread encoding. At this point in time, we are computing the score (log-odds) of fifth utterance replying to the third.", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 227, |
| "text": "[12:19]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 228, |
| "end": 236, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u03c7 i \u03c6 i \u03c4 j \u03c8 j = [\u03c6 j ; \u03c4 j ] s ij", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The task of identifying these sub-units, disentanglement, is a prerequisite for further downstream tasks among which question answering, summarization, and topic modeling (Traum et al., 2004; Shen et al., 2006; Adams and Martell, 2008; Elsner and Charniak, 2010) . Additional structure can generally be found in these logs, as a particular utterance could be a response or a continuation of a previous one. Such reply-to relationships implicitly define threads as the connected components of the resulting graph topology, and can then be used for disentanglement (Mehri and Carenini, 2017; Dulceanu, 2016; Wang et al., 2008; Gaoyang Guo et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 191, |
| "text": "(Traum et al., 2004;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 192, |
| "end": 210, |
| "text": "Shen et al., 2006;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 211, |
| "end": 235, |
| "text": "Adams and Martell, 2008;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 236, |
| "end": 262, |
| "text": "Elsner and Charniak, 2010)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 563, |
| "end": 589, |
| "text": "(Mehri and Carenini, 2017;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 590, |
| "end": 605, |
| "text": "Dulceanu, 2016;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 606, |
| "end": 624, |
| "text": "Wang et al., 2008;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 625, |
| "end": 650, |
| "text": "Gaoyang Guo et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Modeling work on conversation disentanglement spans more than a decade. Charniak (2008, 2010) use feature based linear models to find pairs of utterances belonging to the same thread and heuristic global algorithms to assign posts to threads. Mehri and Carenini (2017) and Jiang et al. (2018) , while also adopting similar heuristics, use features extracted through neural models, LSTMSs (Hochreiter and Schmidhuber, 1997) and siamese CNNs (Bromley et al., 1993) respectively. Wang et al. (2011) follow a different approach by modeling the interactions between the predicted reply-to relations as a conditional random field.", |
| "cite_spans": [ |
| { |
| "start": 72, |
| "end": 93, |
| "text": "Charniak (2008, 2010)", |
| "ref_id": null |
| }, |
| { |
| "start": 243, |
| "end": 268, |
| "text": "Mehri and Carenini (2017)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 273, |
| "end": 292, |
| "text": "Jiang et al. (2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 388, |
| "end": 422, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 440, |
| "end": 462, |
| "text": "(Bromley et al., 1993)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 477, |
| "end": 495, |
| "text": "Wang et al. (2011)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "One challenge in building automatic systems that perform disentanglement is the scarcity of large annotated datasets to be used to train expressive models. A remarkable effort in this direction is the work of Kummerfeld et al. (2019a) and the release of a dataset containing more than 77k utterances from the IRC #Ubuntu channel with annotated reply-to structure. In the same paper, it is shown how a set of simple handcrafted features, pooling of utterances GloVe embeddings (Pennington et al., 2014) , and a feed-forward classifier can achieve good performances on the disentanglement task. Most of the follow-up work on the dataset relies on BERT (Devlin et al., 2019) embeddings to generate utterance representations . use an additional transformer module to contextualize these representations, while ; use an LSTM. Two exceptions are , which models thread membership in an online fashion and discards reply-to relationships, and the recent Yu and Joty (2020a) which uses pointer networks (Vinyals et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 209, |
| "end": 234, |
| "text": "Kummerfeld et al. (2019a)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 476, |
| "end": 501, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 650, |
| "end": 671, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 994, |
| "end": 1016, |
| "text": "(Vinyals et al., 2015)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this short paper, we use DAG-structured LSTMs (\u0130rsoy et al., 2019) to study disentanglement. As a generalization of Tree-LSTMs (Tai et al., 2015a) , DAG-LSTMs allow to faithfully represent the structure of a conversation, which is more properly described as a directed acyclic graph (DAG) than a sequence. Furthermore, DAG-LSTMs allow for the systematic inclusion of structured information like user turn and mentions in the learned representation of the conversation context. We enrich the representation learned by the DAG-LSTM by concatenating to it a representation of the thread to which the utterance belongs. This thread encoding is obtained by means of a GRU unit (Cho et al., 2014) and captures thread specific features like style, topic, or persona. Finally we manually construct new features to improve username matching, which is crucial for detecting user mentions, one of the most important features for disentanglement.", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 69, |
| "text": "DAG-structured LSTMs (\u0130rsoy et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 130, |
| "end": 149, |
| "text": "(Tai et al., 2015a)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 675, |
| "end": 693, |
| "text": "(Cho et al., 2014)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our results are summarized in Table 1 . The DAG-LSTM significantly outperforms the BiL-STM baseline. Ablation studies show the importance of the new features we introduce. When augmented by thread encoding and a careful handling of posts predicted to be thread starters, the DAG-LSTM architecture achieves state of the art performances on reply-to relation extraction on the IRC Ubuntu dataset and it is competitive on the other metrics which are relevant to disentanglement.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 30, |
| "end": 37, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A multi-party chat C is a sequence of posts (c i ) i , i = 1, . . . , |C|. For each query post c i we look for the set of link posts R(c i ) such that c i replies to, or links to, c j for c j \u2208 R(c i ). When a post c is a conversation starter we define, consistently with Kummerfeld et al. (2019a) , R(c) = {c}, that is c replies to itself, it is a self-link. This reply-to binary relation defines a DAG over C. By taking the union of the reply-to relation with its converse and by calculating its transitive closure, we obtain an equivalence relation on C whose equivalence classes are threads, thus solving the disentanglement problem.", |
| "cite_spans": [ |
| { |
| "start": 272, |
| "end": 297, |
| "text": "Kummerfeld et al. (2019a)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology 2.1 Problem Statement", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We frame the problem as a sequence classification task. For each query post c i we consider its L preceding posts O c i \u2261 {c i\u2212L\u22121 , . . . , c i } and predict one of them as its link. In the IRC Ubuntu dataset, predicting a single link per query post is a good approximation, holding true for more than 95% of the annotated utterances. We use L = 50 in the following. As described in Sections 2.2 and 2.3, for each query utterance c i , we construct a contextualized representation, \u03c6 i \u2261 \u03c6(c i , C). We do the same for each of the links c j \u2208 O c i , using a representation \u03c8 that can in principle differ from \u03c6. We then calculate p(", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology 2.1 Problem Statement", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "c i replies-to c j ) \u2261 p(c j |c i ) as p(c j |c i ) \u2261 exp(s ij ) c k \u2208Oc i exp(s ik ) ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Methodology 2.1 Problem Statement", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology 2.1 Problem Statement", |
| "sec_num": "2" |
| }, |
| { |
| "text": "s ij \u2261 s(\u03c6 i , \u03c8 j , f ij )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology 2.1 Problem Statement", |
| "sec_num": "2" |
| }, |
| { |
| "text": "is a real-valued scoring function described in Section 2.4 and f ij are additional features. The parameters of the resulting model are learned by maximizing the likelihood associated to Eq. 1. At inference time we predict j = argmax c j \u2208Oc i p(c j |c i ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology 2.1 Problem Statement", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The construction of the \u03c6 and \u03c8 representations closely follows\u0130rsoy et al. 2019. Every post c i is represented as a sequence of tokens (t i n ) n . An embedding layer maps the tokens to a sequence of d I -dimensional real vectors (\u03c9 i n ) n . We use the tokenizer and the word embeddings from Kummerfeld et al. 2019a, d I = 50. We generate a representation \u03c7 i of c i by means of a single BiLSTM layer unrolled over the sequence of the token embeddings (", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextual Post Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u03c5 i n ) n \u2261 BiLSTM[(\u03c9 i n ) n ] fol- lowed by elementwise max-affine pooling \u03c7 i = max n Affine[(\u03c5 i n ) n ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextual Post Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "To obtain the contextualized representations \u03c6, we use a DAG-LSTM layer. This is an N-ary Tree-LSTM (Tai et al., 2015a) in which the sum over children in the recursive definition of the memory cell is replaced with an elementwise max operation (see Appendix). This allows the existence of multiple paths between two nodes (as it is the case if a node has multiple children) without the associated state explosion (\u0130rsoy et al., 2019) . This is crucial to handle long sequences, as in our case.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 119, |
| "text": "(Tai et al., 2015a)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 413, |
| "end": 433, |
| "text": "(\u0130rsoy et al., 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextual Post Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "At each time step the DAG-LSTM unit receives the utterance representation \u03c7 i of the current post c i as the input and all the hidden and cell states coming from a labeled set of children, C(c i ), see Figure 1. In our case C(c i ) contains three elements: the previous post in the conversation (c i\u22121 ), the previous post by the same user of c i , the previous post by the user mentioned in c i if any. More dependencies can be easily added making this architecture well suited to handle structured information. The DAG-LSTM is unrolled over the sequence ({\u03c7 i , C(c i )}) i , providing a sequence of contextualized post representations (\u03c6 i ) i . We also consider a bidirectional DAG-LSTM defined by a second unit processing the reversed sequencec i \u2261 c |C|\u2212i+1 . Forward and backward DAG-LSTM representations are then concatenated to obtain \u03c6.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 202, |
| "end": 208, |
| "text": "Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Contextual Post Representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The link post representation \u03c8 can coincide with the query one, \u03c8 j \u2261 \u03c6 j . One potential issue with this approach is that \u03c8 does not depend on past thread assignments. Furthermore, thread-specific features such as topic and persona, cannot be easily captured by the hierarchical but sequential model described in the previous section. Thus we augment the link representations by means of thread encoding . Given a query, c i , and a link c j posts pair, we consider the thread T (c j ) = (c t i ), t i < t i+1 , t |T (c j )| = j, to which c j has been assigned. We construct a representation \u03c4 j of such thread by means of a GRU cell,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Thread Encoding", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "\u03c4 j = GRU[(\u03c7(c)) c\u2208T (c j ) ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Thread Encoding", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": ". \u03c8 j is then obtained by concatenating \u03c6 j and \u03c4 j . At training time we use the gold threads to generate the \u03c4 representations, while at evaluation time we use the predicted ones.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Thread Encoding", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Once query and link representations are constructed we use the scoring function in Eq. 1 to score each link against the query utterance, with s a three-layer feed-forward neural network. The input of the network is the concatenation [\u03c6 i ; \u03c8 j ; f ij ], where f ij are the 77 features introduced by Kummerfeld et al. (2019a). We augment them by 42 additional features based on Levenshtein distance and longest common prefix between query's username and words in the link utterance (and vice versa). These are introduced to improve mention detection by being more lenient on spelling mistakes (see 2.5 for precise definitions).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Function", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "While IRC chats allow systematically tagging other participants (a single mention per post), users can address each other explicitly by typing usernames. This allows for abbreviations and typos to be introduced, which are not efficiently captured by the set of features used by Kummerfeld et al. (2019b) . To ameliorate this problem we construct additional features. Given a pair of utterances c 1 and c 2 we define the following:", |
| "cite_spans": [ |
| { |
| "start": 278, |
| "end": 303, |
| "text": "Kummerfeld et al. (2019b)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "User Features", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "\u2022 Smallest Levenshtein distance (D L ) between c 1 (c 2 )'s username and each of the words in c 2 (c 1 ); 5 bins, D L = i for i = 0, . . . , 4 or D L > 4 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "User Features", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "\u2022 Largest length of common prefix ( ) between c 1 (c 2 )'s username and each of the words in c 2 (c 1 ); 5 bins, = i for i = 3, . . . , 6 or > 6.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "User Features", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "\u2022 Binary variable indicating whether c 1 (c 2 )'s username is a prefix of any of the words in c 2 (c 1 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "User Features", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "These amount to a total of 42 additional features for each pair of posts. 3 Results", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "User Features", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "We conduct our experiments on the Ubuntu IRC dataset for disentanglement (Kummerfeld et al., 2019a; Kim et al., 2019) . We focus on two evaluation metrics defined in Kummerfeld et al. (2019a) : graph F 1 , the F-score calculated using the number of correctly predicted reply-to pairs; cluster F 1 , the F-score calculated using the number of matching threads of length greater than 1.", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 99, |
| "text": "(Kummerfeld et al., 2019a;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 100, |
| "end": 117, |
| "text": "Kim et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 166, |
| "end": 191, |
| "text": "Kummerfeld et al. (2019a)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "As a baseline, we use a BiLSTM model in which \u03c6 i (= \u03c8 i ) is obtained as the hidden states of a bidirectional LSTM unrolled over the sequence (\u03c7 i ) i . The base DAG-LSTM model uses both username and mentions to define the children set C of an utterance. Bidirectionality is left as a hyperparameter. All our experiments use the same architecture from section 2 to construct the utterance representation \u03c7. We train each model by minimizing the negative log-likelihood for Eq. 1 using Adam optimizer (Kingma and Ba, 2019). We tune the hyperparameters of each architecture through random search. 1 Table 1 shows the test set performances of the models which achieve the best graph F 1 score 1 We refer to the Appendix for details. over the dev set. Optimizing graph over cluster score is motivated by an observation: dev set cluster F 1 score displays a much larger variance than graph F 1 score, which is roughly four-fold after subtracting the score rolling average. By picking the iteration with the best cluster F 1 score we would be more exposed to fluctuation and to worse generalization, which we observe.", |
| "cite_spans": [ |
| { |
| "start": 691, |
| "end": 692, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 598, |
| "end": 605, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As noted by Yu and Joty (2020b) , the ability of the model to detect self-links is crucial for its final performances. In line with their findings, we also report that all our models are skewed towards high recall for self-link detection (Table 2) . To help with this, we introduce two thresholds \u03b8 and \u03b4, which we compare withp, the argmax probability Eq. 1, and \u2206p, the difference between the top-2 predicted probabilities. Whenever the argmax is a self-link: if p < \u03b8, we predict the next-to-argmax link, otherwise we predict both the top-2 links if also \u2206p < \u03b4. On the dev set, we first fine-tune \u03b8 to maximize the self-link F 1 score and the fine-tune \u03b4 to maximize the cluster F 1 score. Table 1 shows our main results. Our DAG-LSTM model significantly outperforms the BiLSTM baseline. We perform ablation studies on our best DAG-LSTM model showing that while both user features and mention link provide a performance improvement for both cluster and graph score, only user features ablation results in a significant change. Self-links threshold tuning improves performances, particularly on cluster score for both models, highlighting the importance of correctly identifying thread starters.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 31, |
| "text": "Yu and Joty (2020b)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 238, |
| "end": 247, |
| "text": "(Table 2)", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 694, |
| "end": 701, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Self-Links Threshold Tuning", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The DAG-LSTM model with thread encoding achieves state of the art performances in predicting reply-to relations. This is particularly interesting especially when we compare with models employing contextual embeddings like . For the cluster scores, the best model is the pointer network model of Yu and Joty (2020a) , which is anyway within less than 0.5% of the best contextual model, and within 2.5% of our model. The difference mainly arises from a difference in recall and corresponds to an absolute difference of less than 10 true positive clusters on the test set. Further comparisons with existing literature are limited by code not being available at the moment.", |
| "cite_spans": [ |
| { |
| "start": 295, |
| "end": 314, |
| "text": "Yu and Joty (2020a)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results Discussion", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In this paper we apply, for the first time, DAG-LSTMs to the disentanglement task; they provide a flexible architecture that allows to incorporate into the learned neural representations the structured information which comes alongside multi-turn dialogue. We propose thread encoding and a new set of features to aid identification of user mentions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "There are possible directions left to explore. We modeled the reply-to relationships in a conversation by making an assumption of conditional independence of reply-to assignments. This is possibly a poor approximation and it would be interesting to lift it. A challenge with this approach is the computational complexity resulting from the large dimension of the output space of the reply-to classifier. We notice that thread encoding allows a non-greedy decoding strategy through beam search which would be interesting to further explore.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A DAG-LSTM is a variation on the Tree-LSTM (Tai et al., 2015b) architecture, that is defined over DAGs. Given a DAG, G, we assume that for every vertex v of G, the edges e(v, v ) connecting the children v \u2208 C(v) to v can be assigned a unique label v,v from a fixed set of labels.", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 62, |
| "text": "(Tai et al., 2015b)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 DAG-LSTM Equations", |
| "sec_num": null |
| }, |
| { |
| "text": "A pair of states vectors (h v , c v ) and an input x v are associated to every vertex v. The DAG-LSTM equations define the states (h v , c v ), as a function of the input x v and the states of its children:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 DAG-LSTM Equations", |
| "sec_num": null |
| }, |
| { |
| "text": "(h v , c v ) = DAG-LSTM(x v ; {(h w , c w )|w \u2208 C(v)}).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 DAG-LSTM Equations", |
| "sec_num": null |
| }, |
| { |
| "text": "(2) The equations defining such functions are the following:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 DAG-LSTM Equations", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "i v = \u03c3 W ix x v + v \u2208C(v) W v,v ih h v (3) f vv = \u03c3 W f x x v + v \u2208C(v) W v,v v,v f h h v (4) c v = i v u v + max v \u2208C(v) f vv c v (5) h v = o v tanh(c v )", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "A.1 DAG-LSTM Equations", |
| "sec_num": null |
| }, |
| { |
| "text": "The equations for the o and u gates are the same as those for the i gate by replacing everywhere i \u2192 o, u. Bias vectors are left implicit in the definition of i, f , o, and u.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 DAG-LSTM Equations", |
| "sec_num": null |
| }, |
| { |
| "text": "represents Hadamard product and max in Eq. 5 represent elementwise max operation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 DAG-LSTM Equations", |
| "sec_num": null |
| }, |
| { |
| "text": "A bidirectional DAG-LSTM, is just a pair of independent DAG-LSTM, one of which is unrolled over the time reversed sequence of utterances. The output of a bidirectional DAG-LSTM is the concatenation of the h states of the forward and backward unit for a given utterance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 DAG-LSTM Equations", |
| "sec_num": null |
| }, |
| { |
| "text": "We use adjudicated training, development, and test sets from (Kummerfeld et al., 2019b) . Each of these datasets is composed of a set of conversations (153 in the training set and 10 in both the development and test sets) each representing a chunk of contiguous posts from the IRC #Ubuntu channel. Each of these conversations contains strictly more than 1000 posts (exactly 1250 and 1500 for dev and test set respectively). Annotations are available for all but the first 1000 posts in every conversation. We apply some preprocessing to these conversations.", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 87, |
| "text": "(Kummerfeld et al., 2019b)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Training and Hyperparameter Tuning", |
| "sec_num": null |
| }, |
| { |
| "text": "We chunk the annotated section of every training conversation into contiguous chunks of 50 posts each, starting from the first annotated post. 2 To each of these chunks we attach a past context of 100 posts and a future context of 50 posts, resulting in chunks 200 utterances long. For each of these chunks we keep only those annotated links for which the response utterance lies in the central 50 posts. We do not chunk the development and test sets, but drop the first 900 posts in every conversation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Training and Hyperparameter Tuning", |
| "sec_num": null |
| }, |
| { |
| "text": "The various architectures we consider share the same set of parameters to fine-tune. One parameter d h controls the dimension of the hidden state of the LSTMs and one parameter d F F controls the dimension of the hidden layers of the feed-forward scorer. We use word dropout, apply dropout after the max-affine layer, and apply dropout after activation at every layer of the feed-forward scorer. We clip all gradient entries at 5. We use a single layer of LSTMs and DAG-LSTMs to build the \u03c7 and \u03c6, \u03c8 representations and we do not dropout any of their units. Similarly we use a single layer GRU for the thread encoder. We list all the hyperparameters in Table 3 together with their range and distribution used for the random search.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 653, |
| "end": 660, |
| "text": "Table 3", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A.2 Training and Hyperparameter Tuning", |
| "sec_num": null |
| }, |
| { |
| "text": "Hyperparameter optimization is performed by running 100 training jobs for the base BiLSTM architecture, DAG-LSTM, and DAG-LSTM with thread encoding. Our published results are from the best among these runs. The best sets of parameters we find for each of these architectures are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Training and Hyperparameter Tuning", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 BiLSTM: d h = 256, d F F = 128, no word and max-affine dropout, a feed-forward dropout equal to 0.3, and a learning rate of 2.4 \u00d7 10 \u22124 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Training and Hyperparameter Tuning", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 DAG-LSTM: d h = 64, d F F = 256, no word and max-affine dropout, a feed-forward dropout equal to 0.3, and a learning rate of 7.3 \u00d7 10 \u22124 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Training and Hyperparameter Tuning", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 DAG-LSTM with thread encoding: d h = d F F = 256, word and max-affine dropout equal to 0.3, a feed-forward dropout equal to 0.5, and a learning rate of 7.9 \u00d7 10 \u22124 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Training and Hyperparameter Tuning", |
| "sec_num": null |
| }, |
| { |
| "text": "User feature and mention link ablations are obtained by fixing all parameters of the best DAG-LSTM run (removing the feature we are experimenting with) and running 10 jobs by only changing the random seed. Each training job is performed on a single GPU and, depending on the architecture, takes from 6 to 12 hours.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Training and Hyperparameter Tuning", |
| "sec_num": null |
| }, |
| { |
| "text": "We use McNemar test (McNemar, 1947) to evaluate the significance of performance differences between models. Given two models M A and M B , we define n AB as the number of links correctly predicted by A but not by B. Under the null hypothesis, both n AB and n BA \u223c Bin(n, 1/2), where n \u2261 n AB + n BA . We define a model A to be significantly better than a model B if the null hypothesis is excluded at 95% confidence level.", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 35, |
| "text": "(McNemar, 1947)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.3 Significance Estimates", |
| "sec_num": null |
| }, |
| { |
| "text": "This may result in the last chunk having fewer than 50 posts. This happens for 45 conversations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank the reviewers for their useful feedback. We thank Andy Liu, Camilo Ortiz, Huayan Zhong, Philipp Meerkamp, Rakesh Gosangi, Raymond (Haimin) Zhang for their initial collaboration. We thank Jonathan Kummerfeld for discussions. This work was performed while Lisa interned at Bloomberg, and was later supported by DARPA MCS Grant N66001-19-2-4031, NSF-CAREER Award 1846185, and a NSF PhD Fellowship. The views are those of the authors and not of the funding agencies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Topic Detection and Extraction in Chat", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "H" |
| ], |
| "last": "Adams", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "H" |
| ], |
| "last": "Martell", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "IEEE International Conference on Semantic Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "581--588", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. H. Adams and C. H. Martell. 2008. Topic Detec- tion and Extraction in Chat. In 2008 IEEE Inter- national Conference on Semantic Computing, pages 581-588.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Signature Verification Using a \"Siamese\" Time Delay Neural Network", |
| "authors": [ |
| { |
| "first": "Jane", |
| "middle": [], |
| "last": "Bromley", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "James", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bentz", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabelle", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Guyon", |
| "suffix": "" |
| }, |
| { |
| "first": "Cliff", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| }, |
| { |
| "first": "Roopak", |
| "middle": [], |
| "last": "S\u00e4ckinger", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "International Journal of Pattern Recognition and Artificial Intelligence", |
| "volume": "7", |
| "issue": "04", |
| "pages": "669--688", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jane Bromley, James W Bentz, L\u00e9on Bottou, Is- abelle Guyon, Yann LeCun, Cliff Moore, Eduard S\u00e4ckinger, and Roopak Shah. 1993. Signature Veri- fication Using a \"Siamese\" Time Delay Neural Net- work. International Journal of Pattern Recognition and Artificial Intelligence, 7(04):669-688.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "On the Properties of Neural Machine Translation: Encoder-Decoder Approaches", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merri\u00ebnboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of SSST-8, Eighth Workshop on Syntax, Semantics and Structure in Statistical Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "103--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart van Merri\u00ebnboer, Dzmitry Bah- danau, and Yoshua Bengio. 2014. On the Properties of Neural Machine Translation: Encoder-Decoder Approaches. In Proceedings of SSST-8, Eighth Workshop on Syntax, Semantics and Structure in Sta- tistical Translation, pages 103-111.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Recovering Implicit Thread Structure in Chat Conversations", |
| "authors": [ |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Dulceanu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Romanian Journal of Human-Computer Interaction", |
| "volume": "9", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrei Dulceanu. 2016. Recovering Implicit Thread Structure in Chat Conversations. Romanian Journal of Human-Computer Interaction, 9(3).", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "You Talking to Me? a Corpus and Algorithm for Conversation Disentanglement. ACL-08: HLT", |
| "authors": [ |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Micha Elsner and Eugene Charniak. 2008. You Talking to Me? a Corpus and Algorithm for Conversation Disentanglement. ACL-08: HLT, page 834.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Disentangling Chat", |
| "authors": [ |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Computational Linguistics", |
| "volume": "36", |
| "issue": "3", |
| "pages": "389--409", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Micha Elsner and Eugene Charniak. 2010. Disentan- gling Chat. Computational Linguistics, 36(3):389- 409.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Who is Answering Whom? finding \"Reply-To\" Relations in Group Chats with Deep Bidirectional LSTM Networks. Cluster Computing", |
| "authors": [ |
| { |
| "first": "Gaoyang", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Chaokun", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengcheng", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "" |
| }, |
| { |
| "first": "Weijun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1--12", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gaoyang Guo, Chaokun Wang, Jun Chen, Pengcheng Ge, and Weijun Chen. 2018. Who is Answering Whom? finding \"Reply-To\" Relations in Group Chats with Deep Bidirectional LSTM Networks. Cluster Computing, pages 1-12.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Pre-Trained and Attention-Based Neural Networks for Building Noetic Task-Oriented Dialogue Systems", |
| "authors": [ |
| { |
| "first": "Jia-Chen", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianda", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu-Ping", |
| "middle": [], |
| "last": "Ruan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "AAAI 2020 Workshop on The Eighth Dialog System Technology Challenge", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jia-Chen Gu, Tianda Li, Quan Liu, Xiaodan Zhu, Zhen- Hua Ling, and Yu-Ping Ruan. 2020. Pre-Trained and Attention-Based Neural Networks for Building Noetic Task-Oriented Dialogue Systems. In AAAI 2020 Workshop on The Eighth Dialog System Tech- nology Challenge.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Long Short-Term Memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long Short-Term Memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Learning to Disentangle Interleaved Conversational Threads with a Siamese Hierarchical Network and Similarity Ranking", |
| "authors": [ |
| { |
| "first": "Jyun-Yu", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Francine", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan-Ying", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1812--1822", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jyun-Yu Jiang, Francine Chen, Yan-Ying Chen, and Wei Wang. 2018. Learning to Disentangle Inter- leaved Conversational Threads with a Siamese Hi- erarchical Network and Similarity Ranking. In Pro- ceedings of the 2018 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long Papers), pages 1812-1822.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "The Eighth Dialog System Technology Challenge", |
| "authors": [ |
| { |
| "first": "Seokhwan", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chulaka", |
| "middle": [], |
| "last": "Gunasekara", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungjin", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Atkinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Baolin", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannes", |
| "middle": [], |
| "last": "Schulz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "Adada", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1911.06394" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seokhwan Kim, Michel Galley, Chulaka Gunasekara, Sungjin Lee, Adam Atkinson, Baolin Peng, Hannes Schulz, Jianfeng Gao, Jinchao Li, Mahmoud Adada, et al. 2019. The Eighth Dialog System Technology Challenge. arXiv preprint arXiv:1911.06394.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A Method for Stochastic Optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "J Adam", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and J Adam Ba. 2019. A Method for Stochastic Optimization. arxiv 2014. arXiv preprint arXiv:1412.6980, 434.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A Large-Scale Corpus for Conversation Disentanglement", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jonathan K Kummerfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Sai", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gouravajhala", |
| "suffix": "" |
| }, |
| { |
| "first": "Vignesh", |
| "middle": [], |
| "last": "Peper", |
| "suffix": "" |
| }, |
| { |
| "first": "Chulaka", |
| "middle": [], |
| "last": "Athreya", |
| "suffix": "" |
| }, |
| { |
| "first": "Jatin", |
| "middle": [], |
| "last": "Gunasekara", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ganhotra", |
| "suffix": "" |
| }, |
| { |
| "first": "Sankalp", |
| "middle": [], |
| "last": "Siva", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Patel", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Lazaros", |
| "suffix": "" |
| }, |
| { |
| "first": "Walter", |
| "middle": [], |
| "last": "Polymenakos", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lasecki", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3846--3856", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan K Kummerfeld, Sai R Gouravajhala, Joseph J Peper, Vignesh Athreya, Chulaka Gunasekara, Jatin Ganhotra, Siva Sankalp Patel, Lazaros C Poly- menakos, and Walter Lasecki. 2019a. A Large-Scale Corpus for Conversation Disentanglement. In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 3846- 3856.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A large-scale corpus for conversation disentanglement", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "K" |
| ], |
| "last": "Kummerfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Sai", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gouravajhala", |
| "suffix": "" |
| }, |
| { |
| "first": "Vignesh", |
| "middle": [], |
| "last": "Peper", |
| "suffix": "" |
| }, |
| { |
| "first": "Chulaka", |
| "middle": [], |
| "last": "Athreya", |
| "suffix": "" |
| }, |
| { |
| "first": "Jatin", |
| "middle": [], |
| "last": "Gunasekara", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ganhotra", |
| "suffix": "" |
| }, |
| { |
| "first": "Sankalp", |
| "middle": [], |
| "last": "Siva", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Patel", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Lazaros", |
| "suffix": "" |
| }, |
| { |
| "first": "Walter", |
| "middle": [], |
| "last": "Polymenakos", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lasecki", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3846--3856", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1374" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan K. Kummerfeld, Sai R. Gouravajhala, Joseph J. Peper, Vignesh Athreya, Chulaka Gu- nasekara, Jatin Ganhotra, Siva Sankalp Patel, Lazaros C Polymenakos, and Walter Lasecki. 2019b. A large-scale corpus for conversation disentangle- ment. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguis- tics, pages 3846-3856, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Dialbert: A Hierarchical Pre-Trained Model for Conversation Disentanglement", |
| "authors": [ |
| { |
| "first": "Tianda", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jia-Chen", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiming", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianda Li, Jia-Chen Gu, Xiaodan Zhu, Quan Liu, Zhen- Hua Ling, Zhiming Su, and Si Wei. 2020. Dialbert: A Hierarchical Pre-Trained Model for Conversation Disentanglement. CoRR, abs/2004.03760.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "End-to-End Transition-Based Online Dialogue Disentanglement", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhan", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jia-Chen", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI-20", |
| "volume": "", |
| "issue": "", |
| "pages": "3868--3874", |
| "other_ids": { |
| "DOI": [ |
| "10.24963/ijcai.2020/535" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Liu, Zhan Shi, Jia-Chen Gu, Quan Liu, Si Wei, and Xiaodan Zhu. 2020. End-to-End Transition-Based Online Dialogue Disentanglement. In Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI-20, pages 3868- 3874. International Joint Conferences on Artificial Intelligence Organization. Main track.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Note on the Sampling Error of the Difference between Correlated Proportions or Percentages", |
| "authors": [ |
| { |
| "first": "Quinn", |
| "middle": [], |
| "last": "Mcnemar", |
| "suffix": "" |
| } |
| ], |
| "year": 1947, |
| "venue": "Psychometrika", |
| "volume": "12", |
| "issue": "2", |
| "pages": "153--157", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quinn McNemar. 1947. Note on the Sampling Error of the Difference between Correlated Proportions or Percentages. Psychometrika, 12(2):153-157.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Chat Disentanglement: Identifying Semantic Reply Relationships with Random Forests and Recurrent Neural Networks", |
| "authors": [ |
| { |
| "first": "Shikib", |
| "middle": [], |
| "last": "Mehri", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Carenini", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "615--623", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shikib Mehri and Giuseppe Carenini. 2017. Chat Dis- entanglement: Identifying Semantic Reply Relation- ships with Random Forests and Recurrent Neural Networks. In Proceedings of the Eighth Interna- tional Joint Conference on Natural Language Pro- cessing (Volume 1: Long Papers), pages 615-623.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Glove: Global Vectors for Word Representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global Vectors for Word Representation. In Proceedings of the 2014 confer- ence on empirical methods in natural language pro- cessing (EMNLP), pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Thread Detection in Dynamic Text Message Streams", |
| "authors": [ |
| { |
| "first": "Dou", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian-Tao", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 29th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '06", |
| "volume": "", |
| "issue": "", |
| "pages": "35--42", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/1148170.1148180" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dou Shen, Qiang Yang, Jian-Tao Sun, and Zheng Chen. 2006. Thread Detection in Dynamic Text Message Streams. In Proceedings of the 29th Annual Inter- national ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '06, page 35-42, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Improved Semantic Representations from Tree-Structured Long Short-Term Memory Networks", |
| "authors": [ |
| { |
| "first": "Kai Sheng", |
| "middle": [], |
| "last": "Tai", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1556--1566", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Sheng Tai, Richard Socher, and Christopher D Manning. 2015a. Improved Semantic Representa- tions from Tree-Structured Long Short-Term Mem- ory Networks. In Proceedings of the 53rd Annual Meeting of the Association for Computational Lin- guistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1556-1566.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Improved semantic representations from tree-structured long short-term memory networks", |
| "authors": [ |
| { |
| "first": "Kai Sheng", |
| "middle": [], |
| "last": "Tai", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1556--1566", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P15-1150" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Sheng Tai, Richard Socher, and Christopher D. Manning. 2015b. Improved semantic representa- tions from tree-structured long short-term memory networks. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 1556-1566, Beijing, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Evaluation of multi-party virtual reality dialogue interaction", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "R" |
| ], |
| "last": "Traum", |
| "suffix": "" |
| }, |
| { |
| "first": "Susan", |
| "middle": [], |
| "last": "Robinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Jens", |
| "middle": [], |
| "last": "Stephan", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Fourth International Conference on Language Resources and Evaluation (LREC'04)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David R. Traum, Susan Robinson, and Jens Stephan. 2004. Evaluation of multi-party virtual reality dia- logue interaction. In Proceedings of the Fourth In- ternational Conference on Language Resources and Evaluation (LREC'04), Lisbon, Portugal. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Pointer Networks", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Meire", |
| "middle": [], |
| "last": "Fortunato", |
| "suffix": "" |
| }, |
| { |
| "first": "Navdeep", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2692--2700", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Meire Fortunato, and Navdeep Jaitly. 2015. Pointer Networks. In Advances in Neural In- formation Processing Systems, pages 2692-2700.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Learning Online Discussion Structures by Conditional Random Fields", |
| "authors": [ |
| { |
| "first": "Hongning", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengxiang", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiawei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 34th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '11", |
| "volume": "", |
| "issue": "", |
| "pages": "435--444", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/2009916.2009976" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hongning Wang, Chi Wang, ChengXiang Zhai, and Ji- awei Han. 2011. Learning Online Discussion Struc- tures by Conditional Random Fields. In Proceed- ings of the 34th International ACM SIGIR Confer- ence on Research and Development in Information Retrieval, SIGIR '11, page 435-444, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Recovering Implicit Thread Structure in Newsgroup Style Conversations", |
| "authors": [ |
| { |
| "first": "Yi-Chia", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahesh", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Carolyn", |
| "middle": [], |
| "last": "Ros\u00e9", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi-Chia Wang, Mahesh Joshi, William Cohen, and Car- olyn Ros\u00e9. 2008. Recovering Implicit Thread Struc- ture in Newsgroup Style Conversations. In AAAI.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Online Conversation Disentanglement with Pointer Networks", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shafiq", |
| "middle": [], |
| "last": "Joty", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "6321--6330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Yu and Shafiq Joty. 2020a. Online Conversa- tion Disentanglement with Pointer Networks. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6321-6330.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Online conversation disentanglement with pointer networks", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shafiq", |
| "middle": [], |
| "last": "Joty", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "6321--6330", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.512" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Yu and Shafiq Joty. 2020b. Online conversation disentanglement with pointer networks. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6321-6330, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Who Did They Respond To? Conversation Structure Modeling Using Masked Hierarchical Transformer", |
| "authors": [ |
| { |
| "first": "Henghui", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Feng", |
| "middle": [], |
| "last": "Nan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiguo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramesh", |
| "middle": [], |
| "last": "Nallapati", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Henghui Zhu, Feng Nan, Zhiguo Wang, Ramesh Nal- lapati, and Bing Xiang. 2020. Who Did They Re- spond To? Conversation Structure Modeling Using Masked Hierarchical Transformer. In Proceedings of the AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Dialogue Act Classification in Group Chats with DAG-LSTMs", |
| "authors": [ |
| { |
| "first": "Rakesh", |
| "middle": [], |
| "last": "Ozanirsoy", |
| "suffix": "" |
| }, |
| { |
| "first": "Haimin", |
| "middle": [], |
| "last": "Gosangi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mu-Hsin", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Duccio", |
| "middle": [], |
| "last": "Lund", |
| "suffix": "" |
| }, |
| { |
| "first": "Brendan", |
| "middle": [], |
| "last": "Pappadopulo", |
| "suffix": "" |
| }, |
| { |
| "first": "Neophytos", |
| "middle": [], |
| "last": "Fahy", |
| "suffix": "" |
| }, |
| { |
| "first": "Camilo", |
| "middle": [], |
| "last": "Neophytou", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ortiz", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "SIGIR 2019 Workshop on Conversational Interaction Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ozan \u0130rsoy, Rakesh Gosangi, Haimin Zhang, Mu- Hsin Wei, Peter Lund, Duccio Pappadopulo, Bren- dan Fahy, Neophytos Neophytou, and Camilo Ortiz. 2019. Dialogue Act Classification in Group Chats with DAG-LSTMs. In SIGIR 2019 Workshop on Conversational Interaction Systems.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF2": { |
| "type_str": "table", |
| "content": "<table><tr><td>Results of our experiments (bottom, best</td></tr><tr><td>in bold) and literature (top, best underlined). The</td></tr><tr><td>\u2191(\u2193) sign indicates the model being significantly better</td></tr><tr><td>(worse) (p < 0.05) than the DAG-LSTM entry based</td></tr><tr><td>on a McNemar test (McNemar, 1947) conducted on the</td></tr><tr><td>test set. User features and mention links are included</td></tr><tr><td>in this baseline model, thread encoding and self-link</td></tr><tr><td>threshold tuning are not. Starred entries use contextual</td></tr><tr><td>embeddings.</td></tr></table>", |
| "num": null, |
| "text": "", |
| "html": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "content": "<table><tr><td>Model</td><td>Self-links</td></tr><tr><td/><td>P R F</td></tr><tr><td>BiLSTM</td><td>79.6 94.6 86.5</td></tr><tr><td>DAG-LSTM</td><td>82.8 93.8 88.0</td></tr><tr><td>+ self-links threshold</td><td>87.7 92.4 90.0</td></tr></table>", |
| "num": null, |
| "text": "DAG-LSTM + thread enc. 81.4 93.8 87.2 + self-links threshold 89.8 90.6 90.2", |
| "html": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "content": "<table/>", |
| "num": null, |
| "text": "Thread starters (self-links) performances for our models inTable 1, before and after thresholding.", |
| "html": null |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "content": "<table/>", |
| "num": null, |
| "text": "Hyperparameters of the model architectures. During hyperparameter optimization, we perform a random search according to the distributions described above. Categorical distributions have uniform probability mass function.", |
| "html": null |
| } |
| } |
| } |
| } |