| { |
| "paper_id": "C18-1046", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:10:38.724106Z" |
| }, |
| "title": "Implicit Discourse Relation Recognition using Neural Tensor Network with Interactive Attention and Sparse Learning", |
| "authors": [ |
| { |
| "first": "Fengyu", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tianjin University", |
| "location": { |
| "settlement": "Tianjin", |
| "country": "China" |
| } |
| }, |
| "email": "fengyuguo@tju.edu.cn" |
| }, |
| { |
| "first": "Ruifang", |
| "middle": [], |
| "last": "He", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tianjin University", |
| "location": { |
| "settlement": "Tianjin", |
| "country": "China" |
| } |
| }, |
| "email": "rfhe@tju.edu.cn" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tianjin University", |
| "location": { |
| "settlement": "Tianjin", |
| "country": "China" |
| } |
| }, |
| "email": "jindi@tju.edu.cn" |
| }, |
| { |
| "first": "Jianwu", |
| "middle": [], |
| "last": "Dang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tianjin University", |
| "location": { |
| "settlement": "Tianjin", |
| "country": "China" |
| } |
| }, |
| "email": "jdang@jaist.ac.jp" |
| }, |
| { |
| "first": "Longbiao", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tianjin University", |
| "location": { |
| "settlement": "Tianjin", |
| "country": "China" |
| } |
| }, |
| "email": "longbiaowang@tju.edu.cn" |
| }, |
| { |
| "first": "Xiangang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "AI Labs", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "lixiangang@didichuxing.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Implicit discourse relation recognition aims to understand and annotate the latent relations between two discourse arguments, such as temporal, comparison, etc. Most previous methods encode two discourse arguments separately, the ones considering pair specific clues ignore the bidirectional interactions between two arguments and the sparsity of pair patterns. In this paper, we propose a novel Neural Tensor Network framework with Interactive Attention and Sparse Learning (TIASL) for implicit discourse relation recognition. (1) We mine the most correlated word pairs from two discourse arguments to model pair specific clues, and integrate them as interactive attention into argument representations produced by the bidirectional long short-term memory network. Meanwhile, (2) the neural tensor network with sparse constraint is proposed to explore the deeper and the more important pair patterns so as to fully recognize discourse relations. The experimental results on PDTB show that our proposed TIASL framework is effective.", |
| "pdf_parse": { |
| "paper_id": "C18-1046", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Implicit discourse relation recognition aims to understand and annotate the latent relations between two discourse arguments, such as temporal, comparison, etc. Most previous methods encode two discourse arguments separately, the ones considering pair specific clues ignore the bidirectional interactions between two arguments and the sparsity of pair patterns. In this paper, we propose a novel Neural Tensor Network framework with Interactive Attention and Sparse Learning (TIASL) for implicit discourse relation recognition. (1) We mine the most correlated word pairs from two discourse arguments to model pair specific clues, and integrate them as interactive attention into argument representations produced by the bidirectional long short-term memory network. Meanwhile, (2) the neural tensor network with sparse constraint is proposed to explore the deeper and the more important pair patterns so as to fully recognize discourse relations. The experimental results on PDTB show that our proposed TIASL framework is effective.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Discourse relation describes how two adjacent text units (e.g. clauses, sentences, and larger sentence groups), called arguments, named Arg1 and Arg2, are connected semantically, such as temporally, causally, etc. Yet implicit discourse relation recognition without explicit connectives (Pitler et al., 2008) , which needs to infer the relation from specific context, is still a challenging problem. It can be used in text summarization (Gerani et al., 2014) , conversation system (Higashinaka et al., 2014) and so on.", |
| "cite_spans": [ |
| { |
| "start": 287, |
| "end": 308, |
| "text": "(Pitler et al., 2008)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 437, |
| "end": 458, |
| "text": "(Gerani et al., 2014)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 481, |
| "end": 507, |
| "text": "(Higashinaka et al., 2014)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Previous researches mainly include (1) traditional feature-based models and (2) neural network based models. Most feature-based models adopt various linguistic features (such as polarity, word pairs, and position information, etc.) and design complicated rules to recognize implicit discourse relations (Pitler et al., 2009; Zhou et al., 2010; Braud and Denis, 2015) . They can not fully use the local and the global context, and the human cost is huge. Neural network based models get the better argument representations and more precisely capture discourse relations (Braud and Denis, 2015; Zhang et al., 2015; . However, they encode two discourse arguments separately, and ignore pair specific clues. The further researches adopt the different hybrid neural models (Chen et al., 2016; Lei et al., 2017) and attention mechanism (Cai and Zhao, 2017) to mine the semantic interactions of argument pairs. Yet, they ignore the bidirectional interactions between two arguments during the representation stage since there is asymmetry from the perspective of human-like reading strategy. And the sparsity of word pair patterns indicating discourse relation is neither considered.", |
| "cite_spans": [ |
| { |
| "start": 303, |
| "end": 324, |
| "text": "(Pitler et al., 2009;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 325, |
| "end": 343, |
| "text": "Zhou et al., 2010;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 344, |
| "end": 366, |
| "text": "Braud and Denis, 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 569, |
| "end": 592, |
| "text": "(Braud and Denis, 2015;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 593, |
| "end": 612, |
| "text": "Zhang et al., 2015;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 768, |
| "end": 787, |
| "text": "(Chen et al., 2016;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 788, |
| "end": 805, |
| "text": "Lei et al., 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 830, |
| "end": 850, |
| "text": "(Cai and Zhao, 2017)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Therefore, a novel neural Tensor network model with Interactive Attention and Sparse Learning is proposed for implicit discourse relation recognition, namely TIASL. We imitate the human-like reading strategy, and model the relatedness between two discourse arguments as a kind of interactive attention from bidirectional aspects. It is added into the argument representations with a bidirectional Long Short-Term Memory network (Bi-LSTM), and then plugged in neural tensor network (NTN) with l 1 reg- ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "h 1 1 h 1 i h 1 1 n h 2 1 h 2 1 h 2 i h 2 2 n h 1 0 h 1 1 h 1 i h 1 1 n h 2 0 h 2 1 h 2 i h 2 2 n h \u2299 + + NTN Figure 1: The TIASL framework.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "ularization. This helps to mine the different aspects of semantic interactions between two arguments and select the important and the informative word pair patterns. Our main contributions are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Propose a novel TIASL framework from the perspectives of the human-like bidirectional reading strategy and the sparsity of word pair patterns;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Encode the discourse arguments by the Bi-LSTM with interactive attention for implicit discourse relation recognition;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Use neural tensor network with sparse constraint to capture the deeper and the more indicative pair patterns;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Experimental results on PDTB show that our TIASL model is effective.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We formalize implicit discourse relation recognition as a classification problem. The proposed TIASL framework is shown in Figure 1 . The main steps include (1) discourse argument representations with interactive attention based on Bi-LSTM and (2) sparse pair pattern selection and implicit discourse relation recognition.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 123, |
| "end": 131, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Proposed Method", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Attention mechanism has achieved great success in image recognition, which is based on the visual attention principle found in humans. Recently, it is widely adopted in many NLP tasks. Inspired by (Herzog et al., 2016) , we imitate the human-like bidirectional reading strategy, and propose an interactive attention mechanism to enhance discourse argument representations. For the original representations of discourse arguments shown in Figure 1 , we first associate each word w in the vocabulary with a vector representation x w \u2208 R d , where d is the dimension of the embeddings.", |
| "cite_spans": [ |
| { |
| "start": 197, |
| "end": 218, |
| "text": "(Herzog et al., 2016)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 438, |
| "end": 446, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discourse Argument Representations with Interactive Attention", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Since each argument is viewed as a sequence of word vectors, let x 1 i (x 2 i ) be the i-th word vector in Arg1 (Arg2), thus the arguments in a discourse relation are expressed as,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Argument Representations with Interactive Attention", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Arg1 : [x 1 1 , x 1 2 , ..., x 1 n 1 ], Arg2 : [x 2 1 , x 2 2 , ..., x 2 n 2 ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Argument Representations with Interactive Attention", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where Arg1 (Arg2) has n 1 (n 2 ) words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Argument Representations with Interactive Attention", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Long Short-Term Memory network (LSTM) (Hochreiter and Schmidhuber, 1997 ) is a variant of recurrent neural network. Considering that it can model long-term dependencies and encode context information, we use it in the basic argument representation. Given the word representations of two arguments as we just described, the LSTM computes the state sequence for each position t using the following equations:", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 71, |
| "text": "(Hochreiter and Schmidhuber, 1997", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "i t = \u03c3(W i [x t , h t\u22121 ] + b i ), (1) f t = \u03c3(W f [x t , h t\u22121 ] + b f ), (2) o t = \u03c3(W o [x t , h t\u22121 ] + b o ), (3) c t = tanh(W c [x t , h t\u22121 ] + b c ), (4) c t = i t c t + f t c t\u22121 , (5) h t = o t tanh(c t ).", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "where i t , f t , o t , c t , h", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "t denote the input gate, forget gate, output gate, memory cell and hidden state at position t respectively. Since LSTM only considers the context from the previous, we utilize a bidirectional LSTM (Bi-LSTM) preserving both history and future information. Therefore, at each position t of the sequence, we can obtain two representations \u2212 \u2192 h t and \u2190 \u2212 h t . Then we concatenate them to get the intermediate state", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "W i , W f , W o , W c , b i , b f , b o , b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "h t = [ \u2212 \u2192 h t , \u2190 \u2212 h t ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "For Arg1 and Arg2, we encode them into the contextual representations by Bi-LSTM. That is,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "h 1 i = [ \u2212 \u2192 h 1 i , \u2190 \u2212 h 1 i ] and h 2 j = [ \u2212 \u2192 h 2 j , \u2190 \u2212 h 2 j ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "are the intermediate states of i-th word in Arg1 and j-th word in Arg2 respectively, where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "\u2212 \u2192 h 1 i , \u2190 \u2212 h 1 i , \u2212 \u2192 h 2 j , \u2190 \u2212 h 2 j \u2208 R d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": ". Separately encoding arguments with Bi-LSTM could not reflect the semantic between two discourse arguments in a discourse relation. In order to fully use their semantic connections, we explore a novel argument representation. Herzog et al. (2016) proposed the two stage model of visual perception which indicated that people's image recognition includes two stages: collecting information and understanding information. In daily life, we have a similar feeling intuitively during reading: a more reasonable strategy is that people may read two discourse arguments back and forth, and find some relevant and informative clues helpful to judge the discourse relation. Due to the different reading order of two arguments, people may get the different focused information and thus have the different decisions. Therefore, we model the reciprocal attention on discourse arguments from two directions.", |
| "cite_spans": [ |
| { |
| "start": 227, |
| "end": 247, |
| "text": "Herzog et al. (2016)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Bi-LSTM", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "Firstly, we calculate semantic connections between word pairs in two arguments as a pair-wise matrix shown in Eq.(7), which indicates the relevant score of i-th Arg1 word and j-th Arg2 word by dot product of their hidden representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "S(i, j) = (h 1 i ) T \u2022 h 2 j .", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "where S \u2208 R n 1 \u00d7n 2 , n 1 and n 2 are the lengths of Arg1 and Arg2, respectively. Secondly, as the concerns of the forward and the reverse reading order are asymmetric when judging the relation of two discourse arguments. For each word in Arg2, we apply a column-wise softmax function on the pair-wise matrix S to get a probability distribution \u03b1 t over Arg1, shown in Eq. 8. Similarly, we conduct a row-wise softmax function to get \u03b2 t over Arg2 when considering one Arg1 word, shown in Eq.(9). We denote \u03b1 t \u2208 R n 1 as Arg2-to-Arg1 attention, and \u03b2 t \u2208 R n 2 as Arg1-to-Arg2 attention at position t, which are named interactive attention.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b1 t = sof tmax(S(1, t), ..., S(n 1 , t)), (8) \u03b2 t = sof tmax(S(t, 1), ..., S(t, n 2 )).", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "\u03b1 t = [\u03b1 1 t , \u03b1 2 t , ..., \u03b1 n 1 t ], \u03b1 i t means the attention value of i-th word in Arg1 at position t. Likewise, \u03b2 t = [\u03b2 1 t , \u03b2 2 t , ..., \u03b2 n 2 t ], \u03b2 j t", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "is the attention value of j-th word in Arg2 at position t. In order to exploit the overall influence information to represent semantic connection of two discourse arguments, we average all the \u03b1 t , \u03b2 t to get the final attention of Arg1 and Arg2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b1 = 1 n 2 n 2 t=1 \u03b1 t , \u03b2 = 1 n 1 n 1 t=1 \u03b2 t .", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "The new argument representations integrating argument context and interactive attention are shown as Eq. 11, which reflect the human-like bidirectional reading strategy to some extent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h Arg1 = h 1 \u03b1, h Arg2 = h 2 \u03b2.", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": "Model the Asymmetry of Reciprocal Attention on Discourse Arguments", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "Observations show that there are some pair patterns in a discourse relation. Once we detect these interactions expressing pair patterns, discriminating discourse relation is obvious. However, how to represent and select this kind of interaction is a problem.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sparse Pair Pattern Selection and Discourse Relation Recognition", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Conventional methods to measure the relevance between two arguments includes bilinear model (Jenatton et al., 2012), and single layer neural networks (Collobert and Weston, 2008) , etc. These methods could hardly model the complex and informative interactions. Success in knowledge graph (Socher et al., 2013a) shows that tensor can model multiple interactions in data. Therefore, we further employ a tensor layer to mine the deeper semantic interactions based on the new argument representations so as to recognize implicit discourse relations.", |
| "cite_spans": [ |
| { |
| "start": 150, |
| "end": 178, |
| "text": "(Collobert and Weston, 2008)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 288, |
| "end": 310, |
| "text": "(Socher et al., 2013a)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Tensor Network", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Tensor is a geometric object that describes relations between vectors, scalars, and others. It can be represented as a multi-dimensional array of numerical values. Following the NTN (Socher et al., 2013a; Pei et al., 2014) , we utilize a 3-way tensor M [1:k] \u2208 R d h \u00d7d h \u00d7k to model the interactions shown in Eq. 12.", |
| "cite_spans": [ |
| { |
| "start": 182, |
| "end": 204, |
| "text": "(Socher et al., 2013a;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 205, |
| "end": 222, |
| "text": "Pei et al., 2014)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Tensor Network", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "g(h Arg1 , h Arg2 ) = u T f (h Arg1 ) T M [1:k] h Arg2 + V h Arg1 h Arg2 + b .", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "Neural Tensor Network", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "where f is a standard nonlinearity applied element-wise, M [1:k] \u2208 R d h \u00d7d h \u00d7k is a tensor and the bilinear tensor product (h Arg1 ) T M [1:k] h Arg2 results in a vector m \u2208 R k , where each entry is computed by one slice i = 1, 2, ..., k of the tensor:", |
| "cite_spans": [ |
| { |
| "start": 139, |
| "end": 144, |
| "text": "[1:k]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Tensor Network", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "m i = (h Arg1 ) T M [i] h Arg2 . The other parameters V \u2208 R k\u00d72d h , u \u2208 R k , b \u2208 R k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Tensor Network", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "are the standard form of a neural network. Here, each tensor slice can be seen as a \"feature extractor\", which extracts the features expressing the Arg1-Arg2 interactions. Through the tensor layer, we can obtain the semantic interactions between two arguments as features, which are further reshaped to a vector and fed to a full connection hidden layer. Then we apply a softmax function in the output layer to compute the probabilities of different relations and recognize them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Tensor Network", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Given a training corpus which contains n instances {(x, y)} n r=1 , (x, y) denotes an argument pair and its label. We employ the cross-entropy error to assess how well the predicted relation represents the real relation, defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L(\u0177, y) = \u2212 C j=1 y j log(P r(\u0177 j )).", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "where P r(\u0177 j ) is the predicted probabilities of labels, C is the class number. Based on argument representations with interactive attention, tensor embodies the different aspects of semantic interactions between two arguments. However, not all the interactions are useful. There could exist some redundant and noisy interactions influencing the system performance. In order to remove the irrelevant interactions and select the indicative pair patterns, the large portion of M [i] should be zero. Therefore, we introduce the 1-norm regularizer to promote the feature sparsity. This element-wise sparsity can be helpful when most of the features are irrelevant to the learning objective. Furthermore, we also add l 2 regularization to avoid over-fitting issue. And the training objective function is transformed as:", |
| "cite_spans": [ |
| { |
| "start": 478, |
| "end": 481, |
| "text": "[i]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "J(\u03b8) = 1 n n r=1 L(\u0177 (r) , y (r) ) + R(\u03b8),", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "R(\u03b8) = \u03bb M \u03b8 M 1 + \u03bb O 2 \u03b8 O 2 .", |
| "eq_num": "(15)" |
| } |
| ], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "where R(\u03b8) is the regularization term with respect to \u03b8. We divide \u03b8 into two parts: \u03b8 M is the tensor term weights, and \u03b8 O is the other parameters of our model. Especially, \u03b8 M 1 in Eq. 15is l 1 regularization for the tensor slices, which is used to filter the important values.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "To minimize the objective, we employ the proximal gradient descent method (Parikh and Boyd, 2014) since l 1 regularization is non-differentiable at zero. It is used for optimizing the objective as a combination of both smooth and non-smooth terms. The update formulas is as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b8 (t ) i = \u03b8 (t) i \u2212 \u03b3( \u2202L \u2202\u03b8 i + \u03bb \u2202R \u2202\u03b8 i )| \u03b8 i =\u03b8 (t) i ,", |
| "eq_num": "(16)" |
| } |
| ], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2202R \u2202\u03b8 i = 2\u03b8 (t) i , if \u03b8 2 ; sign(\u03b8 (t) i ), if \u03b8 1 , and \u03b8 (t) i = 0. (17) \u03b8 (t+1) i = prox \u03bb (\u03b8 t i ) = \u03c4 (\u03b8 t i , \u03b3\u03bb),", |
| "eq_num": "(18)" |
| } |
| ], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03c4 (a, z) = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 a \u2212 z, if a > z; a + z, if a < \u2212z; 0, otherwise.", |
| "eq_num": "(19)" |
| } |
| ], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "where prox \u03bb is a proximal operator, \u03c4 is a soft-thresholding operator, and \u03b3 is the learning rate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "3 Experiments", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training with Sparse Constraint", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Corpus. We use the Penn Discourse TreeBank (PDTB) (Prasad et al., 2008) , which is the largest handannotated discourse relation corpus annotated on 2312 Wall Street Journal (WSJ) articles. Experiments are conducted on the four top-level classes as in previous work (Rutherford and Xue, 2014; Chen et al., 2016) . Following the conventional data splitting, we use Section 2-20 as training set, Section 21-22 as testing set, and Section 0-1 as development set. The relevant statistics is shown in We do not present the details of tuning the hyper-parameters and only give their final settings as shown in Table 2 .", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 71, |
| "text": "(Prasad et al., 2008)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 265, |
| "end": 291, |
| "text": "(Rutherford and Xue, 2014;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 292, |
| "end": 310, |
| "text": "Chen et al., 2016)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 603, |
| "end": 610, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Preparation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To evaluate our model, we adopt two kinds of experiment settings, including a four-way classification and four separate one-vs-other binary classification. The former is to observe the overall performance. And the latter is to solve the problem of unbalance data, where each top level class is against the other three discourse relation classes. We use an equal number of positive and negative instances in the training set in each class. The testing set and development set keep the natural state.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Preparation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We choose the following models as our baselines, which are the state-of-the-art models in argument representation, interaction and attention aspects during implicit discourse relation recognition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Methods", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Ji2015: Ji and Eisenstein (2014) utilized two recursive neural networks on the syntactic parse tree to induce argument representation and entity spans. \u2022 Liu2016: designed Neural Networks with Multi-level Attention (NNMA) and selected the important words for recognizing discourse relation. Here, we select the models with two and three levels attention as baselines.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Methods", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Besides, we also use the following variants of RNN and the proposed TIASL model for comparisons.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Methods", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 LSTM: encode two discourse arguments by LSTM respectively, and concatenate the two representations, feeding them to the fully connected hidden layer as the input of softmax classifier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Methods", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Bi-LSTM: based on LSTM, we consider the bidirectional context information, and use Bi-LSTM to encode two discourse arguments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Methods", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Bi-LSTM+Interactive Attention: further integrate the interactive attention to obtain the new argument representations shown in Eq.(11).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Methods", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Bi-LSTM+Tensor Layer: based on Bi-LSTM, adopt the neural tensor network to capture the semantic interaction between two arguments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Methods", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 TIA with k-Max Pooling: use k-max pooling operation instead of our sparse strategy to select features after tensor layer in neural Tensor network with Interactive Attention model (TIA).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Methods", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Our TIASL: based on Bi-LSTM with Interactive Attention and Tensor Layer, we add l 1 regularization for tensor parameters in order to capture the most important interactions. Table 3 shows the overall performance, using F 1 score and accuracy for four-way classification and F 1 score for binary classification. With respect to four-way classification, we have the following observations:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 176, |
| "end": 183, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison Methods", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Ji2015 gains the lowest performance on both F 1 score and accuracy, which separately computes discourse argument representations by integrating syntactic parse tree into RNN. It indicates a simple neural network, which ignores the interactive context of two discourse arguments, is not sufficient for implicit discourse relation recognition. Table 3 : Comparisons with the state-of-the-art models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 344, |
| "end": 351, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Overall Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 The accuracy of Chen2016 model is better than that of other baselines. It verifies the effectiveness of word pair information, which uses gate mechanism to control the combination of linear and nonlinear interactions between argument pairs. However, there unavoidably exist some noises, and this model has not considered the sparsity of pair patterns. F 1 score of Liu2016 (two levels) model is higher than that of other baselines, which is 1.34% higher than that of the three-level attention model. It indicates that attention mechanism is useful, and yet paying more attention may bring the over-fitting problem due to more parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Overall Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Our TIASL gains an improvement of 1.30% in F 1 score over that of Liu2016 (two levels), and an improvement of 1.22% in accuracy over that of Chen2016. The results imply that our model with interactive attention for the bidirectional asymmetry of two arguments and sparse pair pattern selection is useful for recognizing implicit discourse relation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Overall Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For binary classification, the observations are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Overall Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 F 1 scores of Temporal relation are the lowest in all models. This is reasonable since it accounts for the smallest number of instances (only 5%) in the corpus. With the increase of instance number in different relations, F 1 scores also rise. It proves that the corpus is also crucial to implicit discourse relation recognition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Overall Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Qin2016 gains the best performance on Contingency, and our TIASL model obtains a comparable score with it. Notably, Chen2016 and Liu2016 (two levels) are quite relevant work to ours. Different from them, our model integrates the attention-based interactive information between arguments at representation stage. This may be the main reason why our TIASL model is better than the two models (the improvements of 2.05% and 2.33%, respectively). Similar results are in Comparison relation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Overall Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Our TIASL model achieves state-of-the-art performance in recognition of the Expansion relation. The reasons are two-fold: (1) some argument pairs may have confusable word pairs, which can be effectively mined by asymmetric attention;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Overall Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "(2) some complex argument pairs need to be further understood their semantic representation and explore the indicative and interactive patterns. Our TIASL model integrates these two aspects and performs well.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Overall Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In order to verify the effectiveness of attention mechanism, neural tensor layer and l 1 regularization, we design five experiments to compare with our TIASL model. Seen from Table 4 , we have the following observations:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 175, |
| "end": 182, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Effectiveness of Each Component", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u2022 The performance of LSTM is the worst on each relation. Although Bi-LSTM captures more information than LSTM, the results are not very good. The reason is that separately encoding discourse argument by LSTM or Bi-LSTM ignores the local focused words since it equally treats every word. Table 4 : The effects of different components.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 287, |
| "end": 294, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Effectiveness of Each Component", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u2022 Bi-LSTM with Interactive Attention performs better than the above two simple models. In detail, the F 1 score of this model gains 2.48%, 2.54%, 1.70% improvement on Comparison, Contingency and Temporal than that of LSTM, respectively. We perform significance test for these improvements, and they are all significant under one-tailed t-test (p < 0.05). It indicates that the model could find pair specific clues in two arguments by constructing the relevance of word pairs to some extent. And the effectiveness of our attention mechanism for capturing the interactive information between the arguments is crucial at representation stage.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Effectiveness of Each Component", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u2022 Bi-LSTM with Tensor Layer slightly achieves better performance. This indicates the effectiveness of tensor layer for capturing complex interactive features. The TIA model, which combines attention mechanism and tensor layer, also performs better, but lower than our TIASL model. This is because the k-max pooling strategy cannot guarantee getting the important interaction pairs from the global perspective. How to represent and explain these interactive features mined in our model will be our next research focus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Effectiveness of Each Component", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u2022 Our TIASL model achieves the best performance. It not only encodes discourse arguments with important word pairs by interactive attention, but also captures deeper and more important semantic interactions by NTN with l 1 regularization. The integration of all components is useful for recognizing implicit discourse relations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Effectiveness of Each Component", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "The observations of each component's four-way classification are consistent with the binary classification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Effectiveness of Each Component", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "To demonstrate the validity of our interactive attention, we visualize the heat maps of argument pairs shown in Figure 2 , which shows the interaction matrices of only using Bi-LSTM and our interactive attention on an example. Every word is displayed with a varying background color. The darker patches denote that the correlations of word pairs are higher. The example of Contingency relation is listed below:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 112, |
| "end": 120, |
| "text": "Figure 2", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Interactive Attention Analysis", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Arg1: You are really lucky.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interactive Attention Analysis", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Arg2: The earthquake suddenly came two hours after you left.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interactive Attention Analysis", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "However, it might be classified as a Comparison relation if we only focus on the informative word pair (lucky, earthquake) with contrasting sentiment polarity. Therefore, we need to consider the context of the whole argument pair to infer the correct relation from two back and forth reading directions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interactive Attention Analysis", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Seen from Figure 2(a) , the word pairs (are, you), (lucky, earthquake), (lucky, left) get the higher scores, the scores on the other pairs are arbitrary. It demonstrates the Bi-LSTM model may be influenced by the word pair frequency in corpus. Meanwhile, it encodes two arguments separately, which ignores the relevant and informative interactions between two arguments. Figure 2(b) as a comparison, we observe that there are the more word pairs obtaining the higher scores, which are ignored in Figure 2(a) . This proves that the effectiveness of generating interactive argument representation by our interactive attention, which imitates human-like reading strategy. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 10, |
| "end": 21, |
| "text": "Figure 2(a)", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 371, |
| "end": 382, |
| "text": "Figure 2(b)", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 496, |
| "end": 507, |
| "text": "Figure 2(a)", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Interactive Attention Analysis", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Traditional methods for implicit discourse relation recognition rely on artificial and shallow features, such as POS, polarity, word position, etc. Recent neural network based methods achieve better performance, and mainly focus on two aspects:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The prerequisite of recognizing discourse relation is to have a good argument representation. Most previous researches use various neural networks, such as CNN, RNN, and hybrid models (Zhang et al., 2015; Qin et al., 2016a; Rutherford et al., 2016) to encode discourse arguments as low-dimensional, dense and continuous representations. Ji and Eisenstein (2014) integrate the linguistic features, including syntactic parsing and coreferent entity mentions into compositional distributed representations. Though argument representation contains the high-level semantic, it does not embody emphasis during reading comprehension. Some used neural architectures with attention mechanism pick up the important information from discourse arguments (Mnih et al., 2014; . exploit the hierarchical attention to capture the focus of different granularity. imitate the repeated reading strategy, and proposes neural networks with multi-level attention to recognize discourse relations. However, these researches have not considered the human-like reading strategy from two directions. The imagination by first reading one argument is different from the other, which has the reciprocal effects on implicit discourse relation recognition.", |
| "cite_spans": [ |
| { |
| "start": 184, |
| "end": 204, |
| "text": "(Zhang et al., 2015;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 205, |
| "end": 223, |
| "text": "Qin et al., 2016a;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 224, |
| "end": 248, |
| "text": "Rutherford et al., 2016)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 337, |
| "end": 361, |
| "text": "Ji and Eisenstein (2014)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 742, |
| "end": 761, |
| "text": "(Mnih et al., 2014;", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument Representation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The emphasis of discourse arguments is partly obtained by attention mechanism. Most studies tend to discover more semantic interactions between two arguments by complex neural networks (Chen et al., 2016; Qin et al., 2016b; Lan et al., 2017) . Cai and Zhao (2017) generate discourse argument representations via pair-specified feature extraction. Lei et al. (2017) conduct word interaction score to capture both linear and quadratic relation for argument representation.", |
| "cite_spans": [ |
| { |
| "start": 185, |
| "end": 204, |
| "text": "(Chen et al., 2016;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 205, |
| "end": 223, |
| "text": "Qin et al., 2016b;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 224, |
| "end": 241, |
| "text": "Lan et al., 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 244, |
| "end": 263, |
| "text": "Cai and Zhao (2017)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 347, |
| "end": 364, |
| "text": "Lei et al. (2017)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pair Interactions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Neural tensor network is good at capturing multiple interactions in data, and gets the good performance on entity relation (Socher et al., 2013a) , Chinese word segmentation (Pei et al., 2014) and sentiment analysis (Socher et al., 2013b) tasks. And some NTN-like methods learn the semantic interaction between discourse arguments (Chen et al., 2016 ). Yet they do not discriminate the noises and the redundant information existing in the interactions, and ignore the sparsity of pair patterns.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 145, |
| "text": "(Socher et al., 2013a)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 174, |
| "end": 192, |
| "text": "(Pei et al., 2014)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 216, |
| "end": 238, |
| "text": "(Socher et al., 2013b)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 331, |
| "end": 349, |
| "text": "(Chen et al., 2016", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pair Interactions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Inspired by sparse learning in deep neural networks (Collins and Kohli, 2014; Yoon and Hwang, 2017; Wen et al., 2017) , they use sparse regularization to obtain compact deep networks by removing unnecessary weights. In our paper, we introduce sparse learning into neural tensor network to select some indicative and informative word pair patterns. To our knowledge, our study is the first to employ the idea of sparse learning in implicit discourse relation recognition.", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 77, |
| "text": "(Collins and Kohli, 2014;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 78, |
| "end": 99, |
| "text": "Yoon and Hwang, 2017;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 100, |
| "end": 117, |
| "text": "Wen et al., 2017)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pair Interactions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "A novel neural tensor network framework with interactive attention and sparse learning (TIASL) is proposed for implicit discourse relation recognition. We imitate human-like bidirectional reading strategy, and encode the semantic representation with reciprocal influence of discourse arguments through interactive attention. And we further adopt neural tensor network with l 1 regularization to capture the indicative and informative interactions between discourse arguments. Our experimental results on PDTB show that the proposed TIASL model is effective.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "However, we just take the surface word pairs to express the correlation by calculating the pair-wise matrix in this paper. We will automatically mine the deeper interaction between two arguments and explain the specific patterns of the different relations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by the National Natural Science Foundation of China (61472277, 61772361, 61771333). We also thank the anonymous reviewers for their valuable comments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Comparing word representations for implicit discourse relation classification", |
| "authors": [ |
| { |
| "first": "Chlo\u00e9", |
| "middle": [], |
| "last": "Braud", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Denis", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2201--2211", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chlo\u00e9 Braud and Pascal Denis. 2015. Comparing word representations for implicit discourse relation classifica- tion. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2201-2211.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Pair-aware neural sentence modeling for implicit discourse relation classification", |
| "authors": [ |
| { |
| "first": "Deng", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "458--466", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deng Cai and Hai Zhao. 2017. Pair-aware neural sentence modeling for implicit discourse relation classification. In International Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems, pages 458-466. Springer.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Implicit discourse relation detection via a deep architecture with gated relevance network", |
| "authors": [ |
| { |
| "first": "Jifan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengfei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanjing", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "1726--1735", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jifan Chen, Qi Zhang, Pengfei Liu, Xipeng Qiu, and Xuanjing Huang. 2016. Implicit discourse relation detec- tion via a deep architecture with gated relevance network. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL), pages 1726-1735.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Memory bounded deep convolutional networks", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Maxwell", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushmeet", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kohli", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.1442" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maxwell D Collins and Pushmeet Kohli. 2014. Memory bounded deep convolutional networks. arXiv preprint arXiv:1412.1442.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A unified architecture for natural language processing: Deep neural networks with multitask learning", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 25th International Conference on Machine Learning (ICML)", |
| "volume": "", |
| "issue": "", |
| "pages": "160--167", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert and Jason Weston. 2008. A unified architecture for natural language processing: Deep neural networks with multitask learning. In Proceedings of the 25th International Conference on Machine Learning (ICML), pages 160-167. ACM.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Abstractive summarization of product reviews using discourse structure", |
| "authors": [ |
| { |
| "first": "Shima", |
| "middle": [], |
| "last": "Gerani", |
| "suffix": "" |
| }, |
| { |
| "first": "Yashar", |
| "middle": [], |
| "last": "Mehdad", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Carenini", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "T" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Bita", |
| "middle": [], |
| "last": "Nejat", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1602--1613", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shima Gerani, Yashar Mehdad, Giuseppe Carenini, Raymond T. Ng, and Bita Nejat. 2014. Abstractive sum- marization of product reviews using discourse structure. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1602-1613.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Time slices: What is the duration of a percept", |
| "authors": [ |
| { |
| "first": "Kammer", |
| "middle": [], |
| "last": "Herzog", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Scharnowski Frank", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "PLOS Biology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Herzog, Kammer Michael H., and Scharnowski Frank Thomas. 2016. Time slices: What is the duration of a percept? PLOS Biology.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Towards an open-domain conversational system fully based on natural language processing", |
| "authors": [ |
| { |
| "first": "Ryuichiro", |
| "middle": [], |
| "last": "Higashinaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Imamura", |
| "suffix": "" |
| }, |
| { |
| "first": "Toyomi", |
| "middle": [], |
| "last": "Meguro", |
| "suffix": "" |
| }, |
| { |
| "first": "Chiaki", |
| "middle": [], |
| "last": "Miyazaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Nozomi", |
| "middle": [], |
| "last": "Kobayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroaki", |
| "middle": [], |
| "last": "Sugiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Toru", |
| "middle": [], |
| "last": "Hirano", |
| "suffix": "" |
| }, |
| { |
| "first": "Toshiro", |
| "middle": [], |
| "last": "Makino", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshihiro", |
| "middle": [], |
| "last": "Matsuo", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 25th International Conference on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "928--939", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryuichiro Higashinaka, Kenji Imamura, Toyomi Meguro, Chiaki Miyazaki, Nozomi Kobayashi, Hiroaki Sugiyama, Toru Hirano, Toshiro Makino, and Yoshihiro Matsuo. 2014. Towards an open-domain conversa- tional system fully based on natural language processing. In Proceedings of the 25th International Conference on Computational Linguistics (COLING), pages 928-939.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735- 1780.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A latent factor model for highly multi-relational data", |
| "authors": [ |
| { |
| "first": "Rodolphe", |
| "middle": [], |
| "last": "Jenatton", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Nicolas", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Roux", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Obozinski", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Advances in Neural Information Processing Systems (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "3167--3175", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rodolphe Jenatton, Nicolas L Roux, Antoine Bordes, and Guillaume R Obozinski. 2012. A latent factor model for highly multi-relational data. In Advances in Neural Information Processing Systems (NIPS), pages 3167-3175.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "One vector is not enough: Entity-augmented distributional semantics for discourse relations", |
| "authors": [ |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "3", |
| "issue": "", |
| "pages": "329--344", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yangfeng Ji and Jacob Eisenstein. 2014. One vector is not enough: Entity-augmented distributional semantics for discourse relations. Transactions of the Association for Computational Linguistics, 3:329-344.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Multi-task attention-based neural networks for implicit discourse relationship representation and identification", |
| "authors": [ |
| { |
| "first": "Man", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianxiang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuanbin", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng-Yu", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1299--1308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Man Lan, Jianxiang Wang, Yuanbin Wu, Zheng-Yu Niu, and Haifeng Wang. 2017. Multi-task attention-based neural networks for implicit discourse relationship representation and identification. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1299-1308.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Swim: A simple word interaction model for implicit discourse relation recognition", |
| "authors": [ |
| { |
| "first": "Wenqiang", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuancong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Meichun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilija", |
| "middle": [], |
| "last": "Ilievski", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangnan", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Min-Yen", |
| "middle": [], |
| "last": "Kan", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 26th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "4026--4032", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenqiang Lei, Xuancong Wang, Meichun Liu, Ilija Ilievski, Xiangnan He, and Min-Yen Kan. 2017. Swim: A simple word interaction model for implicit discourse relation recognition. In Proceedings of the 26th International Joint Conference on Artificial Intelligence, pages 4026-4032.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Discourse parsing with attention-based hierarchical neural networks", |
| "authors": [ |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianshi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Baobao", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "362--371", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qi Li, Tianshi Li, and Baobao Chang. 2016. Discourse parsing with attention-based hierarchical neural networks. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 362-371.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Recognizing implicit discourse relations via repeated reading: Neural networks with multi-level attention", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1224--1233", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang Liu and Sujian Li. 2016. Recognizing implicit discourse relations via repeated reading: Neural networks with multi-level attention. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1224-1233.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Implicit discourse relation classification via multitask neural networks", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifang", |
| "middle": [], |
| "last": "Sui", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence (AAAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "2750--2756", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang Liu, Sujian Li, Xiaodong Zhang, and Zhifang Sui. 2016. Implicit discourse relation classification via multi-task neural networks. In Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence (AAAI), pages 2750-2756.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Recurrent models of visual attention", |
| "authors": [ |
| { |
| "first": "Volodymyr", |
| "middle": [], |
| "last": "Mnih", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Heess", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "2204--2212", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Volodymyr Mnih, Nicolas Heess, Alex Graves, et al. 2014. Recurrent models of visual attention. In Advances in Neural Information Processing Systems (NIPS), pages 2204-2212.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Proximal algorithms. Foundations and Trends in Optimization", |
| "authors": [ |
| { |
| "first": "Neal", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Boyd", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "1", |
| "issue": "", |
| "pages": "127--239", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Neal Parikh and Stephen Boyd. 2014. Proximal algorithms. Foundations and Trends in Optimization, 1(3):127-239.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Max-margin tensor neural network for chinese word segmentation", |
| "authors": [ |
| { |
| "first": "Wenzhe", |
| "middle": [], |
| "last": "Pei", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "" |
| }, |
| { |
| "first": "Baobao", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "293--303", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenzhe Pei, Tao Ge, and Baobao Chang. 2014. Max-margin tensor neural network for chinese word segmentation. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (ACL), pages 293-303.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Easily identifiable discourse relations", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Pitler", |
| "suffix": "" |
| }, |
| { |
| "first": "Mridhula", |
| "middle": [], |
| "last": "Raghupathy", |
| "suffix": "" |
| }, |
| { |
| "first": "Hena", |
| "middle": [], |
| "last": "Mehta", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Aravind K", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Pitler, Mridhula Raghupathy, Hena Mehta, Ani Nenkova, Alan Lee, and Aravind K Joshi. 2008. Easily identifiable discourse relations. Technical Reports (CIS).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Automatic sense prediction for implicit discourse relations in text", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Pitler", |
| "suffix": "" |
| }, |
| { |
| "first": "Annie", |
| "middle": [], |
| "last": "Louis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "683--691", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Pitler, Annie Louis, and Ani Nenkova. 2009. Automatic sense prediction for implicit discourse relations in text. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP, pages 683-691.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "The penn discourse treebank 2.0", |
| "authors": [ |
| { |
| "first": "Rashmi", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Diesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Eleni", |
| "middle": [], |
| "last": "Miltsakaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Livio", |
| "middle": [], |
| "last": "Robaldo", |
| "suffix": "" |
| }, |
| { |
| "first": "Aravind", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rashmi Prasad, Nikhil Diesh, Alan Lee, Eleni Miltsakaki, Livio Robaldo, Aravind Joshi, and Bonnie Webber. 2008. The penn discourse treebank 2.0. In Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC).", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Shallow discourse parsing using convolutional neural network", |
| "authors": [ |
| { |
| "first": "Lianhui", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhisong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "CoNLL Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "70--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lianhui Qin, Zhisong Zhang, and Hai Zhao. 2016a. Shallow discourse parsing using convolutional neural network. In CoNLL Shared Task, pages 70-77.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A stacking gated neural architecture for implicit discourse relation classification", |
| "authors": [ |
| { |
| "first": "Lianhui", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhisong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2263--2270", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lianhui Qin, Zhisong Zhang, and Hai Zhao. 2016b. A stacking gated neural architecture for implicit discourse relation classification. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2263-2270.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Discovering implicit discourse relations through brown cluster pair representation and coreference patterns", |
| "authors": [ |
| { |
| "first": "Attapol", |
| "middle": [], |
| "last": "Rutherford", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics (EACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "645--654", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Attapol Rutherford and Nianwen Xue. 2014. Discovering implicit discourse relations through brown cluster pair representation and coreference patterns. In Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics (EACL), pages 645-654.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Neural network models for implicit discourse relation classification in english and chinese without surface features", |
| "authors": [ |
| { |
| "first": "Attapol", |
| "middle": [ |
| "T" |
| ], |
| "last": "Rutherford", |
| "suffix": "" |
| }, |
| { |
| "first": "Vera", |
| "middle": [], |
| "last": "Demberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.01990" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Attapol T Rutherford, Vera Demberg, and Nianwen Xue. 2016. Neural network models for implicit discourse relation classification in english and chinese without surface features. arXiv preprint arXiv:1606.01990.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Reasoning with neural tensor networks for knowledge base completion", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "926--934", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Danqi Chen, Christopher D Manning, and Andrew Ng. 2013a. Reasoning with neural tensor networks for knowledge base completion. In Advances in neural information processing systems, pages 926-934.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Perelygin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1631--1642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Ng, and Christopher Potts. 2013b. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1631-1642.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Learning intrinsic sparse structures within long short-term memory", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxiong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Samyam", |
| "middle": [], |
| "last": "Rajbhandari", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenhan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Fang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiran", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1709.05027" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Wen, Yuxiong He, Samyam Rajbhandari, Wenhan Wang, Fang Liu, Bin Hu, Yiran Chen, and Hai Li. 2017. Learning intrinsic sparse structures within long short-term memory. arXiv preprint arXiv:1709.05027.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Combined group and exclusive sparsity for deep neural networks", |
| "authors": [ |
| { |
| "first": "Jaehong", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sung", |
| "middle": [ |
| "Ju" |
| ], |
| "last": "Hwang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning (PMLR)", |
| "volume": "", |
| "issue": "", |
| "pages": "3958--3966", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jaehong Yoon and Sung Ju Hwang. 2017. Combined group and exclusive sparsity for deep neural networks. In Proceedings of the 34th International Conference on Machine Learning (PMLR), pages 3958-3966.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Shallow convolutional neural network for implicit discourse relation recognition", |
| "authors": [ |
| { |
| "first": "Biao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinsong", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaojie", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hong", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| }, |
| { |
| "first": "Junfeng", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2230--2235", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Biao Zhang, Jinsong Su, Deyi Xiong, Yaojie Lu, Hong Duan, and Junfeng Yao. 2015. Shallow convolutional neural network for implicit discourse relation recognition. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2230-2235.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Neural discourse relation recognition with semantic memory", |
| "authors": [ |
| { |
| "first": "Biao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinsong", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1603.03873" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Biao Zhang, Deyi Xiong, and Jinsong Su. 2016. Neural discourse relation recognition with semantic memory. arXiv preprint arXiv:1603.03873.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Predicting discourse connectives for implicit discourse relation recognition", |
| "authors": [ |
| { |
| "first": "Zhi-Min", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng-Yu", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| }, |
| { |
| "first": "Man", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Chew Lim", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics: Posters", |
| "volume": "", |
| "issue": "", |
| "pages": "1507--1514", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhi-Min Zhou, Yu Xu, Zheng-Yu Niu, Man Lan, Jian Su, and Chew Lim Tan. 2010. Predicting discourse connectives for implicit discourse relation recognition. In Proceedings of the 23rd International Conference on Computational Linguistics: Posters, pages 1507-1514. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Experimental Settings. The 50-dimensional pre-trained word embeddings are provided by GloVe(Pennington et al., 2014), which are fixed during our model training. All the discourse arguments are padded to the same length of 50. And the length of intermediate representation for our network is also 50. The other parameters are initialized by random sampling from uniform distribution in [-0.1,0.1].", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": "Qin2016Qin et al. (2016b) integrated a CNN and a Collaborative Gated Neural Network (CGNN) into argument representation. \u2022 Chen2016: Chen et al. (2016) used a Gated Relevance Network (GRN) and incorporated both the linear and non-linear interactions between word pairs.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF2": { |
| "text": "(a) Bi-LSTM (only word embedding) (b) Bi-LSTM + Interactive attention", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF3": { |
| "text": "The relatedness between two arguments.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "TABREF2": { |
| "text": "", |
| "content": "<table><tr><td>Relation</td><td colspan=\"3\">Train Dev. Test</td></tr><tr><td colspan=\"3\">Comparison 1842 393</td><td>144</td></tr><tr><td colspan=\"3\">Contingency 3139 610</td><td>266</td></tr><tr><td>Expansion</td><td colspan=\"3\">6658 1231 537</td></tr><tr><td>Temporal</td><td>579</td><td>83</td><td>55</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF3": { |
| "text": "Statistics of implicit discourse relations.", |
| "content": "<table><tr><td>Hyper-parameters</td><td>Value</td></tr><tr><td>Initial learning rate</td><td>0.01</td></tr><tr><td>Minibatch size</td><td>30</td></tr><tr><td>Dropout rate</td><td>0.1</td></tr><tr><td colspan=\"2\">Number of tensor slice 3</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF4": { |
| "text": "Hyper-parameters for our TIASL model.", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |