| { |
| "paper_id": "E17-1002", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:51:50.365733Z" |
| }, |
| "title": "Neural Tree Indexers for Text Understanding", |
| "authors": [ |
| { |
| "first": "Tsendsuren", |
| "middle": [], |
| "last": "Munkhdalai", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Massachusetts", |
| "location": { |
| "region": "MA", |
| "country": "USA" |
| } |
| }, |
| "email": "tsendsuren.munkhdalai@umassmed.edu" |
| }, |
| { |
| "first": "Hong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Massachusetts", |
| "location": { |
| "region": "MA", |
| "country": "USA" |
| } |
| }, |
| "email": "hong.yu@umassmed.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recurrent neural networks (RNNs) process input text sequentially and model the conditional transition between word tokens. In contrast, the advantages of recursive networks include that they explicitly model the compositionality and the recursive structure of natural language. However, the current recursive architecture is limited by its dependence on syntactic tree. In this paper, we introduce a robust syntactic parsing-independent tree structured model, Neural Tree Indexers (NTI) that provides a middle ground between the sequential RNNs and the syntactic treebased recursive models. NTI constructs a full n-ary tree by processing the input text with its node function in a bottom-up fashion. Attention mechanism can then be applied to both structure and node function. We implemented and evaluated a binarytree model of NTI, showing the model achieved the state-of-the-art performance on three different NLP tasks: natural language inference, answer sentence selection, and sentence classification, outperforming state-of-the-art recurrent and recursive neural networks 1 .", |
| "pdf_parse": { |
| "paper_id": "E17-1002", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recurrent neural networks (RNNs) process input text sequentially and model the conditional transition between word tokens. In contrast, the advantages of recursive networks include that they explicitly model the compositionality and the recursive structure of natural language. However, the current recursive architecture is limited by its dependence on syntactic tree. In this paper, we introduce a robust syntactic parsing-independent tree structured model, Neural Tree Indexers (NTI) that provides a middle ground between the sequential RNNs and the syntactic treebased recursive models. NTI constructs a full n-ary tree by processing the input text with its node function in a bottom-up fashion. Attention mechanism can then be applied to both structure and node function. We implemented and evaluated a binarytree model of NTI, showing the model achieved the state-of-the-art performance on three different NLP tasks: natural language inference, answer sentence selection, and sentence classification, outperforming state-of-the-art recurrent and recursive neural networks 1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recurrent neural networks (RNNs) have been successful for modeling sequence data (Elman, 1990) . RNNs equipped with gated hidden units and internal short-term memories, such as long shortterm memories (LSTM) (Hochreiter and Schmidhuber, 1997 ) have achieved a notable success in several NLP tasks including named entity recognition (Lample et al., 2016) , constituency parsing (Vinyals et al., 2015) , textual entailment recognition (Rockt\u00e4schel et al., 2016 ), question answering (Hermann et al., 2015) , and machine translation (Bahdanau et al., 2015) . However, most LSTM models explored so far are sequential. It encodes text sequentially from left to right or vice versa and do not naturally support compositionality of language. Sequential LSTM models seem to learn syntactic structure from the natural language however their generalization on unseen text is relatively poor comparing with models that exploit syntactic tree structure (Bowman et al., 2015b) .", |
| "cite_spans": [ |
| { |
| "start": 81, |
| "end": 94, |
| "text": "(Elman, 1990)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 208, |
| "end": 241, |
| "text": "(Hochreiter and Schmidhuber, 1997", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 332, |
| "end": 353, |
| "text": "(Lample et al., 2016)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 377, |
| "end": 399, |
| "text": "(Vinyals et al., 2015)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 433, |
| "end": 458, |
| "text": "(Rockt\u00e4schel et al., 2016", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 481, |
| "end": 503, |
| "text": "(Hermann et al., 2015)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 530, |
| "end": 553, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 941, |
| "end": 963, |
| "text": "(Bowman et al., 2015b)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Unlike sequential models, recursive neural networks compose word phrases over syntactic tree structure and have shown improved performance in sentiment analysis (Socher et al., 2013) . However its dependence on a syntactic tree architecture limits practical NLP applications. In this study, we introduce Neural Tree Indexers (NTI), a class of tree structured models for NLP tasks. NTI takes a sequence of tokens and produces its representation by constructing a full n-ary tree in a bottom-up fashion. Each node in NTI is associated with one of the node transformation functions: leaf node mapping and non-leaf node composition functions. Unlike previous recursive models, the tree structure for NTI is relaxed, i.e., NTI does not require the input sequences to be parsed syntactically; and therefore it is flexible and can be directly applied to a wide range of NLP tasks beyond sentence modeling.", |
| "cite_spans": [ |
| { |
| "start": 161, |
| "end": 182, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Furthermore, we propose different variants of node composition function and attention over tree for our NTI models. When a sequential leaf node transformer such as LSTM is chosen, the NTI network forms a sequence-tree hybrid model taking advantage of both conditional and compositional powers of sequential and recursive models. Figure 1: A binary tree form of Neural Tree Indexers (NTI) in the context of question answering and natural language inference. We insert empty tokens (denoted by \u2212) to the input text to form a full binary tree. (a) NTI produces answer representation at the root node. This representation along with the question is used to find the answer. (b) NTI learns representations for the premise and hypothesis sentences and then attentively combines them for classification. Dotted lines indicate attention over premise-indexed tree.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1 shows a binary-tree model of NTI. Although the model does not follow the syntactic tree structure, we empirically show that it achieved the state-ofthe-art performance on three different NLP applications: natural language inference, answer sentence selection, and sentence classification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2 Related Work", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "RNNs model input text sequentially by taking a single token at each time step and producing a corresponding hidden state. The hidden state is then passed along through the next time step to provide historical sequence information. Although a great success in a variety of tasks, RNNs have limitations (Bengio et al., 1994; Hochreiter, 1998) . Among them, it is not efficient at memorizing long or distant sequence (Sutskever et al., 2014) . This is frequently called as information flow bottleneck. Approaches have therefore been developed to overcome the limitations. For example, to mitigate the information flow bottleneck, Bahdanau et al. (2015) extended RNNs with a soft attention mechanism in the context of neural machine translation, leading to improved the results in translating longer sentences. RNNs are linear chain-structured; this limits its potential for natural language which can be represented by complex structures including syntactic structure. In this study, we propose models to mitigate this limitation.", |
| "cite_spans": [ |
| { |
| "start": 301, |
| "end": 322, |
| "text": "(Bengio et al., 1994;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 323, |
| "end": 340, |
| "text": "Hochreiter, 1998)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 414, |
| "end": 438, |
| "text": "(Sutskever et al., 2014)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 627, |
| "end": 649, |
| "text": "Bahdanau et al. (2015)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recurrent Neural Networks and Attention Mechanism", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Unlike RNNs, recursive neural networks explicitly model the compositionality and the recursive structure of natural language over tree. The tree structure can be predefined by a syntactic parser (Socher et al., 2013) . Each non-leaf tree node is associated with a node composition function which combines its children nodes and produces its own representation. The model is then trained by back-propagating error through structures (Goller and Kuchler, 1996) .", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 216, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 432, |
| "end": 458, |
| "text": "(Goller and Kuchler, 1996)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recursive Neural Networks", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The node composition function can be varied. A single layer network with tanh non-linearity was adopted in recursive auto-associate memories (Pollack, 1990) and recursive autoencoders (Socher et al., 2011) . Socher et al. (2012) extended this network with an additional matrix representation for each node to augment the expressive power of the model. Tensor networks have also been used as composition function for sentencelevel sentiment analysis task (Socher et al., 2013) . Recently, Zhu et al. (2015) introduced S-LSTM which extends LSTM units to compose tree nodes in a recursive fashion.", |
| "cite_spans": [ |
| { |
| "start": 141, |
| "end": 156, |
| "text": "(Pollack, 1990)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 184, |
| "end": 205, |
| "text": "(Socher et al., 2011)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 208, |
| "end": 228, |
| "text": "Socher et al. (2012)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 454, |
| "end": 475, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 488, |
| "end": 505, |
| "text": "Zhu et al. (2015)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recursive Neural Networks", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In this paper, we introduce a novel attentive node composition function that is based on S-LSTM. Our NTI model does not rely on either a parser output or a fine-grained supervision of nonleaf nodes, both required in previous work. In NTI, the supervision from the target labels is provided at the root node. As such, our NTI model is robust and applicable to a wide range of NLP tasks. We introduce attention over tree in NTI to overcome the vanishing/explode gradients challenges as shown in RNNs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recursive Neural Networks", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Our training set consists of N examples", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "{X i , Y i } N i=1 ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where the input X i is a sequence of word tokens w i 1 , w i 2 , . . . , w i T i and the output Y i can be either a single target or a sequence. Each input word token w t is represented by its word", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "embedding x t \u2208 R k .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "NTI is a full n-ary tree (and the sub-trees can be overlapped).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "It has two types of transformation function: non-leaf node function f node (h 1 , . . . , h c ) and leaf node function f leaf (x t ). f leaf (x t ) computes a (possibly nonlinear) transformation of the input word embedding x t . f node (h 1 , . . . , h c ) is a function of its child nodes representation h 1 , . . . , h c , where c is the total number of child nodes of this non-leaf node.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "NTI can be implemented with different tree structures. In this study we implemented and evaluated a binary tree form of NTI: a non-leaf node can take in only two direct child nodes (i.e., c = 2). Therefore, the function f node (h l , h r ) composes its left child node h l and right child node h r . Figure 1 illustrates our NTI model that is applied to question answering (a) and natural language inference tasks (b). Note that the node and leaf node functions are neural networks and are the only training parameters in NTI.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 300, |
| "end": 308, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We explored two different approaches to compose node representations: an extended LSTM and attentive node composition functions, to be described below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We define two different methods for non-leaf node function f node (h l , h r ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "LSTM-based Non-leaf Node Function (S-LSTM): We initiate f node (h l , h r ) with LSTM. For non-leaf node, we adopt S- LSTM Zhu et al. (2015) , an extension of LSTM to tree structures, to learn a node representation by its children nodes. ", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 140, |
| "text": "LSTM Zhu et al. (2015)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "i t+1 = \u03c3(W s 1 h l t + W s 2 h r t + W s 3 c l t + W s 4 c r t ) (1) f l t+1 = \u03c3(W s 5 h l t + W s 6 h r t + W s 7 c l t + W s 8 c r t ) (2) f r t+1 = \u03c3(W s 9 h l t + W s 10 h r t + W s 11 c l t + W s 12 c r t ) (3) c p t+1 = f l t+1 c l t + f r t+1 c r t + i t+1 tanh(W s 13 h l t + W s 14 h r t ) (4) o t+1 = \u03c3(W s 15 h l t + W s 16 h r t + W s 18 c p t+1 ) (5) h p t+1 = o t+1 tanh(c p t+1 )", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where W s 1 , . . . , W s 18 \u2208 R k\u00d7k and biases (for brevity we eliminated the bias terms) are the training parameters. \u03c3 and denote the elementwise sigmoid function and the element-wise vector multiplication. Extension of S-LSTM nonleaf node function to compose more children is straightforward. However, the number of parameters increases quadratically in S-LSTM as we add more child nodes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Attentive Non-leaf Node Function (ANF): Some NLP applications (e.g., QA and machine translation) would benefit from a dynamic query dependent composition function. We introduce ANF as a new non-leaf node function. Unlike S-LSTM, ANF composes the child nodes attentively in respect to another relevant input vector q \u2208 R k . The input vector q can be a learnable representation from a sequence representation. Given a matrix S AN F \u2208 R k\u00d72 resulted by concatenating the child node representations h l t , h r t and the third input vector q, ANF is defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "m = f score (S AN F , q) (7) \u03b1 = sof tmax(m) (8) z = S AN F \u03b1 (9) h p t+1 = ReLU (W AN F 1 z)", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "W AN F 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2208 R k\u00d7k is a learnable matrix, m \u2208 R 2 the attention score and \u03b1 \u2208 R 2 the attention weight vector for each child. f score is an attention scoring function, which can be implemented as a multi-layer perceptron (MLP) 11or a matrix-vector product m = q S AN F . The matrices W score 1 and W score 2 \u2208 R k\u00d7k and the vector w \u2208 R k are training parameters. e \u2208 R 2 is a vector of ones and \u2297 the outer product. We use ReLU function for non-linear transformation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "m = w ReLU (W score 1 S AN F + W score 2 q \u2297 e)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Leaf Node Composition Functions", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Comparing with sequential LSTM models, NTI has less recurrence, which is defined by the tree depth, log(n) for binary tree where n is the length of the input sequence. However, NTI still needs to compress all the input information into a single representation vector of the root. This imposes practical difficulties when processing long sequences. We address this issue with attention (Bowman et al., 2015a) 300 3.0M 83.9 80.6 Dependency Tree CNN encoders (Mou et al., 2016) 300 3.5M 83.3 82.1 NTI-SLSTM (Ours) 300 3.3M 83.9 82.4 SPINN-PI encoders (Bowman et al., 2016) 300 3.7M 89.2 83.2 NTI-SLSTM-LSTM (Ours) 300 4.0M 82.5 83.4 LSTMs attention (Rockt\u00e4schel et al., 2016) 100 242K 85.4 82.3 LSTMs word-by-word attention (Rockt\u00e4schel et al., 2016) 100 mechanism over tree. In addition, the attention mechanism can be used for matching trees (described in Section 4 as Tree matching NTI) that carry different sequence information. We first define a global attention and then introduce a tree attention which considers the parent-child dependency for calculation of the attention weights. Global Attention: An attention neural network for the global attention takes all node representations as input and produces an attentively blended vector for the whole tree. This neural net is similar to ANF. Particularly, given a matrix S GA \u2208 R k\u00d72n\u22121 resulted by concatenating the node representations h 1 , . . . , h 2n\u22121 and the relevant input representation q, the global attention is defined as", |
| "cite_spans": [ |
| { |
| "start": 385, |
| "end": 407, |
| "text": "(Bowman et al., 2015a)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 456, |
| "end": 474, |
| "text": "(Mou et al., 2016)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 548, |
| "end": 569, |
| "text": "(Bowman et al., 2016)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 646, |
| "end": 672, |
| "text": "(Rockt\u00e4schel et al., 2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 721, |
| "end": 747, |
| "text": "(Rockt\u00e4schel et al., 2016)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Over Tree", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "m = f score (S GA , q) (12) \u03b1 = sof tmax(m) (13) z = S GA \u03b1", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Attention Over Tree", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "h", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Over Tree", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "tree = ReLU (W GA 1 z + W GA 2 q)", |
| "eq_num": "(15)" |
| } |
| ], |
| "section": "Attention Over Tree", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where W GA 1 and W GA 2 \u2208 R k\u00d7k are training parameters and \u03b1 \u2208 R 2n\u22121 the attention weight vector for each node. This attention mechanism is robust as it globally normalizes the attention score m with sof tmax to obtain the weights \u03b1. However, it does not consider the tree structure when producing the final representation h tree .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Over Tree", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Tree Attention: We modify the global attention network to the tree attention mechanism. The resulting tree attention network performs almost the same computation as ANF for each node. It compares the parent and children nodes to produce a new representation assuming that all node representations are constructed. Given a matrix S T A \u2208 R k\u00d73 resulted by concatenating the parent node representation h p t , the left child h l t and the right child h r t and the relevant input representation q, every non-leaf node h p t simply updates its own representation by using the following equation in a bottom-up manner.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Over Tree", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "m = f score (S T A , q) (16) \u03b1 = sof tmax(m) (17) z = S T A \u03b1 (18) h p t = ReLU (W T A 1 z)", |
| "eq_num": "(19)" |
| } |
| ], |
| "section": "Attention Over Tree", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "and this equation is similarity to the global attention. However, now each non-leaf node attentively collects its own and children representations and passes towards the root which finally constructs the attentively blended tree representation. Note that unlike the global attention, the tree attention locally normalizes the attention scores with sof tmax.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Over Tree", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We describe in this section experiments on three different NLP tasks, natural language inference, question answering and sentence classification to demonstrate the flexibility and the effectiveness of NTI in the different settings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We trained NTI using Adam (Kingma and Ba, 2014) with hyperparameters selected on development set. The pre-trained 300-D Glove 840B vectors (Pennington et al., 2014) were obtained for the word embeddings 2 . The word embeddings are fixed during training. The embeddings for out-ofvocabulary words were set to zero vector. We pad the input sequence to form a full binary tree. A padding vector was inserted when padding. We analyzed the effects of the padding size and found out that it has no influence on the performance (see Appendix 5.3). The size of hidden units of the NTI modules were set to 300. The models were regularized by using dropouts and an l 2 weight decay. 3", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 47, |
| "text": "(Kingma and Ba, 2014)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 139, |
| "end": 164, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We conducted experiments on the Stanford Natural Language Inference (SNLI) dataset (Bowman et al., 2015a), which consists of 549,367/9,842/9,824 premise-hypothesis pairs for train/dev/test sets and target label indicating their relation. Unless otherwise noted, we follow the setting in the previous work (Mou et al., 2016; Bowman et al., 2016) and use an MLP for classification which takes in NTI outputs and computes the concatenation", |
| "cite_spans": [ |
| { |
| "start": 305, |
| "end": 323, |
| "text": "(Mou et al., 2016;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 324, |
| "end": 344, |
| "text": "Bowman et al., 2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "[h p 2n\u22121 ; h h 2n\u22121 ], absolute dif- ference h p 2n\u22121 \u2212 h h 2n\u22121 and elementwise product h p 2n\u22121 \u2022 h h", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "2n\u22121 of the two sentence representations. The MLP has also an input layer with 1024 units with ReLU activation and a sof tmax output layer. We explored nine different task-oriented NTI models with varying complexity, to be described below. For each model, we set the batch size to 32. The initial learning, the regularization strength and the number of epoch to be trained are varied for each model. NTI-SLSTM: this model does not rely on f leaf transformer but uses the S-LSTM units for the non-leaf node function. We set the initial learning rate to 1e-3 and l 2 regularizer strength to 3e-5, and train the model for 90 epochs. The neural net was regularized by 10% input dropouts and the 20% output dropouts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "NTI-SLSTM-LSTM: we use LSTM for the leaf node function f leaf . Concretely, the LSTM output vectors are given to NTI-SLSTM and the memory cells of the lowest level S-LSTM were initialized with the LSTM memory states. The hyper-parameters are the same as the previous model. NTI-SLSTM node-by-node global attention: This model learns inter-sentence relation with the global attention over premise-indexed tree, which is similar to word-by-word attention model of Rockt\u00e4schel et al. (2016) in that it attends over the premise tree nodes at every time step of hypothesis encoding. We tie the weight parameters of the two NTI-SLSTMs for premise and hypothesis and no f leaf transformer used. We set the initial learning rate to 3e-4 and l 2 regularizer strength to 1e-5, and train the model for 40 epochs. The neural net was regularized by 15% input dropouts and the 15% output dropouts.", |
| "cite_spans": [ |
| { |
| "start": 462, |
| "end": 487, |
| "text": "Rockt\u00e4schel et al. (2016)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "NTI-SLSTM node-by-node tree attention: this is a variation of the previous model with the tree attention. The hyper-parameters are the same as the previous model. NTI-SLSTM-LSTM node-by-node global attention: in this model we include LSTM as the leaf node function f leaf . Here we initialize the memory cell of S-LSTM with LSTM memory and hidden/memory state of hypothesis LSTM with premise LSTM (the later follows the work of (Rockt\u00e4schel et al., 2016) ). We set the initial learning rate to 3e-4 and l 2 regularizer strength to 1e-5, and train the model for 10 epochs. The neural net was regularized by 10% input dropouts and the 15% output dropouts.", |
| "cite_spans": [ |
| { |
| "start": 428, |
| "end": 454, |
| "text": "(Rockt\u00e4schel et al., 2016)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "NTI-SLSTM-LSTM node-by-node tree attention: this is a variation of the previous model with the tree attention. The hyper-parameters are the same as the previous model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Tree matching NTI-SLSTM-LSTM global attention: this model first constructs the premise and hypothesis trees simultaneously with the NTI-SLSTM-LSTM model and then computes their matching vector by using the global attention and an additional LSTM. The attention vectors are produced at each hypothesis tree node and then are given to the LSTM model sequentially. The LSTM model compress the attention vectors and outputs a single matching vector, which is passed to an MLP for classification. The MLP for this tree matching setting has an input layer with 1024 units with ReLU activation and a sof tmax output layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Unlike Wang and Jiang (2016) 's matching LSTM model which is specific to matching sequences, we use the standard LSTM units and match trees. We set the initial learning rate to 3e-4 and l 2 regularizer strength to 3e-5, and train the model for 20 epochs. The neural net was regularized by 20% input dropouts and the 20% output dropouts.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 28, |
| "text": "Wang and Jiang (2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Tree matching NTI-SLSTM-LSTM tree attention: we replace the global attention with the tree attention. The hyper-parameters are the same as the previous model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Full tree matching NTI-SLSTM-LSTM global attention: this model produces two sets of the attention vectors, one by attending over the premise tree regarding each hypothesis tree node and another by attending over the hypothesis tree regarding each premise tree node. Each set of the attention vectors is given to a LSTM model to achieve full tree matching. The last hidden states of the two LSTM models (i.e. one for each attention vector set) are concatenated for classification. The training weights are shared among the LSTM models The hyper-parameters are the same as the previous model. 4 Table 1 shows the results of our models. For comparison, we include the results from the published state-of-the-art systems. While most of the sentence encoder models rely solely on word embeddings, the dependency tree CNN and the SPINN-PI models make use of sentence parser output; which present strong baseline systems. The last set of methods designs inter-sentence relation with soft attention (Bahdanau et al., 2015) . Our best score on this task is 87.3% accuracy obtained with the full tree matching NTI model. The previous best performing model on the task performs phrase matching by using the attention mechanism.", |
| "cite_spans": [ |
| { |
| "start": 991, |
| "end": 1014, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 593, |
| "end": 600, |
| "text": "Table 1", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Our results show that NTI-SLSTM improved the performance of the sequential LSTM encoder by approximately 2%. Not surprisingly, using LSTM as leaf node function helps in learning better representations. Our NTI-SLSTM-LSTM is a hybrid model which encodes a sequence sequentially through its leaf node function and then hierarchically composes the output representations. The node-by-node attention models improve the performance, indicating that modeling inter-sentence interaction is an important element in NLI. Aggregating matching vector between trees or sequences with a separate LSTM model is effective. The global attention seems to (Socher et al., 2013) 85.4 45.7 CNN-MC (Kim, 2014) 88.1 47.4 DRNN (Irsoy and Cardie, 2015) 86.6 49.8 2-layer LSTM (Tai et al., 2015) 86.3 46.0 Bi-LSTM (Tai et al., 2015) 87.5 49.1 NTI-SLSTM (Ours) 87.8 50.5 CT-LSTM (Tai et al., 2015) 88.0 51.0 DMN (Kumar et al., 2016) 88.6 52.1 NTI-SLSTM-LSTM (Ours)", |
| "cite_spans": [ |
| { |
| "start": 638, |
| "end": 659, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 677, |
| "end": 688, |
| "text": "(Kim, 2014)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 704, |
| "end": 728, |
| "text": "(Irsoy and Cardie, 2015)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 752, |
| "end": 770, |
| "text": "(Tai et al., 2015)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 789, |
| "end": 807, |
| "text": "(Tai et al., 2015)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 853, |
| "end": 871, |
| "text": "(Tai et al., 2015)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 886, |
| "end": 906, |
| "text": "(Kumar et al., 2016)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "89.3 53.1 Table 3 : Test accuracy for sentence classification. Bin: binary, FG: fine-grained 5 classes. be robust on this task. The tree attention were not helpful as it normalizes the attention scores locally in parent-child relationship.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 10, |
| "end": 17, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For this task, a model is trained to identify the correct sentences that answer a factual question, from a set of candidate sentences. We experiment on WikiQA dataset constructed from Wikipedia (Yang et al., 2015) . The dataset contains 20,360/2,733/6,165 QA pairs for train/dev/test sets.", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 213, |
| "text": "(Yang et al., 2015)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Sentence Selection", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We used the same setup in the language inference task except that we replace the sof tmax layer with a sigmoid layer and model the following conditional probability distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Sentence Selection", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p \u03b8 (y = 1|h q n , h a n ) = sigmoid(o QA )", |
| "eq_num": "(20)" |
| } |
| ], |
| "section": "Answer Sentence Selection", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where h q n and h a n are the question and the answer encoded vectors and o QA denotes the output of the hidden layer of the MLP. For this task, we use NTI-SLSTM-LSTM to encode answer candidate sentences and NTI-ANF-LSTM to encode the question sentences. Note that NTI-ANF-LSTM is relied on ANF as the non-leaf node function. q vector for NTI-ANF-LSTM is the answer representation produced by the answer encoding NTI-SLSTM-LSTM model. We set the batch size to 4 and the initial learning rate to 1e-3, and train the an outdoor concert at the park a snowmobile in a blizzard an Obama supporter is upset a woman kids playing at a park outside a Skier ski -jumping but doesn't have any money a young person a mom takes a break in a park A skier preparing a trick crying because he didn't get cake a guy people play frisbee outdoors a child is playing on christmas trying his hardest to not fall off a single human takes his lunch break in the park two men play with a snowman is upset and crying on the ground Table 4 : Nearest-neighbor phrases based on cosine similarity between learned representations. model for 10 epochs. We used 20% input dropouts and no l 2 weight decay. Following previous work, we adopt MAP and MRR as the evaluation metrics for this task. 5 Table 2 presents the results of our model and the previous models for the task. 6 The classifier with handcrafted features is a SVM model trained with a set of features. The Bigram-CNN model is a simple convolutional neural net. The Deep LSTM and LSTM attention models outperform the previous best result by a large margin, nearly 5-6%. NASM improves the result further and sets a strong baseline by combining variational autoencoder (Kingma and Welling, 2014) with the soft attention. In NASM, they adopt a deep three-layer LSTM and introduced a latent stochastic attention mechanism over the answer sentence. Our NTI model exceeds NASM by approximately 0.4% on MAP for this task.", |
| "cite_spans": [ |
| { |
| "start": 1343, |
| "end": 1344, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 1697, |
| "end": 1723, |
| "text": "(Kingma and Welling, 2014)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1006, |
| "end": 1013, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 1263, |
| "end": 1270, |
| "text": "Table 2", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Answer Sentence Selection", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Lastly, we evaluated NTI on the Stanford Sentiment Treebank (SST) (Socher et al., 2013) . This dataset comes with standard train/dev/test sets and two subtasks: binary sentence classification or fine-grained classification of five classes. We trained our model on the text spans corresponding to labeled phrases in the training set and evaluated the model on the full sentences.", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 87, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We use NTI-SLSTM and NTI-SLSTM-LSTM models to learn sentence representations for the task. The sentence representations were passed to a two-layer MLP for classification. We set the batch size to 64, the initial learning rate to 1e-3 and l 2 regularizer strength to 3e-5, and train each model for 10 epochs. The NTI-SLSTM model was regularized by 10%/20% of input/output and 20%/30% of input/output dropouts and the NTI-SLSTM-LSTM model 20% of input and 20%/30% of input/output dropouts for binary and finegrained settings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "NTI-SLSTM-LSTM (as shown in Table 5 ) set the state-of-the-art results on both subtasks. Our NTI-SLSTM model performed slightly worse A dog mouth holds a retrieved ball.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 28, |
| "end": 35, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "A cat nurses puppies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "A dog sells a woman a hat. A brown and white dog holds a tennis ball in his mouth.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "A golden retriever nurses some other dogs puppies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The dog is a labrador retriever.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The dog has a ball.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "A golden retriever nurses puppies. A girl is petting her dog. The dogs are chasing a ball.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "A mother dog checking up on her baby puppy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The dog is a shitzu.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "A small dog runs to catch a ball. A girl is petting her dog. A husband and wife making pizza. The puppy is chasing a ball.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The hat wearing girl is petting a cat. The dog is a chihuahua. Table 5 : Nearest-neighbor sentences based on cosine similarity between learned representations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 63, |
| "end": 70, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "than its constituency tree-based counter part, CT-LSTM model. The CT-LSTM model composes phrases according to the output of a sentence parser and uses a node composition function similar to S-LSTM. After we transformed the input with the LSTM leaf node function, we achieved the best performance on this task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "5 Qualitative Analysis", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "To help analyzing the results, we output attention weights by our NTI-SLSTM node-by-node global attention model. Figure 2 shows the attention heatmaps for two sentences in the SNLI test set. It shows that our model semantically aligns single or multiword expressions (\"little child\" and \"toddler\"; \"rock wall\" and \"stone\"). In addition, our model is able to re-orient its attention over different parts of the hypothesis when the expression is more complex. For example, for (c) \"rock wall in autumn\", NTI mostly focuses on the nodes in depth 1, 2 and 3 representing contexts related to \"a stone\", \"leaves.\" and \"a stone wall surrounded\". Surprisingly, attention degree for the single word expression like \"stone\", \"wall\" and \"leaves\" is lower to compare with multiword phrases. Sequence models lack this property as they have no explicit composition module to produce such mu- Finally, the most interesting pattern is that the model attends over higher level (low depth) tree nodes with rich semantics when considering a (c) longer phrase or (d) full sentence. As shown in (d), the NTI model aligns the root node representing the whole hypothesis sentence to the higher level tree nodes covering larger sub-trees in the premise. It certainly ignores the lower level single word expressions and only starts to attend when the words are collectively to form rich semantics.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 113, |
| "end": 121, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attention and Compositionality", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Using cosine similarity between their representations produced by the NTI-SLSTM model, we show that NTI is able to capture paraphrases on SNLI test data. As shown in Table 4 , NTI seems to distinguish plural from singular forms (similar phrases to \"a person\"). In addition, NTI captures non-surface knowledge. For example, the phrases similar to \"park for fun\" tend to align to the semantic content of fun and park, including \"people play frisbee outdoors\". The NTI model was able to relate \"Santa Claus\" to christmas and snow. Interestingly, the learned representations were also able to connect implicit semantics. For example, NTI found that \"sad, depressed, and hatred\" is close to the phrases like \"an Obama supporter is upset\". Overall the NTI model is robust to the length of the phrases being matched. Given a short phrase, NTI can retrieve longer yet semantically coherent sequences from the SNLI test set. In Table 5 , we show nearest-neighbor sentences from SNLI test set. Note that the sentences listed in the first two columns sound semantically coherent but not the ones in the last column. The query sentence \"A dog sells a women a hat\" does not actually represent a common-sense knowledge and this sentence now seem to confuse the NTI model. As a result, the retrieved sentence are arbitrary and not coherent.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 166, |
| "end": 173, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 919, |
| "end": 926, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Learned Representations of Phrases and Sentences", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We introduced a special padding character in order to construct full binary tree. Does this padding character influence the performance of the NTI models? In Figure 3 , we show relationship between the padding size and the accuracy on Stanford sentiment analysis data. Each sentence was padded to form a full binary tree. The x-axis represents the number of padding characters introduced. When the padding size is less (up to 10), the NTI-SLSTM-LSTM model performs better. However, this model tends to perform poorly or equally when the padding size is large. Overall we do not observe any significant performance drop for both models as the padding size increases. This suggests that NTI learns to ignore the special padding character while processing padded sentences. The same scenario was also observed while analyzing attention weights. The attention over the padded nodes was nearly zero.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 166, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effects of Padding Size", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We introduced Neural Tree Indexers, a class of tree structured recursive neural network. The NTI models achieved state-of-the-art performance on different NLP tasks. Most of the NTI models form deep neural networks and we think this is one reason that NTI works well even if it lacks direct linguistic motivations followed by other syntactictree-structured recursive models (Socher et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 374, |
| "end": 395, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "CNN and NTI are topologically related (Kalchbrenner and Blunsom, 2013). Both NTI and CNNs are hierarchical. However, current implementation of NTI only operates on non-overlapping subtrees while CNNs can slide over the input to produce higher-level representations. NTI is flexible in selecting the node function and the attention mechanism. Like CNN, the computation in the same tree-depth can be parallelized effectively; and therefore NTI is scalable and suitable for large-scale sequence processing. Note that NTI can be seen as a generalization of LSTM. If we construct left-branching trees in a bottom-up fashion, the model acts just like sequential LSTM. Different branching factors for the underlying tree structure have yet to be explored. NTI can be extended so it learns to select and compose dynamic number of nodes for efficiency, essentially discovering intrinsic hierarchical structure in the input.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Code for the experiments and NTI is available at https://bitbucket.org/tsendeemts/nti", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://nlp.stanford.edu/projects/glove/ 3 More detail on hyper-parameters can be found in code.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Computational constraint prevented us from experimenting the tree attention variant of this model", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We used trec eval script to calculate the evaluation metrics6 Inclusion of simple word count feature improves the performance by around 0.15-0.3 across the board", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the anonymous reviewers for their insightful comments and suggestions. This work was supported in part by the grant HL125089 from the National Institutes of Health (NIH). Any opinions, findings and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect those of the sponsor.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In ICLR 2015.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning long-term dependencies with gradient descent is difficult", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrice", |
| "middle": [], |
| "last": "Simard", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Frasconi", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "IEEE Transactions on", |
| "volume": "5", |
| "issue": "2", |
| "pages": "157--166", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, Patrice Simard, and Paolo Frasconi. 1994. Learning long-term dependencies with gra- dient descent is difficult. Neural Networks, IEEE Transactions on, 5(2):157-166.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A large annotated corpus for learning natural language inference", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "632--642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015a. A large anno- tated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 632-642, Lisbon, Portugal, September. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Tree-structured composition in neural networks without tree-structured architectures", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 NIPS Workshop on Cognitive Computation: Integrating Neural and Symbolic Approaches", |
| "volume": "1583", |
| "issue": "", |
| "pages": "37--42", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Christopher D. Manning, and Christopher Potts. 2015b. Tree-structured compo- sition in neural networks without tree-structured ar- chitectures. In Proceedings of the 2015 NIPS Work- shop on Cognitive Computation: Integrating Neural and Symbolic Approaches-Volume 1583, pages 37- 42.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A fast unified model for parsing and sentence understanding", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhinav", |
| "middle": [], |
| "last": "Gauthier", |
| "suffix": "" |
| }, |
| { |
| "first": "Raghav", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1466--1477", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Jon Gauthier, Abhinav Ras- togi, Raghav Gupta, Christopher D. Manning, and Christopher Potts. 2016. A fast unified model for parsing and sentence understanding. In Proceed- ings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 1466-1477, Berlin, Germany, August. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Long short-term memory-networks for machine reading", |
| "authors": [ |
| { |
| "first": "Jianpeng", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "551--561", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianpeng Cheng, Li Dong, and Mirella Lapata. 2016. Long short-term memory-networks for machine reading. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Process- ing, pages 551-561, Austin, Texas, November. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Finding structure in time", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jeffrey L Elman", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Cognitive science", |
| "volume": "14", |
| "issue": "2", |
| "pages": "179--211", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey L Elman. 1990. Finding structure in time. Cognitive science, 14(2):179-211.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Learning task-dependent distributed representations by backpropagation through structure", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Goller", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Kuchler", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "IEEE International Conference on", |
| "volume": "1", |
| "issue": "", |
| "pages": "347--352", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph Goller and Andreas Kuchler. 1996. Learn- ing task-dependent distributed representations by backpropagation through structure. In Neural Net- works, 1996., IEEE International Conference on, volume 1, pages 347-352. IEEE.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Teaching machines to read and comprehend", |
| "authors": [ |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Moritz Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Kocisky", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Lasse", |
| "middle": [], |
| "last": "Espeholt", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Kay", |
| "suffix": "" |
| }, |
| { |
| "first": "Mustafa", |
| "middle": [], |
| "last": "Suleyman", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Su- leyman, and Phil Blunsom. 2015. Teaching ma- chines to read and comprehend. In NIPS 2015.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The vanishing gradient problem during learning recurrent neural nets and problem solutions", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "International Journal of Uncertainty", |
| "volume": "6", |
| "issue": "02", |
| "pages": "107--116", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter. 1998. The vanishing gradient problem during learning recurrent neural nets and problem solutions. International Journal of Un- certainty, Fuzziness and Knowledge-Based Systems, 6(02):107-116.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Modeling compositionality with multiplicative recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Ozan", |
| "middle": [], |
| "last": "Irsoy", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ozan Irsoy and Claire Cardie. 2015. Modeling compo- sitionality with multiplicative recurrent neural net- works. In ICLR 2015.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Recurrent continuous translation models", |
| "authors": [ |
| { |
| "first": "Nal", |
| "middle": [], |
| "last": "Kalchbrenner", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1700--1709", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nal Kalchbrenner and Phil Blunsom. 2013. Recurrent continuous translation models. In Proceedings of the 2013 Conference on Empirical Methods in Nat- ural Language Processing, pages 1700-1709. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Convolutional neural networks for sentence classification", |
| "authors": [ |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1746--1751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoon Kim. 2014. Convolutional neural networks for sentence classification. In Proceedings of the 2014 Conference on Empirical Methods in Natu- ral Language Processing (EMNLP), pages 1746- 1751, Doha, Qatar, October. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. In ICLR 2014.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Autoencoding variational bayes", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Max Welling. 2014. Auto- encoding variational bayes. In ICLR 2014.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Ask me anything: Dynamic memory networks for natural language processing", |
| "authors": [ |
| { |
| "first": "Ankit", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ozan", |
| "middle": [], |
| "last": "Irsoy", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "English", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Pierce", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Ondruska", |
| "suffix": "" |
| }, |
| { |
| "first": "Ishaan", |
| "middle": [], |
| "last": "Gulrajani", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of The 33rd International Conference on Machine Learning (ICML 2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "1378--1387", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankit Kumar, Ozan Irsoy, Jonathan Su, James Brad- bury, Robert English, Brian Pierce, Peter Ondruska, Ishaan Gulrajani, and Richard Socher. 2016. Ask me anything: Dynamic memory networks for nat- ural language processing. In Proceedings of The 33rd International Conference on Machine Learn- ing (ICML 2016), pages 1378-1387, New York, NY, USA, June.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Neural architectures for named entity recognition", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandeep", |
| "middle": [], |
| "last": "Subramanian", |
| "suffix": "" |
| }, |
| { |
| "first": "Kazuya", |
| "middle": [], |
| "last": "Kawakami", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "260--270", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Miguel Ballesteros, Sandeep Sub- ramanian, Kazuya Kawakami, and Chris Dyer. 2016. Neural architectures for named entity recog- nition. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, pages 260-270, San Diego, California, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Distributed representations of sentences and documents", |
| "authors": [ |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "ICML 2014", |
| "volume": "14", |
| "issue": "", |
| "pages": "1188--1196", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quoc V Le and Tomas Mikolov. 2014. Distributed rep- resentations of sentences and documents. In ICML 2014, volume 14, pages 1188-1196.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Neural variational inference for text processing", |
| "authors": [ |
| { |
| "first": "Yishu", |
| "middle": [], |
| "last": "Miao", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yishu Miao, Lei Yu, and Phil Blunsom. 2016. Neural variational inference for text processing. In ICLR 2016.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Natural language inference by tree-based convolution and heuristic matching", |
| "authors": [ |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Mou", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Men", |
| "suffix": "" |
| }, |
| { |
| "first": "Ge", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhi", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "130--136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lili Mou, Rui Men, Ge Li, Yan Xu, Lu Zhang, Rui Yan, and Zhi Jin. 2016. Natural language inference by tree-based convolution and heuristic matching. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 130-136, Berlin, Germany, August. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "A decomposable attention model for natural language inference", |
| "authors": [ |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Oscar", |
| "middle": [], |
| "last": "T\u00e4ckstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2249--2255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankur Parikh, Oscar T\u00e4ckstr\u00f6m, Dipanjan Das, and Jakob Uszkoreit. 2016. A decomposable attention model for natural language inference. In Proceed- ings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2249-2255, Austin, Texas, November. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar, October. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Recursive distributed representations", |
| "authors": [ |
| { |
| "first": "Jordan", |
| "middle": [ |
| "B" |
| ], |
| "last": "Pollack", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Artificial Intelligence", |
| "volume": "46", |
| "issue": "1", |
| "pages": "77--105", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jordan B. Pollack. 1990. Recursive distributed repre- sentations. Artificial Intelligence, 46(1):77-105.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Reasoning about entailment with neural attention", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rockt\u00e4schel", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Ko\u010disk\u00fd", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Rockt\u00e4schel, Edward Grefenstette, Karl Moritz Hermann, Tom\u00e1\u0161 Ko\u010disk\u1ef3, and Phil Blunsom. 2016. Reasoning about entailment with neural attention. In ICLR 2016.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Semi-supervised recursive autoencoders for predicting sentiment distributions", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "H" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "151--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Jeffrey Pennington, Eric H. Huang, Andrew Y. Ng, and Christopher D. Manning. 2011. Semi-supervised recursive autoencoders for predict- ing sentiment distributions. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 151-161, Edinburgh, Scotland, UK., July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Semantic compositionality through recursive matrix-vector spaces", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Brody", |
| "middle": [], |
| "last": "Huval", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1201--1211", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Brody Huval, Christopher D. Man- ning, and Andrew Y. Ng. 2012. Semantic composi- tionality through recursive matrix-vector spaces. In Proceedings of the 2012 Joint Conference on Empir- ical Methods in Natural Language Processing and Computational Natural Language Learning, pages 1201-1211, Jeju Island, Korea, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Perelygin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1631--1642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment tree- bank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Process- ing, pages 1631-1642, Seattle, Washington, USA, October. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. In NIPS, pages 3104-3112.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Improved semantic representations from tree-structured long short-term memory networks", |
| "authors": [ |
| { |
| "first": "Kai Sheng", |
| "middle": [], |
| "last": "Tai", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1556--1566", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Sheng Tai, Richard Socher, and Christopher D. Manning. 2015. Improved semantic representa- tions from tree-structured long short-term memory networks. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 1556-1566, Beijing, China, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Grammar as a foreign language", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Terry", |
| "middle": [], |
| "last": "Koo", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, \u0141ukasz Kaiser, Terry Koo, Slav Petrov, Ilya Sutskever, and Geoffrey Hinton. 2015. Gram- mar as a foreign language. In NIPS 2015.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Learning natural language inference with lstm", |
| "authors": [ |
| { |
| "first": "Shuohang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1442--1451", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuohang Wang and Jing Jiang. 2016. Learning natu- ral language inference with lstm. In Proceedings of the 2016 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, pages 1442-1451, San Diego, California, June. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Wikiqa: A challenge dataset for open-domain question answering", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Meek", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2013--2018", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Yang, Wen-tau Yih, and Christopher Meek. 2015. Wikiqa: A challenge dataset for open-domain ques- tion answering. In Proceedings of the 2015 Con- ference on Empirical Methods in Natural Language Processing, pages 2013-2018, Lisbon, Portugal, September. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Question answering using enhanced lexical semantic models", |
| "authors": [ |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Meek", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrzej", |
| "middle": [], |
| "last": "Pastusiak", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1744--1753", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wen-tau Yih, Ming-Wei Chang, Christopher Meek, and Andrzej Pastusiak. 2013. Question answering us- ing enhanced lexical semantic models. In Proceed- ings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 1744-1753, Sofia, Bulgaria, August. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Deep learning for answer sentence selection", |
| "authors": [ |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Pulman", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "NIPS Deep Learning Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lei Yu, Karl Moritz Hermann, Phil Blunsom, and Stephen Pulman. 2014. Deep learning for answer sentence selection. In NIPS Deep Learning Work- shop 2014.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Long short-term memory over recursive structures", |
| "authors": [ |
| { |
| "first": "Xiao-Dan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Parinaz", |
| "middle": [], |
| "last": "Sobhani", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongyu", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "1604--1612", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiao-Dan Zhu, Parinaz Sobhani, and Hongyu Guo. 2015. Long short-term memory over recursive structures. In ICML, pages 1604-1612.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Node-by-node attention visualizations. The phrases shown on the top are nodes from hypothesis-indexed tree and the premise tokens are listed along the x-axis. The adjacent cells are composed in the top cell representing a binary tree and resulting a longer attention span. a person park for fun Santa Claus sad, depressed, and hatred single person", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Fine-grained sentiment classification accuracy vs. padding size on test set of SST data. tiword phrases.", |
| "uris": null |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "text": "Training and test accuracy on natural language inference task. d is the word embedding size and |\u03b8| M the number of model parameters.", |
| "html": null, |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td>Model</td><td>Bin</td><td>FG</td></tr><tr><td>RNTN</td><td/><td/></tr></table>", |
| "text": "Test set performance on answer sentence selection.", |
| "html": null, |
| "type_str": "table", |
| "num": null |
| } |
| } |
| } |
| } |