| { |
| "paper_id": "C18-1035", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:06:51.895173Z" |
| }, |
| "title": "Adopting the Word-Pair-Dependency-Triplets with Individual Comparison for Natural Language Inference", |
| "authors": [ |
| { |
| "first": "Qianlong", |
| "middle": [], |
| "last": "Du", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition", |
| "institution": "Chinese Academy of Science", |
| "location": {} |
| }, |
| "email": "qianlong.du@nlpr.ia.ac.cn" |
| }, |
| { |
| "first": "Chengqing", |
| "middle": [], |
| "last": "Zong", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition", |
| "institution": "Chinese Academy of Science", |
| "location": {} |
| }, |
| "email": "cqzong@nlpr.ia.ac.cn" |
| }, |
| { |
| "first": "Keh-Yih", |
| "middle": [], |
| "last": "Su", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Academia Sinica", |
| "location": {} |
| }, |
| "email": "kysu@iis.sinica.edu.tw" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper proposes to perform natural language inference with Word-Pair-Dependency-Triplets. Most previous DNN-based approaches either ignore syntactic dependency among words, or directly use tree-LSTM to generate sentence representation with irrelevant information. To overcome the problems mentioned above, we adopt Word-Pair-Dependency-Triplets to improve alignment and inference judgment. To be specific, instead of comparing each triplet from one passage with the merged information of another passage, we first propose to perform comparison directly between the triplets of the given passage-pair to make the judgment more interpretable. Experimental results show that the performance of our approach is better than most of the approaches that use tree structures, and is comparable to other state-of-the-art approaches.", |
| "pdf_parse": { |
| "paper_id": "C18-1035", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper proposes to perform natural language inference with Word-Pair-Dependency-Triplets. Most previous DNN-based approaches either ignore syntactic dependency among words, or directly use tree-LSTM to generate sentence representation with irrelevant information. To overcome the problems mentioned above, we adopt Word-Pair-Dependency-Triplets to improve alignment and inference judgment. To be specific, instead of comparing each triplet from one passage with the merged information of another passage, we first propose to perform comparison directly between the triplets of the given passage-pair to make the judgment more interpretable. Experimental results show that the performance of our approach is better than most of the approaches that use tree structures, and is comparable to other state-of-the-art approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Natural language inference (NLI) refers to the following task: given a text passage P (Premise) (which might have more than one sentence) and a text passage H (Hypothesis), whether we can infer H from P, i.e., identifying a specific relationship among entailment, neutral and contradiction. It has many applications such as question answering (Bhaskar et al., 2013; Harabagiu and Hickl, 2006) , information extraction (Romano et al., 2006) , machine translation (Pado et al., 2009) , automatic text summarization (Harabagiu et al., 2007) and so on. Some evaluations about this task have been organized in the past decades, such as the PASCAL Recognizing Textual Entailment (RTE) Challenge (Dagan et al., 2005) , SemEval-2014 (Marelli et al., 2014) and RITE (Shima et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 343, |
| "end": 365, |
| "text": "(Bhaskar et al., 2013;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 366, |
| "end": 392, |
| "text": "Harabagiu and Hickl, 2006)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 418, |
| "end": 439, |
| "text": "(Romano et al., 2006)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 462, |
| "end": 481, |
| "text": "(Pado et al., 2009)", |
| "ref_id": null |
| }, |
| { |
| "start": 513, |
| "end": 537, |
| "text": "(Harabagiu et al., 2007)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 689, |
| "end": 709, |
| "text": "(Dagan et al., 2005)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 725, |
| "end": 747, |
| "text": "(Marelli et al., 2014)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 757, |
| "end": 777, |
| "text": "(Shima et al., 2011)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Many previous approaches adopt statistical frameworks (Heilman et al., 2010; Kouylekov and Magnini, 2005) . However, neural network approaches have emerged after Stanford Natural Language Inference (SNLI) dataset (Bowman et al., 2015) was released. Most of them adopt an increasingly complicated network structure to represent text passages, and then predict the relationship between them (Bowman et al., 2016; Liu et al., 2016b) . However, P might include extra words which are not directly related to H. Actually, only the words in P that are associated with the words in H should be paid attention to. Those relevant words should be emphasized more while the irrelevant words should be less weighted during decision making. Therefore, some approaches (Parikh et al., 2016; Chen et al., 2017a) adopt attention mechanism to implicitly align the words between two passages to yield a better performance. This idea is very similar to how human make the entailment judgment, and the result shows that it is very effective for performing natural language inference on SNLI corpus in which most words in H can find their corresponding ones in P. Figure 1 . Dependency trees 1 for Premise \"An older man sits with his orange juice at a small table in a coffee shop while employees in bright colored shirts smile in the background.\" and Hypothesis \"An elderly man sitting in a small shop.\" The relationship between them is \"neutral\".", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 76, |
| "text": "(Heilman et al., 2010;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 77, |
| "end": 105, |
| "text": "Kouylekov and Magnini, 2005)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 389, |
| "end": 410, |
| "text": "(Bowman et al., 2016;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 411, |
| "end": 429, |
| "text": "Liu et al., 2016b)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 754, |
| "end": 775, |
| "text": "(Parikh et al., 2016;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 776, |
| "end": 795, |
| "text": "Chen et al., 2017a)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1142, |
| "end": 1150, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "However, after having analyzed some errors generated from an attention-based approach (Parikh et al., 2016) , we find that it will introduce mis-alignment and might cause wrong inference. Although context information is used to alleviate this problem, it still cannot handle long distance dependency. Take the following sentence pair as an example (The benchmark is neutral):", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 107, |
| "text": "(Parikh et al., 2016)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Premise: An older man sits with his orange juice at a small table in a coffee shop while employees in bright colored shirts smile in the background.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The attention-based approach (Parikh et al., 2016) cannot catch the relation between \"shop\" and \"small\" in P precisely (which is important for the prediction). In this example, the relationship between P and H will be predicted as \"entailment\", because all the words in H can be found in P, although the word \"small\" in these two sentences does not modify the same thing (e.g., \"small\" modifies \"table\" in premise while modifies \"shop\" in hypothesis).", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 50, |
| "text": "(Parikh et al., 2016)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hypothesis: An elderly man sitting in a small shop.", |
| "sec_num": null |
| }, |
| { |
| "text": "The above example clearly shows that a sentence is not a set of independent words. It is a sequence of words with syntactic relationship. Based on this observation, we propose to adopt the Word-Pair Relation-Head-Dependent (RHD) triplet 2 for conducting alignment and comparison. Furthermore, Parikh et al. (2016) and other previous models only compare each word in H with the vector merged from all the words in P according to their associated alignment scores, and vice versa. However, as shown in Figure 1 , human compares H and P mainly based on Structure Analogy (Du et al., 2016; Gentner 1983; Gentner & Markman, 1997) instead of the merged meaning which will not only import irrelevant text but also lose information during merging. Consequently, only words with closely related syntactic/semantic roles (of the aligned predicates) should be compared.", |
| "cite_spans": [ |
| { |
| "start": 280, |
| "end": 313, |
| "text": "Furthermore, Parikh et al. (2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 568, |
| "end": 585, |
| "text": "(Du et al., 2016;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 586, |
| "end": 599, |
| "text": "Gentner 1983;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 600, |
| "end": 624, |
| "text": "Gentner & Markman, 1997)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 500, |
| "end": 508, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hypothesis: An elderly man sitting in a small shop.", |
| "sec_num": null |
| }, |
| { |
| "text": "Therefore, we first create two sets of RHD from P and H to denote their corresponding structures, and then perform comparison between triplets in P and those in H. Accordingly, two RHD triplets should be aligned if their relations are related and their head-words are aligned (e.g., the triplets with the same indexes are aligned in Figure 1 ). Particularly, when two RHD triplets are compared, each part of RHD triplet (i.e., Relation, Head, and Dependent) should be compared separately. Besides, as the words of some triplet pairs possess reversed head-dependent relations (e.g., the Dependent in \"(nsubj, sits, man)\" is linked to the Head in \"(vmod, man, sitting)\", as shown by the triplet pair with index 3 in Figure 1 ), we introduce cross-comparison to compare the Head from \"(nsubj, sits, man)\" with the Dependent from \"(vmod, man, sitting)\", and vice versa. The skeleton of the proposed approach. The rel, head and dep of the triplet are represented with green, purple, and light-blue colors, respectively. Also, \u2295 denotes the Comparison operation between triplets (see section 2.2.2 for \"Matching layer\"), while \uf0c4 denotes the Aggregation operation (see section 2.2.3 for \"Aggregation layer\"). Besides, the left part and the right part in Matching Layer represent P-aligned-to-H and H-aligned-to-P, respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 333, |
| "end": 341, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 714, |
| "end": 722, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hypothesis: An elderly man sitting in a small shop.", |
| "sec_num": null |
| }, |
| { |
| "text": "Our contributions are summarized as follows: (1) The RHD triplet is first proposed to be the alignment and comparison unit in the neural network for NLI. In this way, the corresponding words could be aligned and compared more precisely. (2) Instead of comparing one RHD triplet of H with the merged meaning of all the RHD triplets in P (and vice versa), we propose to directly compare each RHD triplet of H with another RHD triplet of P; and each part in RHD triplet is compared separately.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hypothesis: An elderly man sitting in a small shop.", |
| "sec_num": null |
| }, |
| { |
| "text": "(3) We propose to use cross-comparison to compare the related words with different syntactic/semantic roles.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hypothesis: An elderly man sitting in a small shop.", |
| "sec_num": null |
| }, |
| { |
| "text": "In our model, we first transform P and H into two sets of RHD triplets. For each RHD triplet of P, we compare it with another RHD triplet of H (without merging) to generate an individual comparison vector (and vice versa). Afterwards, we use a self-attention mechanism to align and sum them to yield the one-side merged comparison vector between a triplet and the triplet set of the other side. Last, we aggregate those one-side merged comparison vectors to give the overall entailment judgment. Figure 2 shows the skeleton of the proposed approach. It consists of the following 5 layers: (1) Input Layer, which initializes the embedding of the words and relations; (2) Triplet Embedding Layer, which is used to adapt the input embedding to yield a better representation for this task; (3) Matching Layer, which performs comparison within each RHD triplet pair, scores the alignment weights, and sum those individual comparison vectors to generate the one-side merged comparison vector between each triplet with the triplet set of the other side; (4) Aggregation Layer, which aggregates the one-side merged comparison vectors to get a directional comparison vector for each comparing direction; and (5) Prediction Layer, which uses two separated directional comparison vectors and a feed-forward neural network classifier to predict the overall judgment.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 496, |
| "end": 504, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Proposed Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our model is symmetric about P and H. So for simplicity, we only describe the left parts, which are mainly about comparing each unit of P with H. The right part is exactly the same except that the roles of P and H are exchanged. 2. H(,) is a multilayer perceptron denoted in equation 4. The green solid-line, purple solid-line and red solid-line represent the comparison of pair (rel, rel), (head, head) and (dep, dep), respectively. The purple dot-line represents the cross-comparison of pair (head, dep), and red dot-line represents the cross-comparison of pair (dep, head).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We first use a dependency parser 3 to transform P and H into two sets of RHD triplets. We define ^: = ( , , \u22ef , ) and ^: = (\u210e , \u210e , \u22ef , \u210e ) be two sets of RHD triplets, while and \u210e denote the RHD triplet and the RHD triplet in P and H, respectively; also, m and n indicate the number of associated triplets in P and H, respectively. We then instantiate the Input Layer with the corresponding word-embeddings and rel-embedding of RHD triplets (For conciseness, we will let rel/head/dep denote both the original meaning and the corresponding embedding interchangeably from now on). Each \u210e , \u2208 is a word embedding of dimension which is initialized with pre-trained GloVe word embedding (Pennington et al., 2014) , while \u2208 is a relation embedding vector of dimension and is initialized randomly with a standard normal distribution (Please note, only will be tuned later during training). Each triplet-embedding will be presented as a triplet which contains three embedding corresponding to rel, head and dep respectively.", |
| "cite_spans": [ |
| { |
| "start": 683, |
| "end": 708, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Layer Generation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "As we fix the value of word embedding during training, in order to obtain better relation/word embedding representations to compare for this task, we use a simple feed-forward structure to adapt the three parts of the triplet to the task. The computations are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Triplet Embedding Layer", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "= * + \u210e = * \u210e + (1) = * +", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Triplet Embedding Layer", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "where * is the multiplication of matrices, \u2208 , \u210e \u2208 and \u2208 are the input embedding-vectors from Input Layer, \u2208 , \u210e \u2208 and \u2208 are the new representations generated,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Triplet Embedding Layer", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "\u2208 \u00d7 , \u2208 \u00d7 , \u2208 , \u2208", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Triplet Embedding Layer", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "are the weight matrices to be learned. Here, all the rel share the same weight matrices, while all the words share the same weight matrices.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Triplet Embedding Layer", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "This layer is the core of our model. It is mainly used to perform the individual comparison between two triplets and use associated alignment weights to focus on the individual-comparisons of the preferred alignments. In this step, we will use a one-side merged comparison vector to represent each comparison result of one triplet with the triplet-set of another side. For a triplet from Triplet Embedding Layer, it has three parts: \u2208 , \u210e \u2208 and \u2208 ; and for \u210e , it has \u2208 , \u210e \u2208 and \u2208 . Figure 3 shows how the individual comparison-vector of one triplet-pair is generated. The vector \u2208 denotes the comparison result between triplet and a triplet \u210e from ^, while d is the dimension of hidden layer. During comparison, each component of the triplet (i.e., rel, head and dep) is compared independently, as shown in the figure. Here, the comparing function is denoted as comp(,) in equation (2). G is a multi-layer perceptron with one hidden layer and a Relu activation. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 484, |
| "end": 492, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Where and are any two embedding vectors. The notation \";\" (within the bracket-pair in the above equation) denotes Concatenation; also, '\u2212' and '\u2299' are the difference and element-wise product of vectors respectively. Then we can get the comparison results in Figure 3 as follows:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 258, |
| "end": 266, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": ": = , \u210e : = \u210e , \u210e \u210e _ : = \u210e ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": ": = ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "_ : = , \u210e", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Where \u210e _ and _ are the results of cross-comparison. Please note that the comparison functions with the same input arguments share the same set of parameters in G. For example, all the functions comp(head, head) share a set of parameters while all comp(head, dep) share another set of parameters. After we obtain the comparison results of these components, we can incorporate them to yield the triplet individual comparison vector between and \u210e as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "= ([ ; \u210e ; \u210e _ ; ; _ ])", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Here H is a multi-layer perceptron with two hidden layers and a Relu activation. Afterwards, we need to generate the alignment weight between triplet and triplet \u210e to extract the key information for judgment. Most of the previous models (Chen et al., 2017a; Parikh et al., 2016) use the multiplication of two semantic unit vectors as their alignment weights. However, we find the individual comparison here can describe the relatedness of those triplet-pairs better. Consequently, we generate the alignment weights using these individual comparison vectors with a self-attention mechanism as follows. ", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 257, |
| "text": "(Chen et al., 2017a;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 258, |
| "end": 278, |
| "text": "Parikh et al., 2016)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Where \u2208 \u00d7 and \u2208 \u00d7 are weight matrices to be learned. Then we use to obtain the one-side merged comparison vectors , from various as follow.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": ", = ( ) \u2211 ( ))", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Where , is the one-side merged comparison vector between and the whole set , is the number of RHD triplets in H; , (the right part in Figure 2 ) is defined similarly between \u210e and the whole .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 134, |
| "end": 142, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Matching Layer", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "In this layer, we aggregate all the one-side merged comparison vectors , and , (obtained above) to generate the final comparison vector for these two different directions between P and H. Like the previous approaches (Chen et al., 2017a; Parikh et al., 2016) , we aggregate the information by summation and max pooling:", |
| "cite_spans": [ |
| { |
| "start": 217, |
| "end": 237, |
| "text": "(Chen et al., 2017a;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 238, |
| "end": 258, |
| "text": "Parikh et al., 2016)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Aggregation Layer", |
| "sec_num": "2.2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": ", = \u2211 , , , = = 1 , , = [ , ; , ] , = \u2211 , , , = = 1 , , = [ , ; , ]", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Aggregation Layer", |
| "sec_num": "2.2.3" |
| }, |
| { |
| "text": "Where and are the numbers of triplets in P and H, respectively. We get the overall comparison vector by concatenating the summation , and the max pooling , using the one-side merged comparison vectors of P (and vice versa). Afterwards, we use these overall comparison vectors to predict the relationship in the next section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Aggregation Layer", |
| "sec_num": "2.2.3" |
| }, |
| { |
| "text": "From the above section, we have obtained and , which are the overall comparison vectors from ^ to ^ and from ^ to ^, respectively (i.e., the overall comparisons in two different directions). We then concatenate them and use a multi-layer perceptron classifier Q (it has two hidden layers with Relu activation and a softmax output layer) to generate the final overall judgment vector \u02c6 (as shown in Eq. 8), where \u02c6\u2208 (C equals the number of classes) are the scores for each class. The predicted class can be got by setting = argmax \u02c6 . When we train the model, we use multi-class cross-entropy loss with dropout regularization (Srivastava et al., 2014) .", |
| "cite_spans": [ |
| { |
| "start": 625, |
| "end": 650, |
| "text": "(Srivastava et al., 2014)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prediction Layer", |
| "sec_num": "2.2.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u02c6= ([ ; ])", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Prediction Layer", |
| "sec_num": "2.2.4" |
| }, |
| { |
| "text": "3 Experiments", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prediction Layer", |
| "sec_num": "2.2.4" |
| }, |
| { |
| "text": "We adopt both SNLI (Bowman et al., 2015) corpus 4 and MultiNLI (Williams et al., 2018 ) corpus 5 to test the performance. They are briefly introduced as follows.", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 85, |
| "text": "(Williams et al., 2018", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "SNLI -It contains 570k sentence pairs. The sentence pairs in this corpus are labelled with one of the following relationships: entailment, contradiction, neutral and \"-\", where \"-\" means that it lacks consensus from human annotators. In our experiments, we follow Bowman et al. (2015) to delete those sentence pairs labelled with \"-\". Consequently, we end up with 549,367 pairs for training, 9,842 pairs for development and 9,824 pairs for testing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "MultiNLI -This corpus has 433k sentence pairs, which are collected from a broad range of genres of American English such as written non-fiction genres (e.g. SLATE, OUP), spoken genres (TELEPHONE, FACE-TO-FACE), less formal written genres (FICTION, LETTERS), and one specialized for 9/11. For the training set of this corpus, it selects half of the genres to create in-domain (matched) and out-of-domain (mismatched) development/test sets. Since the test set labels of this corpus are not released, the test performance is obtained through submission to Kaggle.com 6 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Acc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": null |
| }, |
| { |
| "text": "Test Acc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": null |
| }, |
| { |
| "text": "(1) LSTM (Bowman et al., 2015) 84.4 77.6 (2) Classifier (Bowman et al., 2015) 99.7 78.2", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 77, |
| "text": "(Bowman et al., 2015)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": null |
| }, |
| { |
| "text": "(3) 300D tree-based CNN encoders (Mou et al., 2016) 83.3 82.1 (4) 300D SPINN-PI encoders (Bowman et al. 2016) 89.2 83.2 (5) 100D LSTMs w/ word-by-word attention (Rocktaschel et al., 2015) 85.3 83.5 (6) 300D mLSTM word-by-word attention model (Wang & Jiang, 2016) 92.0 86.1 7 (Zhao et al., 2016) 87.7 87.2 (10) 300D Full tree matching NTI-SLSTM-LSTM w/ global attention (Munkhdalai and Yu, 2016) 88.5 87.3", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 51, |
| "text": "(Mou et al., 2016)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 71, |
| "end": 109, |
| "text": "SPINN-PI encoders (Bowman et al. 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 161, |
| "end": 187, |
| "text": "(Rocktaschel et al., 2015)", |
| "ref_id": null |
| }, |
| { |
| "start": 242, |
| "end": 262, |
| "text": "(Wang & Jiang, 2016)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 275, |
| "end": 294, |
| "text": "(Zhao et al., 2016)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 369, |
| "end": 394, |
| "text": "(Munkhdalai and Yu, 2016)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": null |
| }, |
| { |
| "text": "(11) 300D Syntactic Tree-LSTM (Chen et al., 2017a) 92.9 87.8 Human Performance (Gong et al., 2017) 97 (1)BiLSTM (Williams et al., 2018) 67.0 67.6 (2) Inner Att (Balazs et al., 2017) 72.1 72.1", |
| "cite_spans": [ |
| { |
| "start": 30, |
| "end": 50, |
| "text": "(Chen et al., 2017a)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 79, |
| "end": 98, |
| "text": "(Gong et al., 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 112, |
| "end": 135, |
| "text": "(Williams et al., 2018)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 160, |
| "end": 181, |
| "text": "(Balazs et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": null |
| }, |
| { |
| "text": "(3) ESIM (Williams et al., 2018) 72.3 72.1 (4) Gated-Att BiLSTM (Chen et al., 2017b) 73.2 73.6 (5) Shortcut-Stacked encoder (Nie & Bansal, 2017) 74.6 73.6 (6) DIIN (Gong et al., 2017) 78.8 77.8 (7) Inner Att (ensemble) (Balazs et al., 2017) 72.2 72.8 (8) Gated-Att BiLSTM (ensemble) (Chen et al., 2017b) 74.9 74.9 (9) DIIN (ensemble) (Gong et al., 2017) 80.0 78.7 Human Performance (Gong et al., 2017) 88.5 89.2 Our model 75.1 74.7 Table 2 . Performance on MultiNLI", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 32, |
| "text": "(Williams et al., 2018)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 64, |
| "end": 84, |
| "text": "(Chen et al., 2017b)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 124, |
| "end": 144, |
| "text": "(Nie & Bansal, 2017)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 164, |
| "end": 183, |
| "text": "(Gong et al., 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 219, |
| "end": 240, |
| "text": "(Balazs et al., 2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 283, |
| "end": 303, |
| "text": "(Chen et al., 2017b)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 334, |
| "end": 353, |
| "text": "(Gong et al., 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 382, |
| "end": 401, |
| "text": "(Gong et al., 2017)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 432, |
| "end": 439, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": null |
| }, |
| { |
| "text": "In order to initialize the words in the triplets, we used 300 dimensional Glove embedding (Pennington et al., 2014) . For the relation vectors (the dimension is set to 20), we use a standard normal distribution to randomly initialize the values and then normalize each vector. Besides, for OOV words, we follow (Parikh et al., 2016) to initialize them by randomly selecting one from 100 random vectors. During the training, the word embedding including the 100 random vectors for OOV words are fixed while the embedding of the relations will be updated. The dimensions of the Triplet embedding Layer for relation and head/dependent-words are set to 20 and 300, respectively. And the dimensions of other hidden layers are set to 1,024. Besides, Adadelta method (Zeiler, 2012) is adopted for optimization, and the batch size is 32, the dropout ratio is 0.2, while the learning rate is 0.1. Table 1 shows the results of different models on SNLI. The first group includes two baseline classifiers presented by Bowman et al. (2015) . In model (1), they use LSTM to learn a representation for the passage, and then use the representation of the passage-pair to predict the judgment label. Model (2) uses a traditional statistical classifier to predict the label with some handcrafted features such as the overlapped words, negation detection, etc. In the second group, Model (3) and model (4) are two models based on passage encoding with tree structure. In model (3), Mou et al. (2016) considers tree-based CNN to capture passage-level semantics, while Bowman et al. (2016) use parsing and interpretation within a single tree-sequence hybrid model in model (4).", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 115, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 311, |
| "end": 332, |
| "text": "(Parikh et al., 2016)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 1006, |
| "end": 1026, |
| "text": "Bowman et al. (2015)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1463, |
| "end": 1480, |
| "text": "Mou et al. (2016)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 888, |
| "end": 895, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Details of training", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the next group, models (5)-(11) are inter-passage attention-based models which are similar to ours. Model (5) and model (6) are LSTMs with word-by-word attention. In model (7), Parikh et al. (2016) decompose each passage into a set of words and then compare two word-sets of the passage-pair. They further improve the performance by adding intra-passage attentions in model (8). Our model is inspired by their approach. Specifically, in models (9)-(11): Zhao et al. (2016) adopt tree-LSTM and attention mechanism based on binary tree to generate semantic units and compare; Munkhdalai and Yu (2016) constructs a full n-ary tree to improve the performance, while Chen et al. (2017a) use a syntactic tree-LSTM to extract the inner dependency relations in the passage and compare between the passage-pair. Table 1 shows that our model achieves an accuracy of 87.4%, which outperforms most of those models with tree structure (even those with complicated network architectures (Munkhdalai and Yu, 2016; Zhao et al., 2016) ), and our model is more interpretable than the other models which are based on tree structure. Specifically, our performance is better than Parikh et al. (2016) significantly though our model is inspired by theirs.", |
| "cite_spans": [ |
| { |
| "start": 457, |
| "end": 475, |
| "text": "Zhao et al. (2016)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 976, |
| "end": 1001, |
| "text": "(Munkhdalai and Yu, 2016;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1002, |
| "end": 1020, |
| "text": "Zhao et al., 2016)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 806, |
| "end": 813, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In the first group of Table 2 , models (1)-(6) show some published best performances on MultiNLI. And models (7)-(9) are some ensemble models which have a very complicated network architecture. From this table, we can see that our performance is better than models (1)-(5) on matched set (even outperforms the ensemble models (7) and (8)), and better than models (1)-(5) on mismatched set. Please note that the structure of our model is more interpretable than that of others. (3) remove cross-comparison 88.3 86.5 Table 3 . Ablation study on SNLI set Table 3 examines the effect of each major component. In order to check whether the strategy of performing individual comparison between RHD triplets works well in this task, model (1) directly compares one triplet with the merged representation from the triplet-set of the other side (in the Matching Layer). In this model, we compute one representation for each triplet by concatenating its three parts, align between these triplet representations of the sentence pair, and then compare each triplet with the merged representation from the triplet-set of the other sentence as in (Parikh et al., 2016) . It shows that the proposed individual comparison strategy gives a better result. In model (2), instead of using self-attention to generate the alignment weight between one triplet pair, we first yield a semantic vector representation for each triplet by concatenating their three parts and processing it with a linear transform. And then we use the multiplication of the vector representations of each triplet-pair as their alignment weight (as adopted in (Parikh et al., 2016) ). When we do this alteration, the accuracy drops to 86.6% on the test set. This again proves that adopting individual comparison for each triplet-pair can describe their relatedness more accurately.", |
| "cite_spans": [ |
| { |
| "start": 1133, |
| "end": 1154, |
| "text": "(Parikh et al., 2016)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 1613, |
| "end": 1634, |
| "text": "(Parikh et al., 2016)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 22, |
| "end": 29, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 515, |
| "end": 522, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 552, |
| "end": 559, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Last, we remove cross-comparison from our approach and list the result in model (3) to check the effect of this component. This component is mainly used to compare the words which are similar in semantics but play different syntactic/semantic roles. It shows that when we remove this component, the accuracy drops to 86.5%. Figure 4 shows the alignment weights (i.e., the degree of relatedness between the triplets within the same sentence pair) of the example in Figure 1 . In this figure, a darker color corresponds to a larger value of . From this figure, we can see that the related triplets do have larger alignment weights between them. For example, compared with other triplets of H, triplet (amod, man, elderly) in H (the red entry at the left of Figure 4 ) is more related to (amod, man, older) of P (the red entry at the top), which is echoed with a darker corresponding cell in Figure 4 . Similarly, the corresponding cell for (nsubj, sits, man) of P and (vmod, man, sitting) of H shows a darker color, which also meets human judgment. This clearly shows that the alignment weights between these two triplet sets reflect the human interpretation closely.", |
| "cite_spans": [ |
| { |
| "start": 699, |
| "end": 719, |
| "text": "(amod, man, elderly)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 324, |
| "end": 332, |
| "text": "Figure 4", |
| "ref_id": "FIGREF5" |
| }, |
| { |
| "start": 464, |
| "end": 472, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 755, |
| "end": 763, |
| "text": "Figure 4", |
| "ref_id": "FIGREF5" |
| }, |
| { |
| "start": 889, |
| "end": 897, |
| "text": "Figure 4", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation analysis", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Early approaches for natural language inference usually adopted statistical models such as SVM (Joachims, 1998), CRF (Hatoriet et al., 2009) and so on, which employed hand-crafted features, and utilized various external resources and specialized sub-components such as negation detection (Lai and Hockenmaier, 2014; Levy et al., 2013) . Besides, all the adopted datasets are very small.", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 140, |
| "text": "CRF (Hatoriet et al., 2009)", |
| "ref_id": null |
| }, |
| { |
| "start": 288, |
| "end": 315, |
| "text": "(Lai and Hockenmaier, 2014;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 316, |
| "end": 334, |
| "text": "Levy et al., 2013)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "After the SNLI corpus (Bowman et al., 2015) was released, a lot of work about natural language inference based on neural networks have been published in recent years (Bowman et al., 2016; Liu et al., 2016; Liu et al., 2016b; Munkhdalai and Yu, 2016; Mou et al., 2015; Sha et al., 2016) . Basically, those neural network based approaches could be classified into 2 categories: (1) Merely computing the passage-embedding without introducing alignment (between the words in the sentences), and then comparing these passage-embedding to get the prediction (Bowman et al., 2016 , Mou et al., 2016 . (2) Decomposing the passage into some semantic units, comparing each semantic unit with the passage of the other side and then aggregating these comparisons (Parikh et al., 2016; Wang et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 166, |
| "end": 187, |
| "text": "(Bowman et al., 2016;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 188, |
| "end": 205, |
| "text": "Liu et al., 2016;", |
| "ref_id": null |
| }, |
| { |
| "start": 206, |
| "end": 224, |
| "text": "Liu et al., 2016b;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 225, |
| "end": 249, |
| "text": "Munkhdalai and Yu, 2016;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 250, |
| "end": 267, |
| "text": "Mou et al., 2015;", |
| "ref_id": null |
| }, |
| { |
| "start": 268, |
| "end": 285, |
| "text": "Sha et al., 2016)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 552, |
| "end": 572, |
| "text": "(Bowman et al., 2016", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 573, |
| "end": 591, |
| "text": ", Mou et al., 2016", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 751, |
| "end": 772, |
| "text": "(Parikh et al., 2016;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 773, |
| "end": 791, |
| "text": "Wang et al., 2017)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Within the first category, Bowman et al. (2015) used two LSTMs to get the representations of P and H, respectively. They then compare the vector representation of these two passages to predict the relationship between P and H. Besides, Bowman et al. (2016) used tree-LSTM to encode the representation of the passage. Although it uses the dependency relation in the passages to generate the representation, it cannot point out the difference among different words and is unable to catch the key information in the prediction step.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For the second category, Zhao et al. (2017) used the intermediate representations from tree-LSTM to compare P and H. It compares each node representation with the merged representation of the other passage, and generates the result bottom-up. In this way, it can extract the dependency relations in the passages and compare semantics in different granularities. However, they adopt the tree-LSTM to generate the intermediate representations bottom-up, and then directly compare each unit with the merged representation of the other passage. As a result, they cannot catch the key information and filter out irrelevant text.", |
| "cite_spans": [ |
| { |
| "start": 25, |
| "end": 43, |
| "text": "Zhao et al. (2017)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Different from them, we transform P and H into two sets of RHD triplets; instead of comparing one RHD triplet of P with the merged information of the whole H (and vice versa), we directly compare an RHD triplet of P with another RHD triplet of H to obtain the individual comparison vector (without merging). Specifically, when comparing two RHD triplets, each part in the RHD triplet (i.e., rel, head, and dep) is compared separately and cross-comparison is adopted between nodes. Furthermore, we generate more precise alignment weights with these individual comparison vectors and the self-attention mechanism. Consequently, our model is more interpretable than the previous models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Inspired by how humans judge the entailment relationship between the given Premise (P) and Hypothesis (H) text passages, we propose to transform the passages into two sets of word-pair dependency triplets, and then directly compare each triplet with every triplet from the other side to get the individual comparison vector. In this way, we can filter out the irrelevant information and judge the relationship between two passages more precisely. In order to further improve the comparison precision, we compare each part (i.e., \"rel, head, and dep\") of the triplet separately and adopt cross-comparison to compare the related words which have different syntactic/semantic roles. As these individual comparison vectors can describe the relatedness of the triplet-pair well, we use them and the self-attention mechanism to generate the alignment weights between triplets from each passage. Afterwards, we use the alignment weights to incorporate these individual comparison vectors to yield the one-side merged comparison vector of one RHD triplet with the RHD triplet set of the whole other side. Finally, we aggregate those one-side merged comparison vectors to conduct the final overall entailment decision.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\uf020 This work is licensed under a Creative Commons Attribution 4.0 International Licence. Licence details: http:// creativecommons.org/licenses/by/4.0/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The words in black represent the nodes of the dependency tree, and the string on each line represents the dependency relation between two nodes. The red relation denotes that its associated triplet is important in making the judgment. Links with the same indexes indicate that they are aligned and compared when humans judge the result. 2 Each RHD triplet is denoted by \"(rel, head, dep)\", where head and dep denote the head-word and the dependent-word of the dependency relation, respectively; and rel denotes the dependency relation between head and dependent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://nlp.stanford.edu/software/lex-parser.shtml#Download", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://nlp.stanford.edu/projects/snli/ 5 http://www.nyu.edu/projects/bowman/multinli/ 6 Matched : https://www.kaggle.com/c/multinli-matched-open-evaluation ; Mismatched: https://www.kaggle.com/c/multinlimismatched-open-evaluation", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The research work described in this paper has been supported by the National Key Research and Development Program of China under Grant No. 2017YFC0820700 and the Natural Science Foundation of China under Grant No. 61333018.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Refining Raw Sentence Representations for Textual Entailment Recognition via Attention", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Jorge", |
| "suffix": "" |
| }, |
| { |
| "first": "Edison", |
| "middle": [], |
| "last": "Balazs", |
| "suffix": "" |
| }, |
| { |
| "first": "Pablo", |
| "middle": [], |
| "last": "Marrese-Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "Yutaka", |
| "middle": [], |
| "last": "Loyola", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Matsuo", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2 nd Workshop on Evaluating Vector-Space Representations for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "51--55", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jorge A Balazs, Edison Marrese-Taylor, Pablo Loyola, and Yutaka Matsuo. 2017. Refining Raw Sentence Repre- sentations for Textual Entailment Recognition via Attention. In Proceedings of the 2 nd Workshop on Evaluat- ing Vector-Space Representations for NLP, pages 51-55, Copenhagen, Denmark, September 7-11, 2017.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "In: Question Answering for Machine Reading Evaluation (QA4MRE) at CLEF 2013 Conference and Labs of the Evaluation Forum", |
| "authors": [ |
| { |
| "first": "Pinaki", |
| "middle": [], |
| "last": "Bhaskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Somnath", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Partha", |
| "middle": [], |
| "last": "Pakray", |
| "suffix": "" |
| }, |
| { |
| "first": "Samadrita", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sivaji", |
| "middle": [], |
| "last": "Bandyopadhyay", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Gelbukh", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pinaki Bhaskar, Somnath Banerjee, Partha Pakray, Samadrita Banerjee, Sivaji Bandyopadhyay and Alexander Gelbukh. 2013. A hybrid question answering system for Multiple Choice Question (MCQ). In: Question An- swering for Machine Reading Evaluation (QA4MRE) at CLEF 2013 Conference and Labs of the Evaluation Forum, Valencia, Spain.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Recognising textual entailment with logical inference", |
| "authors": [ |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| }, |
| { |
| "first": "Katja", |
| "middle": [], |
| "last": "Markert", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing (HLT/EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "628--635", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johan Bos and Katja Markert. 2005. Recognising textual entailment with logical inference. In Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing (HLT/EMNLP), pages 628-635, Vancouver, October 2005.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A large annotated corpus for learning natural language inference", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "17--21", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts and Christopher D. Manning. 2015. A large annotated cor- pus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 632-642, Lisbon, Portugal, 17-21 September 2015.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A fast unified model for parsing and sentence understanding", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhinav", |
| "middle": [], |
| "last": "Gauthier", |
| "suffix": "" |
| }, |
| { |
| "first": "Raghav", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54 th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1466--1477", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Jon Gauthier, Abhinav Rastogi, Raghav Gupta, Christopher D. Manning and Christopher Potts. 2016. A fast unified model for parsing and sentence understanding. In Proceedings of the 54 th Annual Meeting of the Association for Computational Linguistics, pages 1466-1477, Berlin, Germany, August 7-12, 2016.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Enhanced LSTM for Natural Language Inference", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhenhua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55 th Annual Meeting of the Association for Computional Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1657--1668", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Chen, Xiaodan Zhu, Zhenhua Ling, Si Wei, Hui Jiang and Diana Inkpen. 2017a. Enhanced LSTM for Natu- ral Language Inference. In Proceedings of the 55 th Annual Meeting of the Association for Computional Lin- guistics, pages 1657-1668 Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Recurrent Neural Network-Based Sentence Encoder with Gated Attention for Natural Language Inference", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2 nd Workshop on Evaluating Vector-Space Representations for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "36--40", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. 2017b. Recurrent Neural Net- work-Based Sentence Encoder with Gated Attention for Natural Language Inference. In Proceedings of the 2 nd Workshop on Evaluating Vector-Space Representations for NLP, pages 36-40, Copenhagen, Denmark, September 7-11, 2017.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "My computer is an honor student-but how intelligent is it? Standard tests as measure of ai", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Qren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "AI Magazine", |
| "volume": "37", |
| "issue": "1", |
| "pages": "5--12", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Clark and Qren Etzioni. 2016. My computer is an honor student-but how intelligent is it? Standard tests as measure of ai. AI Magazine 37(1):5-12.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "The PASCAL Recognising Textual Entailment Challenge", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Ido Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernardo", |
| "middle": [], |
| "last": "Glickman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Magnini", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "International Conference on Machine Learning Challenges: Evaluating Predictive Uncertainty Visual Object Classification, and Recognizing Textual Entailment", |
| "volume": "", |
| "issue": "", |
| "pages": "177--190", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ido Dagan, Oren Glickman and Bernardo Magnini. 2005. The PASCAL Recognising Textual Entailment Chal- lenge. In International Conference on Machine Learning Challenges: Evaluating Predictive Uncertainty Vis- ual Object Classification, and Recognizing Textual Entailment, pages 177-190.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Intergrating Structural Context and Local Context for Disambiguating Word Senses", |
| "authors": [ |
| { |
| "first": "Qianlong", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengqing", |
| "middle": [], |
| "last": "Zong", |
| "suffix": "" |
| }, |
| { |
| "first": "Keh-Yih", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "The Fifth Conference On Natural Language Processing and Chinese Computing & The Twenty Fourth International Conference On Computer Processing of Oriental Languages(NLPCC-ICCPOL 2016)\uff0cKunming", |
| "volume": "", |
| "issue": "", |
| "pages": "3--15", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qianlong Du, Chengqing Zong and Keh-Yih Su. Intergrating Structural Context and Local Context for Disam- biguating Word Senses. The Fifth Conference On Natural Language Processing and Chinese Computing & The Twenty Fourth International Conference On Computer Processing of Oriental Languages(NLPCC- ICCPOL 2016)\uff0cKunming, China, December 2-6, 2016, pages. 3-15", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Structure mapping in analogy and similarity", |
| "authors": [ |
| { |
| "first": "Dedre", |
| "middle": [], |
| "last": "Gentner", |
| "suffix": "" |
| }, |
| { |
| "first": "Arthur", |
| "middle": [ |
| "B" |
| ], |
| "last": "Markman", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "American Psychologist", |
| "volume": "52", |
| "issue": "1", |
| "pages": "45--56", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dedre Gentner and Arthur B. Markman. 1997. Structure mapping in analogy and similarity. American Psycholo- gist, 52(1), 45-56.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Structure-mapping: A theoretical framework for analogy", |
| "authors": [ |
| { |
| "first": "Dedre", |
| "middle": [], |
| "last": "Gentner", |
| "suffix": "" |
| } |
| ], |
| "year": 1983, |
| "venue": "Congnitive science", |
| "volume": "7", |
| "issue": "2", |
| "pages": "155--170", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dedre Gentner. 1983. Structure-mapping: A theoretical framework for analogy. Congnitive science 7(2):155-170.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Natural Language Inference Over Interaction Space", |
| "authors": [ |
| { |
| "first": "Yichen", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yichen Gong, Heng Luo and Jian Zhang. 2017. Natural Language Inference Over Interaction Space. CoRR abs/1709.04348. http://arxiv.org/abs/1709.04348.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Deep sparse rectifier neural networks", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 14 th International Conference on Artifical Intelligence and Statistics (AISTATS) 2011", |
| "volume": "15", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot, Antoine Bordes, and Yoshua Bengio. 2011. Deep sparse rectifier neural networks. In Proceedings of the 14 th International Conference on Artifical Intelligence and Statistics (AISTATS) 2011, Fort Lauderdale, FL, USA. Volume 15 of JMLR: W&CP 15.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Methods for using textual entailment in open-domain question answering", |
| "authors": [ |
| { |
| "first": "Sanda", |
| "middle": [], |
| "last": "Harabagiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Hickl", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st International Conference on Computational Linguistics and the 44th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "905--912", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sanda Harabagiu and Andrew Hickl. 2006. Methods for using textual entailment in open-domain question an- swering. In Proceedings of the 21st International Conference on Computational Linguistics and the 44th An- nual Meeting of the Association for Computational Linguistics. Association for Computational Linguistics, Sydney, Australia, 2006:905-912", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Satisfying information needs with multi-document summaries", |
| "authors": [ |
| { |
| "first": "Sanda", |
| "middle": [], |
| "last": "Harabagiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Hickl", |
| "suffix": "" |
| }, |
| { |
| "first": "Finley", |
| "middle": [], |
| "last": "Lacatusu", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Information Processing & Management", |
| "volume": "43", |
| "issue": "", |
| "pages": "1619--1642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sanda Harabagiu, Andrew Hickl, and Finley Lacatusu. 2007. Satisfying information needs with multi-document summaries. Information Processing & Management 43.6 (2007): 1619-1642.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "On contribution of sense dependencies to word sense disambiguation", |
| "authors": [ |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Hatori", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Information and Media Technologies", |
| "volume": "4", |
| "issue": "", |
| "pages": "1129--1155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jun Hatori, Yusuke Miyao, and Jun'ichi Tsujii. On contribution of sense dependencies to word sense disambigua- tion. 2009. Information and Media Technologies 4.4 (2009): 1129-1155.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Tree edit models for recognizing textual entailments, paraphrases, and answers to questions", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Heilman", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Heilman, and Noah A. Smith. Tree edit models for recognizing textual entailments, paraphrases, and answers to questions. 2010. Human Language Technologies: The 2010 Annual Conference of the North Amer- ican Chapter of the Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Making large-scale SVM learning practical", |
| "authors": [ |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "SFB 475: Komplexit\u00e4tsreduktion in Multivariaten Datenstrukturen", |
| "volume": "28", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thorsten Joachims. Making large-scale SVM learning practical. 1998. No. 1998, 28. Technical Report, SFB 475: Komplexit\u00e4tsreduktion in Multivariaten Datenstrukturen, Universit\u00e4t Dortmund.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. CoRR abs/1412.6980. http://arxiv.org/abs/1412.6980.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Recognizing textual entailment with tree edit distance algorithms", |
| "authors": [ |
| { |
| "first": "Milen", |
| "middle": [], |
| "last": "Kouylekov", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernardo", |
| "middle": [], |
| "last": "Magnini", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the First Challenge Workshop Recognising Textual Entailment", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Milen Kouylekov, and Bernardo Magnini. 2005. Recognizing textual entailment with tree edit distance algo- rithms. In Proceedings of the First Challenge Workshop Recognising Textual Entailment.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Illinois-LH: A Denotational and Distributional Approach to Semantics", |
| "authors": [ |
| { |
| "first": "Alice", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hockenmaier", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 8 th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "329--334", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alice Lai, and Julia Hockenmaier. 2014. Illinois-LH: A Denotational and Distributional Approach to Semantics. In Proceedings of the 8 th International Workshop on Semantic Evaluation (SemEval 2014), pages 329-334, Dublin, Ireland, August 23-24, 2014.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Recognizing partial textual entailment", |
| "authors": [ |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Torsten", |
| "middle": [], |
| "last": "Zesch", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Asociation for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "451--455", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omer Levy, Torsten Zesch, Ido Dagan and Iryna Gurevych. 2013. Recognizing partial textual entailment. In Pro- ceedings of the 51st Annual Meeting of the Asociation for Computational Linguistics, pages 451-455.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Deep Fusion LSTMs for Text Semantic Matching", |
| "authors": [ |
| { |
| "first": "Pengfei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jifan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanjing", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1034--1043", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengfei Liu, Xipeng Qiu, Jifan Chen and Xuanjing Huang. 2016a. Deep Fusion LSTMs for Text Semantic Matching. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, pages 1034-1043.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Learning Natural Language Inference using Bidirectional LSTM model and Inner-Attention", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengjie", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaolong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprintarXiv:1605.09090" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang Liu, Chengjie Sun, Lei Lin and Xiaolong Wang. 2016b. Learning Natural Language Inference using Bidi- rectional LSTM model and Inner-Attention. In arXiv preprint arXiv: 1605.09090.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Visualizing data using t-sne", |
| "authors": [ |
| { |
| "first": "Laurens", |
| "middle": [], |
| "last": "Van Der Maaten", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Journal of Machine Learing Research", |
| "volume": "9", |
| "issue": "", |
| "pages": "2579--2605", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laurens van der Maaten and Geoffrey Hinton. 2008. Visualizing data using t-sne. Journal of Machine Learing Research, 9(Nov):2579-2605.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "SemEval-2014 Task 1: Evaluation of Compositional Distributional Semantic Models on Full Sentences through Semantic Relatedness and Textual Entailment", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Marelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Luisa", |
| "middle": [], |
| "last": "Bentivogli", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Raffaella", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Menini", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Zamparelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 8 th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Marelli, Luisa Bentivogli, Marco Baroni, Raffaella Bernardi, Stefano Menini and Roberto Zamparelli. 2014. SemEval-2014 Task 1: Evaluation of Compositional Distributional Semantic Models on Full Sentences through Semantic Relatedness and Textual Entailment. In Proceedings of the 8 th International Workshop on Semantic Evaluation (SemEval 2014), pages 1-8, Dublin, Ireland, August 23-24 2014.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Natural Language Inference by Tree-Based Convoluation and Heuristic Matching", |
| "authors": [ |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Mou", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Men", |
| "suffix": "" |
| }, |
| { |
| "first": "Ge", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhi", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54 th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "130--136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lili Mou, Rui Men, Ge Li, Yan Xu, Lu Zhang, Rui Yan and Zhi Jin. 2016. Natural Language Inference by Tree- Based Convoluation and Heuristic Matching. In Proceedings of the 54 th Annual Meeting of the Association for Computational Linguistics, pages 130-136, Berlin, Germany, August 7-12, 2016.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Neural tree indexers for text understanding", |
| "authors": [ |
| { |
| "first": "Tsendsuren", |
| "middle": [], |
| "last": "Munkhdalai", |
| "suffix": "" |
| }, |
| { |
| "first": "Hong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 15 th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "11--21", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsendsuren Munkhdalai and Hong Yu. 2016. Neural tree indexers for text understanding. In Proceedings of the 15 th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers, pages 11-21, Valencia, Spain, April 3-7, 2017.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Shortcut-Stacked Sentence Encoder for Multi-Domain Inference", |
| "authors": [ |
| { |
| "first": "Yixin", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2 nd Workshop on Evaluating Vector-Space Representation for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "41--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yixin Nie and Mohit Bansal. Shortcut-Stacked Sentence Encoder for Multi-Domain Inference. In Proceedings of the 2 nd Workshop on Evaluating Vector-Space Representation for NLP, pages 41-45, Copenhagen, Denmark, September 7-11, 2017.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Recognizing entailment in intelligent tutoring systems", |
| "authors": [ |
| { |
| "first": "Rodney", |
| "middle": [ |
| "D" |
| ], |
| "last": "Nielsen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wayne", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "H" |
| ], |
| "last": "Martin", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Natural Language Engineering", |
| "volume": "15", |
| "issue": "", |
| "pages": "479--501", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rodney D. Nielsen, Wayne Ward, and James H. Martin. 2009. Recognizing entailment in intelligent tutoring systems. Natural Language Engineering 15.4 (2009): 479-501.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Investigating a generic paraphrase-based approach for relation extraction", |
| "authors": [ |
| { |
| "first": "Lorenza", |
| "middle": [], |
| "last": "Romano", |
| "suffix": "" |
| }, |
| { |
| "first": "Milen", |
| "middle": [], |
| "last": "Kouylekov", |
| "suffix": "" |
| }, |
| { |
| "first": "Idan", |
| "middle": [], |
| "last": "Szpektor", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "409--416", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lorenza Romano, Milen Kouylekov, Idan Szpektor, Ido Dagan and Alberto Lavelli. 2006. Investigating a gener- ic paraphrase-based approach for relation extraction. In Proceedings of EACL, pages 409-416, Trento.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Measuring machine translation quality as semantic equivalence: A metric based on entailment features", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Pad\u00f3", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Machine Translation", |
| "volume": "23", |
| "issue": "", |
| "pages": "181--193", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Pad\u00f3, et al. 2009. Measuring machine translation quality as semantic equivalence: A metric based on entailment features. Machine Translation 23.2-3 (2009): 181-193.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "A Decomposable Attention Model for Natural Language Inference", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Ankur", |
| "suffix": "" |
| }, |
| { |
| "first": "Oscar", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Tackstrom", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2249--2255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankur P. Parikh, Oscar Tackstrom, Dipanjan Das and Jakob Uszkoreit. 2016. A Decomposable Attention Model for Natural Language Inference. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2249-2255.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "GloVe: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. GloVe: Global vectors for word repre- sentation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, October 25-29, 2014, Doha, Qatar.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Reasoning about entailment with neural attention", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rockt\u00e4schel", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Ko\u010disk\u00fd", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Rockt\u00e4schel, Edward Grefenstette, Karl Moritz Hermann, Tom\u00e1\u0161 Ko\u010disk\u00fd and Phil Blunsom. 2015. Reason- ing about entailment with neural attention. In arXiv-2015. https://arxiv.org/abs/1509.06664", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Reading and Thinking: Reread LSTM Unit for Textual Entailment Recognition", |
| "authors": [ |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Sha", |
| "suffix": "" |
| }, |
| { |
| "first": "Baobao", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifang", |
| "middle": [], |
| "last": "Sui", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "2870--2879", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lei Sha, Baobao Chang, Zhifang Sui and Sujian Li. 2016. Reading and Thinking: Reread LSTM Unit for Textual Entailment Recognition. In Proceedings of COLING 2016, the 26th International Conference on Computa- tional Linguistics: Technical Papers, pages 2870-2879.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Recognizing Textual Entailment using Dependency Analysis and Machine Learning", |
| "authors": [ |
| { |
| "first": "Nidhi", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Richa", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Kanad", |
| "middle": [ |
| "K" |
| ], |
| "last": "Biswas", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL-HLT 2015 Student Research Workshop (SRW)", |
| "volume": "", |
| "issue": "", |
| "pages": "147--153", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nidhi Sharma, Richa Sharma and Kanad K. Biswas. 2015. Recognizing Textual Entailment using Dependency Analysis and Machine Learning. In Proceedings of NAACL-HLT 2015 Student Research Workshop (SRW), pages 147-153.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Overview of NTCIR-9 RITE: Recognizing Inference in Text", |
| "authors": [ |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Shima", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroshi", |
| "middle": [], |
| "last": "Kanayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheng-Wei", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Chuan-Jie", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Teruko", |
| "middle": [], |
| "last": "Mitamura", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuming", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Koichi", |
| "middle": [], |
| "last": "Takeda", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of NTCIR-9 Workshop Meeting", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hideki Shima, Hiroshi Kanayama, Cheng-Wei Lee, Chuan-Jie Lin, Teruko Mitamura, Yusuke Miyao, Shuming Shi and Koichi Takeda. 2011. Overview of NTCIR-9 RITE: Recognizing Inference in Text. In Proceedings of NTCIR-9 Workshop Meeting.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Dropout: A simple way to prevent neural network from overfintting", |
| "authors": [ |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutshever", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "15", |
| "issue": "1", |
| "pages": "1929--1958", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutshever, and Ruslan Salakhutdinov. 2014. Dropout: A simple way to prevent neural network from overfintting. The Journal of Machine Learning Research, 15(1):1929-1958.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Learning Natural Language Inference with LSTM", |
| "authors": [ |
| { |
| "first": "Shuohang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "1442--1451", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuohang Wang and Jing Jiang. 2016. Learning Natural Language Inference with LSTM. In Proceedings of NAACL-HLT 2016, pages 1442-1451, San Diego, California, June 12-17, 2016.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Bilateral Multi-Perspective Matching for Natural Language Sentences", |
| "authors": [ |
| { |
| "first": "Zhiguo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Hamza", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Florian", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiguo Wang, Wael Hamza and Radu Florian. Bilateral Multi-Perspective Matching for Natural Language Sen- tences. IJCAI(2017).", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel R", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL-HLT 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "1112--1122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel R Bowman. 2018. A Broad-Coverage Challenge Corpus for Sen- tence Understanding through Inference. In Proceedings of NAACL-HLT 2018, pages 1112-1122, New Orleans, Louisiana, June 1-6, 2018.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Textual Entailment with Structured Attentions and Composition", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingbo", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of Coling 2016, the 26th International Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "2248--2258", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Zhao, Liang Huang and Mingbo Ma. Textual Entailment with Structured Attentions and Composition. In Proceedings of Coling 2016, the 26th International Computational Linguistics: Technical Papers, pages 2248-2258, Osaka, Japan, December 11-17 2016.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Ecnu: One stone two birds: Ensemble of heterogenous measures for semantic relatedness and textual entailment", |
| "authors": [ |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Tian", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Man", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 8th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "271--277", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiang Zhao, Tian Tian Zhu and Man Lan. 2014. Ecnu: One stone two birds: Ensemble of heterogenous measures for semantic relatedness and textual entailment. In Proceedings of the 8th International Workshop on Seman- tic Evaluation (SemEval 2014), pages 271-277, Dublin, Ireland, August 23-24, 2014.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Figure 2: The skeleton of the proposed approach. The rel, head and dep of the triplet are represented with green, purple, and light-blue colors, respectively. Also, \u2295 denotes the Comparison operation between triplets (see section 2.2.2 for \"Matching layer\"), while \uf0c4 denotes the Aggregation operation (see section 2.2.3 for \"Aggregation layer\"). Besides, the left part and the right part in Matching Layer represent P-aligned-to-H and H-aligned-to-P, respectively.", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "text": "An illustration for the comparison between a triplet in Premise and a triplet \u210e in Hypothesis. The rel, head and dep of the triplet are represented with green, purple, and light-blue colors, respectively. Comp(,) indicate the comparison function denoted in equation", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "num": null, |
| "text": "( , ) = ([ ; ; \u2212 ; \u2299 ])", |
| "uris": null |
| }, |
| "FIGREF5": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Triplet alignment weights for the triplet-pair inFigure 1. The darker color represents greater value. The triplets for P are on the top, and the triplets for H are on the left.", |
| "uris": null |
| } |
| } |
| } |
| } |