{ "paper_id": "S13-1002", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T15:42:49.881767Z" }, "title": "Montague Meets Markov: Deep Semantics with Probabilistic Logical Form", "authors": [ { "first": "Islam", "middle": [], "last": "Beltagy", "suffix": "", "affiliation": {}, "email": "beltagy@cs.utexas.edu" }, { "first": "Cuong", "middle": [], "last": "Chau", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Gemma", "middle": [], "last": "Boleda", "suffix": "", "affiliation": { "laboratory": "", "institution": "The University of Texas at Austin Austin", "location": { "postCode": "78712", "region": "Texas" } }, "email": "gemma.boleda@utcompling.com" }, { "first": "Dan", "middle": [], "last": "Garrette", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Katrin", "middle": [], "last": "Erk", "suffix": "", "affiliation": { "laboratory": "", "institution": "The University of Texas at Austin Austin", "location": { "postCode": "78712", "region": "Texas" } }, "email": "katrin.erk@mail.utexas.edu" }, { "first": "Raymond", "middle": [], "last": "Mooney", "suffix": "", "affiliation": {}, "email": "mooney@cs.utexas.edu" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "We combine logical and distributional representations of natural language meaning by transforming distributional similarity judgments into weighted inference rules using Markov Logic Networks (MLNs). We show that this framework supports both judging sentence similarity and recognizing textual entailment by appropriately adapting the MLN implementation of logical connectives. 
We also show that distributional phrase similarity, used as textual inference rules created on the fly, improves its performance.", "pdf_parse": { "paper_id": "S13-1002", "_pdf_hash": "", "abstract": [ { "text": "We combine logical and distributional representations of natural language meaning by transforming distributional similarity judgments into weighted inference rules using Markov Logic Networks (MLNs). We show that this framework supports both judging sentence similarity and recognizing textual entailment by appropriately adapting the MLN implementation of logical connectives. We also show that distributional phrase similarity, used as textual inference rules created on the fly, improves its performance.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Tasks in natural language semantics are very diverse and pose different requirements on the underlying formalism for representing meaning. Some tasks require a detailed representation of the structure of complex sentences. Some tasks require the ability to recognize near-paraphrases or degrees of similarity between sentences. Some tasks require logical inference, either exact or approximate. Often it is necessary to handle ambiguity and vagueness in meaning. Finally, we frequently want to be able to learn relevant knowledge automatically from corpus data.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "There is no single representation for natural language meaning at this time that fulfills all requirements. But there are representations that meet some of the criteria. Logic-based representations (Montague, 1970; Kamp and Reyle, 1993) provide an expressive and flexible formalism to express even complex propositions, and they come with standardized inference mechanisms. 
Distributional mod- (Turney and Pantel, 2010) use contextual similarity to predict semantic similarity of words and phrases (Landauer and Dumais, 1997; Mitchell and Lapata, 2010) , and to model polysemy (Sch\u00fctze, 1998; Erk and Pad\u00f3, 2008; Thater et al., 2010) . This suggests that distributional models and logicbased representations of natural language meaning are complementary in their strengths (Grefenstette and Sadrzadeh, 2011; Garrette et al., 2011) , which encourages developing new techniques to combine them. Garrette et al. (2011; 2013) propose a framework for combining logic and distributional models in which logical form is the primary meaning representation. Distributional similarity between pairs of words is converted into weighted inference rules that are added to the logical form, as illustrated in Figure 1. Finally, Markov Logic Networks (Richardson and Domingos, 2006) (MLNs) are used to perform weighted inference on the resulting knowledge base. However, they only employed single-word distributional similarity rules, and only evaluated on a small set of short, hand-crafted test sentences.", "cite_spans": [ { "start": 198, "end": 214, "text": "(Montague, 1970;", "ref_id": "BIBREF35" }, { "start": 215, "end": 236, "text": "Kamp and Reyle, 1993)", "ref_id": "BIBREF24" }, { "start": 394, "end": 419, "text": "(Turney and Pantel, 2010)", "ref_id": "BIBREF47" }, { "start": 498, "end": 525, "text": "(Landauer and Dumais, 1997;", "ref_id": "BIBREF27" }, { "start": 526, "end": 552, "text": "Mitchell and Lapata, 2010)", "ref_id": "BIBREF34" }, { "start": 577, "end": 592, "text": "(Sch\u00fctze, 1998;", "ref_id": "BIBREF41" }, { "start": 593, "end": 612, "text": "Erk and Pad\u00f3, 2008;", "ref_id": "BIBREF15" }, { "start": 613, "end": 633, "text": "Thater et al., 2010)", "ref_id": "BIBREF45" }, { "start": 773, "end": 807, "text": "(Grefenstette and Sadrzadeh, 2011;", "ref_id": "BIBREF21" }, { "start": 808, "end": 830, "text": "Garrette et al., 2011)", 
"ref_id": "BIBREF17" }, { "start": 893, "end": 915, "text": "Garrette et al. (2011;", "ref_id": "BIBREF17" }, { "start": 916, "end": 921, "text": "2013)", "ref_id": "BIBREF18" }, { "start": 1195, "end": 1213, "text": "Figure 1. Finally,", "ref_id": null }, { "start": 1214, "end": 1235, "text": "Markov Logic Networks", "ref_id": null }, { "start": 1236, "end": 1267, "text": "(Richardson and Domingos, 2006)", "ref_id": null }, { "start": 1268, "end": 1274, "text": "(MLNs)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In this paper, we extend Garrette et al.'s approach and adapt it to handle two existing semantic tasks: recognizing textual entailment (RTE) and semantic textual similarity (STS). We show how this single semantic framework using probabilistic logical form in Markov logic can be adapted to support both of these important tasks. This is possible because MLNs constitute a flexible programming language based on probabilistic logic (Domingos and Lowd, 2009) that can be easily adapted to support multiple types of linguistically useful inference.", "cite_spans": [ { "start": 431, "end": 456, "text": "(Domingos and Lowd, 2009)", "ref_id": "BIBREF13" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "At the word and short phrase level, our approach model entailment through \"distributional\" similarity ( Figure 1 ). If X and Y occur in similar contexts, we assume that they describe similar entities and thus there is some degree of entailment between them. 
At the sentence level, however, we hold that a stricter, logic-based view of entailment is beneficial, and we even model sentence similarity (in STS) as entailment.", "cite_spans": [], "ref_spans": [ { "start": 104, "end": 112, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "There are two main innovations in the formalism that make it possible for us to work with naturally occurring corpus data. First, we use more expressive distributional inference rules based on the similarity of phrases rather than just individual words. In comparison to existing methods for creating textual inference rules (Lin and Pantel, 2001b; Szpektor and Dagan, 2008) , these rules are computed on the fly as needed, rather than pre-compiled. Second, we use more flexible probabilistic combinations of evidence in order to compute degrees of sentence similarity for STS and to help compensate for parser errors. We replace deterministic conjunction by an average combiner, which encodes causal independence (Natarajan et al., 2010) .", "cite_spans": [ { "start": 325, "end": 348, "text": "(Lin and Pantel, 2001b;", "ref_id": "BIBREF29" }, { "start": 349, "end": 374, "text": "Szpektor and Dagan, 2008)", "ref_id": "BIBREF44" }, { "start": 714, "end": 738, "text": "(Natarajan et al., 2010)", "ref_id": "BIBREF36" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "We show that our framework is able to handle both sentence similarity (STS) and textual entailment (RTE) by making some simple adaptations to the MLN when switching between tasks. The framework achieves reasonable results on both tasks. On STS, we obtain a correlation of r = 0.66 with full logic, r = 0.73 in a system with weakened variable binding, and r = 0.85 in an ensemble model. On RTE-1 we obtain an accuracy of 0.57. 
We show that the distributional inference rules benefit both tasks and that more flexible probabilistic combinations of evidence are crucial for STS. Although other approaches could be adapted to handle both RTE and STS, we do not know of any other methods that have been explicitly tested on both problems.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Distributional semantics Distributional models define the semantic relatedness of words as the similarity of vectors representing the contexts in which they occur (Landauer and Dumais, 1997; Lund and Burgess, 1996) . Recently, such models have also been used to represent the meaning of larger phrases. The simplest models compute a phrase vector by adding the vectors for the individual words (Landauer and Dumais, 1997) or by a component-wise product of word vectors (Mitchell and Lapata, 2008; Mitchell and Lapata, 2010) . Other approaches, in the emerging area of distributional compositional semantics, use more complex methods that compute phrase vectors from word vectors and tensors (Baroni and Zamparelli, 2010; Grefenstette and Sadrzadeh, 2011) .", "cite_spans": [ { "start": 163, "end": 190, "text": "(Landauer and Dumais, 1997;", "ref_id": "BIBREF27" }, { "start": 191, "end": 214, "text": "Lund and Burgess, 1996)", "ref_id": "BIBREF31" }, { "start": 394, "end": 421, "text": "(Landauer and Dumais, 1997)", "ref_id": "BIBREF27" }, { "start": 469, "end": 496, "text": "(Mitchell and Lapata, 2008;", "ref_id": "BIBREF33" }, { "start": 497, "end": 523, "text": "Mitchell and Lapata, 2010)", "ref_id": "BIBREF34" }, { "start": 691, "end": 720, "text": "(Baroni and Zamparelli, 2010;", "ref_id": "BIBREF2" }, { "start": 721, "end": 754, "text": "Grefenstette and Sadrzadeh, 2011)", "ref_id": "BIBREF21" } ], "ref_spans": [], "eq_spans": [], "section": "Related work", "sec_num": "2" }, { "text": "Wide-coverage logic-based semantics Boxer (Bos, 2008 ) is a software package 
for wide-coverage semantic analysis that produces logical forms using Discourse Representation Structures (Kamp and Reyle, 1993) . It builds on the C&C CCG parser (Clark and Curran, 2004) .", "cite_spans": [ { "start": 42, "end": 52, "text": "(Bos, 2008", "ref_id": "BIBREF6" }, { "start": 183, "end": 205, "text": "(Kamp and Reyle, 1993)", "ref_id": "BIBREF24" }, { "start": 240, "end": 264, "text": "(Clark and Curran, 2004)", "ref_id": "BIBREF9" } ], "ref_spans": [], "eq_spans": [], "section": "Related work", "sec_num": "2" }, { "text": "In order to combine logical and probabilistic information, we draw on existing work in Statistical Relational AI (Getoor and Taskar, 2007) . Specifically, we utilize Markov Logic Networks (MLNs) (Domingos and Lowd, 2009) , which employ weighted formulas in first-order logic to compactly encode complex undirected probabilistic graphical models. MLNs are well suited for our approach since they provide an elegant framework for assigning weights to first-order logical rules, combining a diverse set of inference rules and performing sound probabilistic inference.", "cite_spans": [ { "start": 113, "end": 138, "text": "(Getoor and Taskar, 2007)", "ref_id": null }, { "start": 195, "end": 220, "text": "(Domingos and Lowd, 2009)", "ref_id": "BIBREF13" } ], "ref_spans": [], "eq_spans": [], "section": "Markov Logic", "sec_num": null }, { "text": "An MLN consists of a set of weighted first-order clauses. It provides a way of softening first-order logic by allowing situations in which not all clauses are satisfied. More specifically, they provide a well-founded probability distribution across possible worlds by specifying that the probability of a world increases exponentially with the total weight of the logical clauses that it satisfies. While methods exist for learning MLN weights directly from training data, since the appropriate training data is lacking, our approach uses weights computed using distributional semantics. 
We use the open-source software package Alchemy (Kok et al., 2005) for MLN inference, which allows computing the probability of a query literal given a set of weighted clauses as background knowledge and evidence.", "cite_spans": [ { "start": 636, "end": 654, "text": "(Kok et al., 2005)", "ref_id": "BIBREF25" } ], "ref_spans": [], "eq_spans": [], "section": "Markov Logic", "sec_num": null }, { "text": "Tasks: RTE and STS Recognizing Textual Entailment (RTE) is the task of determining whether one natural language text, the premise, implies another, the hypothesis. Consider (1) below.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Markov Logic", "sec_num": null }, { "text": "(1) p: Oracle had fought to keep the forms from being released h: Oracle released a confidential document Here, h is not entailed. RTE directly tests whether a system can construct semantic representations that allow it to draw correct inferences. Of existing RTE approaches, the closest to ours is by Bos and Markert (2005) , who employ a purely logical approach that uses Boxer to convert both the premise and hypothesis into first-order logic and then checks for entailment using a theorem prover. By contrast, our approach uses Markov logic with probabilistic inference. Semantic Textual Similarity (STS) is the task of judging the similarity of two sentences on a scale from 0 to 5 (Agirre et al., 2012) . Gold standard scores are averaged over multiple human annotations. The best performer in 2012's competition was by B\u00e4r et al. (2012) , an ensemble system that integrates many techniques including string similarity, n-gram overlap, WordNet similarity, vector space similarity and MT evaluation metrics.", "cite_spans": [ { "start": 302, "end": 324, "text": "Bos and Markert (2005)", "ref_id": "BIBREF5" }, { "start": 687, "end": 708, "text": "(Agirre et al., 2012)", "ref_id": "BIBREF0" }, { "start": 826, "end": 843, "text": "B\u00e4r et al. 
(2012)", "ref_id": "BIBREF1" } ], "ref_spans": [], "eq_spans": [], "section": "Markov Logic", "sec_num": null }, { "text": "Weighted inference, and combined structuraldistributional representations One approach to weighted inference in NLP is that of Hobbs et al. (1993) , who proposed viewing natural language interpretation as abductive inference. In this framework, problems like reference resolution and syntactic ambiguity resolution become inferences to best explanations that are associated with costs. However, this leaves open the question of how costs are assigned. Raina et al. (2005) use this framework for RTE, deriving inference costs from WordNet similarity and properties of the syntactic parse. Garrette et al. (2011; 2013) proposed an approach to RTE that uses MLNs to combine traditional logical representations with distributional information in order to support probabilistic textual inference. This approach can be viewed as a bridge between Bos and Markert (2005) 's purely logical approach, which relies purely on hard logical rules and theorem proving, and distributional approaches, which support graded similarity between concepts but have no notion of logical operators or entailment.", "cite_spans": [ { "start": 127, "end": 146, "text": "Hobbs et al. (1993)", "ref_id": "BIBREF23" }, { "start": 452, "end": 471, "text": "Raina et al. (2005)", "ref_id": "BIBREF38" }, { "start": 588, "end": 610, "text": "Garrette et al. (2011;", "ref_id": "BIBREF17" }, { "start": 611, "end": 616, "text": "2013)", "ref_id": "BIBREF18" }, { "start": 840, "end": 862, "text": "Bos and Markert (2005)", "ref_id": "BIBREF5" } ], "ref_spans": [], "eq_spans": [], "section": "Markov Logic", "sec_num": null }, { "text": "There are also other methods that combine distributional and structured representations. Stern et al. (2011) conceptualize textual entailment as tree rewriting of syntactic graphs, where some rewriting rules are distributional inference rules. Socher et al. 
(2011) recognize paraphrases using a \"tree of vectors,\" a phrase structure tree in which each constituent is associated with a vector, and overall sentence similarity is computed by a classifier that integrates all pairwise similarities. (This is in contrast to approaches like Baroni and Zamparelli (2010) and Grefenstette and Sadrzadeh (2011) , who do not offer a proposal for using vectors at multiple levels in a syntactic tree simultaneously.)", "cite_spans": [ { "start": 89, "end": 108, "text": "Stern et al. (2011)", "ref_id": "BIBREF43" }, { "start": 536, "end": 564, "text": "Baroni and Zamparelli (2010)", "ref_id": "BIBREF2" }, { "start": 569, "end": 602, "text": "Grefenstette and Sadrzadeh (2011)", "ref_id": "BIBREF21" } ], "ref_spans": [], "eq_spans": [], "section": "Markov Logic", "sec_num": null }, { "text": "Our system extends that of Garrette et al. (2011; 2013) to support larger-scale evaluation on standard benchmarks for both RTE and STS. We conceptualize both tasks as probabilistic entailment in Markov logic, where STS is judged as the average degree of mutual entailment, i.e. we compute the probability of both S 1 |= S 2 and S 2 |= S 1 and average the results. Below are some sentence pairs that we use as examples in the discussion below:", "cite_spans": [ { "start": 27, "end": 49, "text": "Garrette et al. (2011;", "ref_id": "BIBREF17" }, { "start": 50, "end": 55, "text": "2013)", "ref_id": "BIBREF18" } ], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "(2) S 1 : A man is slicing a cucumber. S 2 : A man is slicing a zucchini.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "(3) S 1 : A boy is riding a bicycle. S 2 : A little boy is riding a bike.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "(4) S 1 : A man is driving. 
S 2 : A man is driving a car.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "System overview. To compute the probability of an entailment S 1 |= S 2 , the system first constructs logical forms for each sentence using Boxer and then translates them into MLN clauses. In example (2) above, the logical form for S 1 :", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "\u2203x 0 , e 1 , x 2 man(x 0 ) \u2227 slice(e 1 ) \u2227 Agent(e 1 , x 0 )\u2227 cucumber(x 2 ) \u2227 P atient(e 1 , x 2 )", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "is used as evidence, and the logical form for S 2 is turned into the following formula (by default, variables are assumed to be universally quantified):", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "man(x) \u2227 slice(e) \u2227 Agent(e, x)\u2227 zucchini(y) \u2227 P atient(e, y) \u2192 result()", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "where result() is the query for which we have Alchemy compute the probability. However, S 2 is not strictly entailed by S 1 because of the mismatch between \"cucumber\" and \"zucchini\", so with just the strict logical-form translations of S 1 and S 2 , the probability of result() will be zero. This is where we introduce distributional similarity, in this case the similarity of \"cucumber\" and \"zucchini\", cos( # \u00bb cucumber, # \u00bb zucchini). We create inference rules from such similarities as a form of background knowledge. We then treat similarity as degree of entailment, a move that has a long tradition (e.g., (Lin and Pantel, 2001b; Raina et al., 2005; Szpektor and Dagan, 2008) ). 
In general, given two words a and b, we transform their cosine similarity into an inference-rule weight wt(a, b) using:", "cite_spans": [ { "start": 612, "end": 635, "text": "(Lin and Pantel, 2001b;", "ref_id": "BIBREF29" }, { "start": 636, "end": 655, "text": "Raina et al., 2005;", "ref_id": "BIBREF38" }, { "start": 656, "end": 681, "text": "Szpektor and Dagan, 2008)", "ref_id": "BIBREF44" } ], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "wt(a, b) = log( cos( #\u00bb a , #\u00bb b ) 1 \u2212 cos( #\u00bb a , #\u00bb b ) ) \u2212 prior (5)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "Where prior is a negative weight used to initialize all predicates, so that by default facts are assumed to have very low probability. In our experiments, we use prior = \u22123. In the case of sentence pair (2), we generate the inference rule:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "cucumber(x) \u2192 zucchini(x) | wt(cuc., zuc.)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "Such inference rules are generated for all pairs of words (w 1 , w 2 ) where w 1 \u2208 S 1 and w 2 \u2208 S 2 . 1", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "The distributional model we use contains all lemmas occurring at least 50 times in the Gigaword corpus (Graff et al., 2007) except a list of stop words. The dimensions are the 2,000 most frequent of these words, and cell values are weighted with point-wise mutual information. 2", "cite_spans": [ { "start": 103, "end": 123, "text": "(Graff et al., 2007)", "ref_id": "BIBREF20" } ], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "Phrase-based inference rules. Garrette et al. 
only considered distributional inference rules for pairs of individual words. We extend their approach to distributional inference rules for pairs of phrases in order to handle cases like (3). To properly estimate the similarity between S 1 and S 2 in (3), we not only need an inference rule linking \"bike\" to \"bicycle\", but also a rule estimating how similar \"boy\" is to \"little boy\". To do so, we make use of existing approaches that compute distributional representations for phrases. In particular, we compute the vector for a phrase from the vectors of the words in that phrase, using either vector addition (Landauer and Dumais, 1997) or component-wise multiplication (Mitchell and Lapata, 2008; Mitchell and Lapata, 2010) . The inference-rule weight, wt(p 1 , p 2 ), for two phrases p 1 and p 2 is then determined using Eq. (5) in the same way as for words.", "cite_spans": [ { "start": 659, "end": 686, "text": "(Landauer and Dumais, 1997)", "ref_id": "BIBREF27" }, { "start": 720, "end": 747, "text": "(Mitchell and Lapata, 2008;", "ref_id": "BIBREF33" }, { "start": 748, "end": 774, "text": "Mitchell and Lapata, 2010)", "ref_id": "BIBREF34" } ], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "Existing approaches that derive phrasal inference rules from distributional similarity (Lin and Pantel, 2001a; Szpektor and Dagan, 2008; precompile large lists of inference rules. 
By comparison, distributional phrase similarity can be seen as a generator of inference rules \"on the fly\", as it is possible to compute distributional phrase vectors for arbitrary phrases on demand as they are needed for particular examples.", "cite_spans": [ { "start": 87, "end": 110, "text": "(Lin and Pantel, 2001a;", "ref_id": "BIBREF28" }, { "start": 111, "end": 136, "text": "Szpektor and Dagan, 2008;", "ref_id": "BIBREF44" } ], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "Inference rules are generated for all pairs of constituents (c 1 , c 2 ) where c 1 \u2208 S 1 and c 2 \u2208 S 2 , a constituent is a single word or a phrase. The logical form provides a handy way to extract phrases, as they are generally mapped to one of two logical constructs. Either we have multiple single-variable predicates operating on the same variable. For example the phrase \"a little boy\" has the logical form boy(x) \u2227 little(x). Or we have two unary predicates connected with a relation. For example, \"pizza slice\" and \"slice of pizza\" are both mapped to the logical form, slice(x 0 ) \u2227 of (x 0 , x 1 ) \u2227 pizza(x 1 ). We consider all binary predicates as relations.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "Average Combiner to determine similarity in the presence of missing phrases. 
The logical forms for the sentences in (4) are:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "S 1 : \u2203x 0 , e 1 man(x 0 )\u2227agent(x 0 , e 1 )\u2227drive(e 1 ) S 2 : \u2203x 0 , e 1 , x 2 man(x 0 ) \u2227 agent(x 0 , e 1 ) \u2227 drive(e 1 ) \u2227 patient(e 1 , x 2 ) \u2227 car(x 2 )", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "If we try to prove S 1 |= S 2 , the probability of the result() will be zero: There is no evidence for a car, and the hypothesis predicates are conjoined using a deterministic AND. For RTE, this makes sense: If one of the hypothesis predicates is False, the probability of entailment should be zero. For the STS task, this should in principle be the same, at least if the omitted facts are vital, but it seems that annotators rated the data points in this task more for overall similarity than for degrees of entailment. So in STS, we want the similarity to be a function of the number of elements in the hypothesis that are inferable. Therefore, we need to replace the deterministic AND with a different way of combining evidence. We chose to use the average evidence combiner for MLNs introduced by Natarajan et al.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "(2010). To use the average combiner, the full logical form is divided into smaller clauses (which we call mini-clauses), then the combiner averages their probabilities. In case the formula is a list of conjuncted predicates, a mini-clause is a conjunction of a single-variable predicate with a relation predicate (as in the example below). In case the logical form contains a negated sub-formula, the negated sub-formula is also a mini-clause. 
The hypothesis above after dividing clauses for the average combiner looks like this:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "man(x 0 ) \u2227 agent(x 0 , e 1 ) \u2192 result(x 0 , e 1 , x 2 ) | w drive(e 1 ) \u2227 agent(x 0 , e 1 ) \u2192 result(x 0 , e 1 , x 2 ) | w drive(e 1 ) \u2227 patient(e 1 , x 2 ) \u2192 result(x 0 , e 1 , x 2 ) | w car(x 2 ) \u2227 patient(e 1 , x 2 ) \u2192 result(x 0 , e 1 , x 2 ) | w", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "where result is again the query predicate. Here, result has all of the variables in the clause as arguments in order to maintain the binding of variables across all of the mini-clauses. The weights w are the following function of n, the number of mini-clauses (4 in the above example):", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "EQUATION", "cite_spans": [], "ref_spans": [], "eq_spans": [ { "start": 0, "end": 8, "text": "EQUATION", "ref_id": "EQREF", "raw_str": "w = 1 n \u00d7 (log( p 1 \u2212 p ) \u2212 prior)", "eq_num": "(6)" } ], "section": "MLN system", "sec_num": "3" }, { "text": "where p is a value close to 1 that is set to maximize performance on the training data, and prior is the same negative weight as before. Setting w this way produces a probability of p for the result() in cases that satisfy the antecedents of all mini-clauses. For the example above, the antecedents of the first two mini-clauses are satisfied, while the antecedents of the last two are not since the premise provides no evidence for an object of the verb drive. The similarity is then computed to be the maximum probability of any grounding of the result predicate, which in this case is around p 2 . 3 An interesting variation of the average combiner is to omit variable bindings between the miniclauses. 
In this case, the hypothesis clauses look like this for our example:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "man(x) \u2227 agent(x, e) \u2192 result() | w drive(e) \u2227 agent(x, e) \u2192 result() | w drive(e) \u2227 patient(e, x) \u2192 result() | w car(x) \u2227 patient(e, x) \u2192 result() | w", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "This implementation loses a lot of information, for example it does not differentiate between \"A man is walking and a woman is driving\" and \"A man is driving and a woman is walking\". In fact, logical form without variable binding degrades to a representation similar to a set of independent syntactic dependencies, 4 while the average combiner with variable binding retains all of the information in the original logical form. Still, omitting variable binding turns out to be useful for the STS task.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "It is also worth commenting on the efficiency of the inference algorithm when run on the three different approaches to combining evidence. The average combiner without variable binding is the fastest and has the least memory requirements because all cliques in the ground network are of limited size (just 3 or 4 nodes). Deterministic AND is much slower than the average combiner without variable binding, because the maximum clique size depends on the sentence. The average combiner with variable binding is the most memory intensive since the number of arguments of the result() predicate can become large (there is an argument for each individual and event in the sentence). 
Consequently, the inference algorithm needs to consider a combinatorial number of possible groundings of the result() predicate, making inference very slow.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "Adaptation of the logical form. As discussed by Garrette et al. (2011) , Boxer's output is mapped to logical form and augmented with additional information to handle a variety of semantic phenomena. However, we do not use their additional rules for handling implicatives and factives, as we wanted to test the system without background knowledge beyond that supplied by the vector space.", "cite_spans": [ { "start": 48, "end": 70, "text": "Garrette et al. (2011)", "ref_id": "BIBREF17" } ], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "Unfortunately, current MLN inference algorithms are not able to efficiently handle complex formulas with nested quantifiers. For that reason, we replaced universal quantifiers in Boxer's output with existentials since they caused serious problems for Alchemy. Although this is a radical change to the semantics of the logical form, due to the nature of the STS and RTE data, it only effects about 5% of the sentences, and we found that most of the universal quantifiers in these cases were actually due to parsing errors. We are currently exploring more effective ways of dealing with this issue.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MLN system", "sec_num": "3" }, { "text": "In order to compare directly to the logic-based system of Bos and Markert (2005) , we focus on the RTE-1 dataset (Dagan et al., 2005) , which includes 567 Text-Hypothesis (T-H) pairs in the development set and 800 pairs in the test set. The data covers a wide range of issues in entailment, including lexical, syntactic, logical, world knowledge, and combinations of these at different levels of difficulty. 
In both development and test sets, 50% of sentence pairs are true entailments and 50% are not.", "cite_spans": [ { "start": 58, "end": 80, "text": "Bos and Markert (2005)", "ref_id": "BIBREF5" }, { "start": 113, "end": 133, "text": "(Dagan et al., 2005)", "ref_id": "BIBREF12" } ], "ref_spans": [], "eq_spans": [], "section": "Dataset", "sec_num": "4.1" }, { "text": "We run our system for different configurations of inference rules and evidence combiners. For distributional inference rules (DIR), three different levels are tested: without inference rules (no DIR), inference rules for individual words (word DIR), and inference rules for words and phrases (phrase DIR). Phrase vectors were built using vector addition, as point-wise multiplication performed slightly worse. To combine evidence for the result() query, three different options are available: without average combiner which is just using Deterministic AND (Deterministic AND), average combiner with variable binding (AvgComb) and average combiner without variable binding (AvgComb w/o VarBind). Different combinations of configurations are tested according to their suitability for the tasks: RTE and STS.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Method", "sec_num": "4.2" }, { "text": "We also tested several \"distributional only\" systems. The first such system builds a vector representation for each sentence by adding its word vectors, then computes the cosine similarity between the sentence vectors for S 1 and S 2 (VS-Add). The second uses point-wise multiplication instead of vector addition (VS-Mul). The third scales pairwise words similarities to the sentence level using weighted average where weights are inverse document frequencies idf as suggested by Mihalcea et al. 
(2006)", "ref_id": "BIBREF32" } ], "ref_spans": [], "eq_spans": [], "section": "Method", "sec_num": "4.2" }, { "text": "(VS- Pairwise).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Method", "sec_num": "4.2" }, { "text": "For the RTE task, systems were evaluated using both accuracy and confidence-weighted score (cws) as used by Bos and Markert (2005) and the RTE-1 challenge (Dagan et al., 2005) . In order to map a probability of entailment to a strict prediction of True or False, we determined a threshold that optimizes performance on the development set. The cws score rewards a system's ability to assign higher confidence scores to correct predictions than incorrect ones. For cws, a system's predictions are sorted in decreasing order of confidence and the score is computed as:", "cite_spans": [ { "start": 108, "end": 130, "text": "Bos and Markert (2005)", "ref_id": "BIBREF5" }, { "start": 155, "end": 175, "text": "(Dagan et al., 2005)", "ref_id": "BIBREF12" } ], "ref_spans": [], "eq_spans": [], "section": "Method", "sec_num": "4.2" }, { "text": "cws = 1 n n i=1 #correct-up-to-rank-i i", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Method", "sec_num": "4.2" }, { "text": "where n is the number of the items in the test set, and i ranges over the sorted items. In our systems, we defined the confidence value for a T-H pair as the distance between the computed probability for the result() predicate and the threshold.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Method", "sec_num": "4.2" }, { "text": "The results are shown in Table 1 . They show that the distributional only baselines perform very poorly. In particular, they perform worse than strict Bos and Markert (2005) , a system that uses only logic. This illustrates the important role of logic-based representations for the entailment task. 
Due to intractable memory demands of Alchemy inference, our current system with deterministic AND fails to execute on 118 of the 800 test pairs, so, by default, the system classifies these cases as False (non-entailing) with very low confidence. Comparing the two configurations of our system, using deterministic AND vs. the average combiner without variable binding (last two lines in Table 1) , we see that for RTE, it is essential to retain the full logical form.", "cite_spans": [ { "start": 151, "end": 173, "text": "Bos and Markert (2005)", "ref_id": "BIBREF5" } ], "ref_spans": [ { "start": 25, "end": 32, "text": "Table 1", "ref_id": null }, { "start": 686, "end": 694, "text": "Table 1)", "ref_id": null } ], "eq_spans": [], "section": "Results", "sec_num": "4.3" }, { "text": "Our system with deterministic AND obtains both an accuracy and cws of 0.57. The best result in the RTE-1 challenge by Bayer et al. (2005) obtained an accuracy of 0.59 and cws of 0.62. 5 In terms of both accuracy and cws, our system outperforms both \"distributional only\" systems and strict logical entailment, showing again that integrating both logical form and distributional inference rules using MLNs is beneficial. Interestingly, the strict entailment system of Bos and Markert incorporated generic knowledge, lexical knowledge (from Word-Net) and geographical knowledge that we do not utilize. This demonstrates the advantage of using a model that operationalizes entailment between words and phrases as distributional similarity.", "cite_spans": [ { "start": 118, "end": 137, "text": "Bayer et al. 
(2005)", "ref_id": "BIBREF3" }, { "start": 184, "end": 185, "text": "5", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Results", "sec_num": "4.3" }, { "text": "5 Task 2: Semantic Textual Similarity", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Results", "sec_num": "4.3" }, { "text": "The dataset we use in our experiments is the MSR Video Paraphrase Corpus (MSR-Vid) subset of the STS 2012 task, consisting of 1,500 sentence pairs. The corpus itself was built by asking annotators from Amazon Mechanical Turk to describe very short video fragments (Chen and Dolan, 2011) . The organizers of the STS 2012 task (Agirre et al., 2012) sampled video descriptions and asked Turkers to assign similarity scores (ranging from 0 to 5) to pairs of sentences, without access to the video. The gold standard score is the average of the Turkers' annotations. In addition to the MSR Video Paraphrase Corpus subset, the STS 2012 task involved data from machine translation and sense descriptions. We do not use these because they do not consist of full grammatical sentences, which the parser does not handle well. In addition, the STS 2012 data included sentences from the MSR Paraphrase Corpus, which we also do not currently use because some sentences are long and create intractable MLN inference problems. This issue is discussed further in section 6. Following STS standards, our evaluation compares a system's similarity judgments to the gold standard scores using Pearson's correlation coefficient r.", "cite_spans": [ { "start": 264, "end": 286, "text": "(Chen and Dolan, 2011)", "ref_id": "BIBREF8" }, { "start": 325, "end": 346, "text": "(Agirre et al., 2012)", "ref_id": "BIBREF0" } ], "ref_spans": [], "eq_spans": [], "section": "Dataset", "sec_num": "5.1" }, { "text": "Our system can be tested for different configuration of inference rules and evidence combiners which are explained in section 4.2. The tested systems on the STS task are listed in table 2. 
Our experiments showed that using average combiner (AvgComb) is very memory intensive and MLN inference for 28 of the 1,500 pairs either ran out of memory or did not finish in reasonable time. In such cases, we back off to AvgComb w/o VarBind.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Method", "sec_num": "5.2" }, { "text": "We compare to several baselines: our MLN system without distributional inference rules (AvgComb + no DIR), and distributional-only systems (VS-Add, VS-Mul, VS-Pairwise). These are the natural baselines for our system, since they use only one of the two types of information that we combine (i.e. logical form and distributional representations).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Method", "sec_num": "5.2" }, { "text": "Finally, we built an ensemble that combines the output of multiple systems using regression trained (B\u00e4r et al., 2012) 0.87 Table 2 : Results on the STS video dataset.", "cite_spans": [ { "start": 100, "end": 118, "text": "(B\u00e4r et al., 2012)", "ref_id": "BIBREF1" } ], "ref_spans": [ { "start": 124, "end": 131, "text": "Table 2", "ref_id": null } ], "eq_spans": [], "section": "Method", "sec_num": "5.2" }, { "text": "on the training data. We then compare the performance of an ensemble with and without our system. This is the same technique used by B\u00e4r et al. (2012) except we used additive regression (Friedman, 2002) instead of linear regression since it gave better results. Table 2 summarizes the results of our experiments. They show that adding distributional information improves results, as expected, and also that adding phrase rules gives further improvement: Using only word distributional inference rules improves results from 0.58 to 0.6, and adding phrase inference rules further improves them to 0.66. As for variable binding, note that although it provides more precise information, the STS scores actually improve when it is dropped, from 0.66 to 0.73. 
We offer two explanations for this result: First, this information is very sensitive to parsing errors, and the C&C parser, on which Boxer is based, produces many errors on this dataset, even for simple sentences. When the C&C CCG parse is wrong, the resulting logical form is wrong, and the resulting similarity score is greatly affected. Dropping variable binding makes the systems more robust to parsing errors. Second, in contrast to RTE, the STS dataset does not really test the important role of syntax and logical form in determining meaning. This also explains why the \"distributional only\" baselines are actually doing better than the MLN systems. Although the MLN system on its own does not perform better than the distributional compositional models, it does provide complementary information, as shown by the fact that ensembling it with the rest of the models improves performance (0.85 with the MLN system, compared to 0.83 without it). The performance of this ensemble is close to the current best result for this dataset (0.87).", "cite_spans": [ { "start": 186, "end": 202, "text": "(Friedman, 2002)", "ref_id": "BIBREF16" } ], "ref_spans": [ { "start": 262, "end": 269, "text": "Table 2", "ref_id": null } ], "eq_spans": [], "section": "Method", "sec_num": "5.2" }, { "text": "The approach presented in this paper constitutes a step towards achieving the challenging goal of effectively combining logical representations with distributional information automatically acquired from text. In this section, we discuss some of the limitations of the current work and directions for future research.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Future Work", "sec_num": "6" }, { "text": "As noted before, parse errors are currently a significant problem. We use Boxer to obtain a logical representation for a sentence, which in turn relies on the C&C parser. Unfortunately, C&C misparses many sentences, which leads to inaccurate logical forms. 
To reduce the impact of misparsing, we plan to use a version of C&C that can produce the top-n parses together with parse re-ranking (Ng and Curran, 2012) . As an alternative to re-ranking, one could obtain logical forms for each of the top-n parses, and create an MLN that integrates all of them (together with their certainty) as an underspecified meaning representation that could then be used to directly support inferences such as STS and RTE.", "cite_spans": [ { "start": 390, "end": 411, "text": "(Ng and Curran, 2012)", "ref_id": "BIBREF37" } ], "ref_spans": [], "eq_spans": [], "section": "Future Work", "sec_num": "6" }, { "text": "We also plan to exploit a greater variety of distributional inference rules. First, we intend to incorporate logical form translations of existing distributional inference rule collections (e.g., Chan et al., 2011) ). Another issue is obtaining improved rule weights based on distributional phrase vectors. We plan to experiment with more sophisticated approaches to computing phrase vectors such as those recently presented by Baroni and Zamparelli (2010) and Grefenstette and Sadrzadeh (2011) . Furthermore, we are currently deriving symmetric similarity ratings between word pairs or phrase pairs, when really what we need is di-rectional similarity. We plan to incorporate directed similarity measures such as those of Kotlerman et al. 
(2010)", "ref_id": "BIBREF26" }, { "start": 751, "end": 764, "text": "Clarke (2012)", "ref_id": "BIBREF10" } ], "ref_spans": [], "eq_spans": [], "section": "Future Work", "sec_num": "6" }, { "text": "A primary problem for our approach is the limitations of existing MLN inference algorithms, which do not effectively scale to large and complex MLNs. We plan to explore \"coarser\" logical representations such as Minimal Recursion Semantics (MRS) (Copestake et al., 2005) . Another potential approach to this problem is to trade expressivity for efficiency. Domingos and Webb (2012) introduced a tractable subset of Markov Logic (TML) for which a future software release is planned. Formulating the inference problem in TML could potentially allow us to run our system on longer and more complex sentences.", "cite_spans": [ { "start": 245, "end": 269, "text": "(Copestake et al., 2005)", "ref_id": "BIBREF11" }, { "start": 356, "end": 380, "text": "Domingos and Webb (2012)", "ref_id": "BIBREF14" } ], "ref_spans": [], "eq_spans": [], "section": "Future Work", "sec_num": "6" }, { "text": "In this paper we have used an approach that combines logic-based and distributional representations for natural language meaning. It uses logic as the primary representation, transforms distributional similarity judgments to weighted inference rules, and uses Markov Logic Networks to perform inferences over the weighted clauses. This approach views textual entailment and sentence similarity as degrees of \"logical\" entailment, while at the same time using distributional similarity as an indicator of entailment at the word and short phrase level. We have evaluated the framework on two different tasks, RTE and STS, finding that it is able to handle both tasks given that we adapt the way evidence is combined in the MLN. 
Even though other entailment models could be applied to STS, given that similarity can obviously be operationalized as a degree of mutual entailment, this has not been done before to the best of our knowledge. Our framework achieves reasonable results on both tasks. On RTE-1 we obtain an accuracy of 0.57. On STS, we obtain a correlation of r = 0.66 with full logic, r = 0.73 in a system with weakened variable binding, and r = 0.85 in an ensemble model. We find that distributional word and phrase similarity, used as textual inference rules on the fly, leads to sizeable improvements on both tasks. We also find that using more flexible probabilistic combinations of evidence is crucial for STS.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "7" }, { "text": "We omit inference rules for words (a, b) where cos(a, b) < \u03b8 for a threshold \u03b8 set to maximize performance on the training data. Low-similarity pairs usually indicate dissimilar words. This removes a sizeable number of rules for STS, while for RTE the tuned threshold was near zero.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "It is customary to transform raw counts in a way that captures association between target words and dimensions, for example through point-wise mutual information (Lowe, 2001).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "One could also give mini-clauses different weights depending on their importance, but we have not experimented with this so far.4 However, it is not completely the same since we do not divide up formulas under negation into mini-clauses.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "On other RTE datasets there are higher previous results. Hickl (2008) achieves 0.89 accuracy and 0.88 cws on the combined RTE-2 and RTE-3 dataset.", "cite_spans": [], "ref_spans": [], "eq_spans": [], 
"section": "", "sec_num": null } ], "back_matter": [ { "text": "This research was supported in part by the NSF CA-REER grant IIS 0845925, by the DARPA DEFT program under AFRL grant FA8750-13-2-0026, by MURI ARO grant W911NF-08-1-0242 and by an NDSEG grant. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author and do not necessarily reflect the view of DARPA, AFRL, ARO, DoD or the US government.Some of our experiments were run on the Mastodon Cluster supported by NSF Grant EIA-0303609.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgements", "sec_num": null } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Semeval-2012 task 6: A pilot on semantic textual similarity", "authors": [ { "first": "Eneko", "middle": [], "last": "Agirre", "suffix": "" }, { "first": "Daniel", "middle": [], "last": "Cer", "suffix": "" }, { "first": "Mona", "middle": [], "last": "Diab", "suffix": "" }, { "first": "Aitor", "middle": [], "last": "Gonzalez-Agirre", "suffix": "" } ], "year": 2012, "venue": "Proceedings of SemEval", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Eneko Agirre, Daniel Cer, Mona Diab, and Aitor Gonzalez-Agirre. 2012. Semeval-2012 task 6: A pi- lot on semantic textual similarity. In Proceedings of SemEval.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "UKP: Computing semantic textual similarity by combining multiple content similarity measures", "authors": [ { "first": "Daniel", "middle": [], "last": "B\u00e4r", "suffix": "" }, { "first": "Chris", "middle": [], "last": "Biemann", "suffix": "" }, { "first": "Iryna", "middle": [], "last": "Gurevych", "suffix": "" }, { "first": "Torsten", "middle": [], "last": "Zesch", "suffix": "" } ], "year": 2012, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Daniel B\u00e4r, Chris Biemann, Iryna Gurevych, and Torsten Zesch. 
2012. UKP: Computing semantic textual sim- ilarity by combining multiple content similarity mea- sures. In SemEval-2012.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "Nouns are vectors, adjectives are matrices: Representing adjective-noun constructions in semantic space", "authors": [ { "first": "Marco", "middle": [], "last": "Baroni", "suffix": "" }, { "first": "Roberto", "middle": [], "last": "Zamparelli", "suffix": "" } ], "year": 2010, "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "1183--1193", "other_ids": {}, "num": null, "urls": [], "raw_text": "Marco Baroni and Roberto Zamparelli. 2010. Nouns are vectors, adjectives are matrices: Representing adjective-noun constructions in semantic space. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing, pages 1183-1193, Cambridge, MA, October. Association for Computational Linguistics.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "MITREs Submissions to the EU Pascal RTE Challenge", "authors": [ { "first": "Samuel", "middle": [], "last": "Bayer", "suffix": "" }, { "first": "John", "middle": [], "last": "Burger", "suffix": "" }, { "first": "Lisa", "middle": [], "last": "Ferro", "suffix": "" }, { "first": "John", "middle": [], "last": "Henderson", "suffix": "" }, { "first": "Alexander", "middle": [], "last": "Yeh", "suffix": "" } ], "year": 2005, "venue": "Proceedings of the PASCAL Challenges Workshop on Recognising Textual Entailment", "volume": "", "issue": "", "pages": "41--44", "other_ids": {}, "num": null, "urls": [], "raw_text": "Samuel Bayer, John Burger, Lisa Ferro, John Hender- son, and Alexander Yeh. 2005. MITREs Submissions to the EU Pascal RTE Challenge. 
In In Proceedings of the PASCAL Challenges Workshop on Recognising Textual Entailment, pages 41-44.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "Global learning of typed entailment rules", "authors": [ { "first": "Jonathan", "middle": [], "last": "Berant", "suffix": "" }, { "first": "Ido", "middle": [], "last": "Dagan", "suffix": "" }, { "first": "Jacob", "middle": [], "last": "Goldberger", "suffix": "" } ], "year": 2011, "venue": "Proceedings of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jonathan Berant, Ido Dagan, and Jacob Goldberger. 2011. Global learning of typed entailment rules. In Proceedings of ACL, Portland, OR.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Recognising textual entailment with logical inference", "authors": [ { "first": "Johan", "middle": [], "last": "Bos", "suffix": "" }, { "first": "Katja", "middle": [], "last": "Markert", "suffix": "" } ], "year": 2005, "venue": "Proceedings of EMNLP 2005", "volume": "", "issue": "", "pages": "628--635", "other_ids": {}, "num": null, "urls": [], "raw_text": "Johan Bos and Katja Markert. 2005. Recognising tex- tual entailment with logical inference. In Proceedings of EMNLP 2005, pages 628-635, Vancouver, B.C., Canada.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Wide-coverage semantic analysis with boxer", "authors": [ { "first": "Johan", "middle": [], "last": "Bos", "suffix": "" } ], "year": 2008, "venue": "Conference Proceedings, Research in Computational Semantics", "volume": "", "issue": "", "pages": "277--286", "other_ids": {}, "num": null, "urls": [], "raw_text": "Johan Bos. 2008. Wide-coverage semantic analysis with boxer. In Johan Bos and Rodolfo Delmonte, editors, Semantics in Text Processing. STEP 2008 Conference Proceedings, Research in Computational Semantics, pages 277-286. 
College Publications.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "Reranking bilingually extracted paraphrases using monolingual distributional similarity", "authors": [ { "first": "Chris", "middle": [], "last": "Tsz Ping Chan", "suffix": "" }, { "first": "Benjamin", "middle": [], "last": "Callison-Burch", "suffix": "" }, { "first": "", "middle": [], "last": "Van Durme", "suffix": "" } ], "year": 2011, "venue": "Proceedings of the GEMS 2011 Workshop on GEometrical Models of Natural Language Semantics", "volume": "", "issue": "", "pages": "33--42", "other_ids": {}, "num": null, "urls": [], "raw_text": "Tsz Ping Chan, Chris Callison-Burch, and Benjamin Van Durme. 2011. Reranking bilingually extracted paraphrases using monolingual distributional similar- ity. In Proceedings of the GEMS 2011 Workshop on GEometrical Models of Natural Language Semantics, pages 33-42, Edinburgh, UK.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "Collecting highly parallel data for paraphrase evaluation", "authors": [ { "first": "L", "middle": [], "last": "David", "suffix": "" }, { "first": "William", "middle": [ "B" ], "last": "Chen", "suffix": "" }, { "first": "", "middle": [], "last": "Dolan", "suffix": "" } ], "year": 2011, "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "190--200", "other_ids": {}, "num": null, "urls": [], "raw_text": "David L. Chen and William B. Dolan. 2011. Collect- ing highly parallel data for paraphrase evaluation. 
In Proceedings of the 49th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 190-200, Portland, Oregon, USA, June.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Parsing the WSJ using CCG and log-linear models", "authors": [ { "first": "Stephen", "middle": [], "last": "Clark", "suffix": "" }, { "first": "James", "middle": [ "R" ], "last": "Curran", "suffix": "" } ], "year": 2004, "venue": "Proceedings of ACL 2004", "volume": "", "issue": "", "pages": "104--111", "other_ids": {}, "num": null, "urls": [], "raw_text": "Stephen Clark and James R. Curran. 2004. Parsing the WSJ using CCG and log-linear models. In Proceed- ings of ACL 2004, pages 104-111, Barcelona, Spain.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "A context-theoretic framework for compositionality in distributional semantics", "authors": [ { "first": "Daoud", "middle": [], "last": "Clarke", "suffix": "" } ], "year": 2012, "venue": "Computational Linguistics", "volume": "", "issue": "1", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Daoud Clarke. 2012. A context-theoretic framework for compositionality in distributional semantics. Compu- tational Linguistics, 38(1).", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "Minimal recursion semantics: An introduction", "authors": [ { "first": "Ann", "middle": [], "last": "Copestake", "suffix": "" }, { "first": "Dan", "middle": [], "last": "Flickinger", "suffix": "" }, { "first": "Carl", "middle": [], "last": "Pollard", "suffix": "" }, { "first": "Ivan", "middle": [ "A" ], "last": "Sag", "suffix": "" } ], "year": 2005, "venue": "Research on Language and Computation", "volume": "3", "issue": "2-3", "pages": "281--332", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ann Copestake, Dan Flickinger, Carl Pollard, and Ivan A Sag. 2005. Minimal recursion semantics: An intro- duction. 
Research on Language and Computation, 3(2-3):281-332.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "The PASCAL Recognising Textual Entailment Challenge", "authors": [ { "first": "Oren", "middle": [], "last": "Ido Dagan", "suffix": "" }, { "first": "Bernardo", "middle": [], "last": "Glickman", "suffix": "" }, { "first": "", "middle": [], "last": "Magnini", "suffix": "" } ], "year": 2005, "venue": "Proceedings of the PASCAL Challenges Workshop on Recognising Textual Entailment", "volume": "", "issue": "", "pages": "1--8", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2005. The PASCAL Recognising Textual Entailment Challenge. In In Proceedings of the PASCAL Chal- lenges Workshop on Recognising Textual Entailment, pages 1-8.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Markov Logic: An Interface Layer for Artificial Intelligence", "authors": [ { "first": "Pedro", "middle": [], "last": "Domingos", "suffix": "" }, { "first": "Daniel", "middle": [], "last": "Lowd", "suffix": "" } ], "year": 2009, "venue": "Synthesis Lectures on Artificial Intelligence and Machine Learning", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Pedro Domingos and Daniel Lowd. 2009. Markov Logic: An Interface Layer for Artificial Intelligence. Synthesis Lectures on Artificial Intelligence and Ma- chine Learning. Morgan & Claypool Publishers.", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "A tractable first-order probabilistic logic", "authors": [ { "first": "Pedro", "middle": [], "last": "Domingos", "suffix": "" }, { "first": "Austin", "middle": [], "last": "Webb", "suffix": "" } ], "year": 2012, "venue": "Proceedings of the Twenty-Sixth National Conference on Artificial Intelligence", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Pedro Domingos and W Austin Webb. 2012. 
A tractable first-order probabilistic logic. In Proceedings of the Twenty-Sixth National Conference on Artificial Intel- ligence.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "A structured vector space model for word meaning in context", "authors": [ { "first": "Katrin", "middle": [], "last": "Erk", "suffix": "" }, { "first": "Sebastian", "middle": [], "last": "Pad\u00f3", "suffix": "" } ], "year": 2008, "venue": "Proceedings of EMNLP 2008", "volume": "", "issue": "", "pages": "897--906", "other_ids": {}, "num": null, "urls": [], "raw_text": "Katrin Erk and Sebastian Pad\u00f3. 2008. A structured vec- tor space model for word meaning in context. In Pro- ceedings of EMNLP 2008, pages 897-906, Honolulu, HI.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "Stochastic gradient boosting", "authors": [ { "first": "H", "middle": [], "last": "Jerome", "suffix": "" }, { "first": "", "middle": [], "last": "Friedman", "suffix": "" } ], "year": 2002, "venue": "Computational Statistics & Data Analysis", "volume": "38", "issue": "4", "pages": "367--378", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jerome H Friedman. 2002. Stochastic gradient boosting. Computational Statistics & Data Analysis, 38(4):367- 378.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "Integrating logical representations with probabilistic information using markov logic", "authors": [ { "first": "Dan", "middle": [], "last": "Garrette", "suffix": "" }, { "first": "Katrin", "middle": [], "last": "Erk", "suffix": "" }, { "first": "Raymond", "middle": [], "last": "Mooney", "suffix": "" } ], "year": 2011, "venue": "Proceedings of IWCS", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Dan Garrette, Katrin Erk, and Raymond Mooney. 2011. Integrating logical representations with probabilistic information using markov logic. 
In Proceedings of IWCS, Oxford, UK.", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "A formal approach to linking logical form and vectorspace lexical semantics", "authors": [ { "first": "Dan", "middle": [], "last": "Garrette", "suffix": "" }, { "first": "Katrin", "middle": [], "last": "Erk", "suffix": "" }, { "first": "Raymond", "middle": [], "last": "Mooney", "suffix": "" } ], "year": 2013, "venue": "Computing Meaning", "volume": "4", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Dan Garrette, Katrin Erk, and Raymond Mooney. 2013. A formal approach to linking logical form and vector- space lexical semantics. In Harry Bunt, Johan Bos, and Stephen Pulman, editors, Computing Meaning, Vol. 4.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "Introduction to Statistical Relational Learning", "authors": [], "year": 2007, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Lise Getoor and Ben Taskar, editors. 2007. Introduction to Statistical Relational Learning. MIT Press, Cam- bridge, MA.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "English Gigaword Third Edition", "authors": [ { "first": "David", "middle": [], "last": "Graff", "suffix": "" }, { "first": "Junbo", "middle": [], "last": "Kong", "suffix": "" }, { "first": "Ke", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Kazuaki", "middle": [], "last": "Maeda", "suffix": "" } ], "year": 2007, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "David Graff, Junbo Kong, Ke Chen, and Kazuaki Maeda. 2007. English Gigaword Third Edi- tion. 
http://www.ldc.upenn.edu/ Catalog/CatalogEntry.jsp?catalogId= LDC2007T07.", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "Experimental support for a categorical compositional distributional model of meaning", "authors": [ { "first": "Edward", "middle": [], "last": "Grefenstette", "suffix": "" }, { "first": "Mehrnoosh", "middle": [], "last": "Sadrzadeh", "suffix": "" } ], "year": 2011, "venue": "Proceedings of EMNLP", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Edward Grefenstette and Mehrnoosh Sadrzadeh. 2011. Experimental support for a categorical compositional distributional model of meaning. In Proceedings of EMNLP, Edinburgh, Scotland, UK.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "Using Discourse Commitments to Recognize Textual Entailment", "authors": [ { "first": "Andrew", "middle": [], "last": "Hickl", "suffix": "" } ], "year": 2008, "venue": "Proceedings of COLING 2008", "volume": "", "issue": "", "pages": "337--344", "other_ids": {}, "num": null, "urls": [], "raw_text": "Andrew Hickl. 2008. Using Discourse Commitments to Recognize Textual Entailment. In Proceedings of COLING 2008, pages 337-344.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "Interpretation as abduction", "authors": [ { "first": "Jerry", "middle": [ "R" ], "last": "Hobbs", "suffix": "" }, { "first": "Mark", "middle": [], "last": "Stickel", "suffix": "" }, { "first": "Douglas", "middle": [], "last": "Appelt", "suffix": "" }, { "first": "Paul", "middle": [], "last": "Martin", "suffix": "" } ], "year": 1993, "venue": "Artificial Intelligence", "volume": "63", "issue": "1-2", "pages": "69--142", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jerry R. Hobbs, Mark Stickel, Douglas Appelt, and Paul Martin. 1993. Interpretation as abduction. 
Artificial Intelligence, 63(1-2):69-142.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "From Discourse to Logic; An Introduction to Modeltheoretic Semantics of Natural Language, Formal Logic and DRT", "authors": [ { "first": "Hans", "middle": [], "last": "Kamp", "suffix": "" }, { "first": "Uwe", "middle": [], "last": "Reyle", "suffix": "" } ], "year": 1993, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Hans Kamp and Uwe Reyle. 1993. From Discourse to Logic; An Introduction to Modeltheoretic Semantics of Natural Language, Formal Logic and DRT. Kluwer, Dordrecht.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "The Alchemy system for statistical relational AI", "authors": [ { "first": "Stanley", "middle": [], "last": "Kok", "suffix": "" }, { "first": "Parag", "middle": [], "last": "Singla", "suffix": "" }, { "first": "Matthew", "middle": [], "last": "Richardson", "suffix": "" }, { "first": "Pedro", "middle": [], "last": "Domingos", "suffix": "" } ], "year": 2005, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Stanley Kok, Parag Singla, Matthew Richardson, and Pe- dro Domingos. 2005. The Alchemy system for sta- tistical relational AI. Technical report, Department of Computer Science and Engineering, University of Washington. http://www.cs.washington. 
edu/ai/alchemy.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "Directional distributional similarity for lexical inference", "authors": [ { "first": "Lili", "middle": [], "last": "Kotlerman", "suffix": "" }, { "first": "Ido", "middle": [], "last": "Dagan", "suffix": "" }, { "first": "Idan", "middle": [], "last": "Szpektor", "suffix": "" }, { "first": "Maayan", "middle": [], "last": "Zhitomirsky-Geffet", "suffix": "" } ], "year": 2010, "venue": "Natural Language Engineering", "volume": "16", "issue": "04", "pages": "359--389", "other_ids": {}, "num": null, "urls": [], "raw_text": "Lili Kotlerman, Ido Dagan, Idan Szpektor, and Maayan Zhitomirsky-Geffet. 2010. Directional distributional similarity for lexical inference. Natural Language En- gineering, 16(04):359-389.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "A solution to Plato's problem: the latent semantic analysis theory of acquisition, induction, and representation of knowledge", "authors": [ { "first": "Thomas", "middle": [], "last": "Landauer", "suffix": "" }, { "first": "Susan", "middle": [], "last": "Dumais", "suffix": "" } ], "year": 1997, "venue": "Psychological Review", "volume": "104", "issue": "2", "pages": "211--240", "other_ids": {}, "num": null, "urls": [], "raw_text": "Thomas Landauer and Susan Dumais. 1997. A solution to Plato's problem: the latent semantic analysis theory of acquisition, induction, and representation of knowl- edge. Psychological Review, 104(2):211-240.", "links": null }, "BIBREF28": { "ref_id": "b28", "title": "DIRT -discovery of inference rules from text", "authors": [ { "first": "Dekang", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Patrick", "middle": [], "last": "Pantel", "suffix": "" } ], "year": 2001, "venue": "Proceedings of the ACM SIGKDD Conference on Knowledge Discovery and Data Mining", "volume": "", "issue": "", "pages": "323--328", "other_ids": {}, "num": null, "urls": [], "raw_text": "Dekang Lin and Patrick Pantel. 2001a. 
DIRT -discovery of inference rules from text. In Proceedings of the ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pages 323-328.", "links": null }, "BIBREF29": { "ref_id": "b29", "title": "Discovery of inference rules for question answering", "authors": [ { "first": "Dekang", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Patrick", "middle": [], "last": "Pantel", "suffix": "" } ], "year": 2001, "venue": "Natural Language Engineering", "volume": "7", "issue": "4", "pages": "343--360", "other_ids": {}, "num": null, "urls": [], "raw_text": "Dekang Lin and Patrick Pantel. 2001b. Discovery of inference rules for question answering. Natural Lan- guage Engineering, 7(4):343-360.", "links": null }, "BIBREF30": { "ref_id": "b30", "title": "Towards a theory of semantic space", "authors": [ { "first": "Will", "middle": [], "last": "Lowe", "suffix": "" } ], "year": 2001, "venue": "Proceedings of the Cognitive Science Society", "volume": "", "issue": "", "pages": "576--581", "other_ids": {}, "num": null, "urls": [], "raw_text": "Will Lowe. 2001. Towards a theory of semantic space. In Proceedings of the Cognitive Science Society, pages 576-581.", "links": null }, "BIBREF31": { "ref_id": "b31", "title": "Producing high-dimensional semantic spaces from lexical cooccurrence", "authors": [ { "first": "Kevin", "middle": [], "last": "Lund", "suffix": "" }, { "first": "Curt", "middle": [], "last": "Burgess", "suffix": "" } ], "year": 1996, "venue": "Behavior Research Methods, Instruments, and Computers", "volume": "28", "issue": "", "pages": "203--208", "other_ids": {}, "num": null, "urls": [], "raw_text": "Kevin Lund and Curt Burgess. 1996. Producing high-dimensional semantic spaces from lexical co- occurrence. 
Behavior Research Methods, Instruments, and Computers, 28:203-208.", "links": null }, "BIBREF32": { "ref_id": "b32", "title": "Corpus-based and knowledge-based measures of text semantic similarity", "authors": [ { "first": "Rada", "middle": [], "last": "Mihalcea", "suffix": "" }, { "first": "Courtney", "middle": [], "last": "Corley", "suffix": "" }, { "first": "Carlo", "middle": [], "last": "Strapparava", "suffix": "" } ], "year": 2006, "venue": "Proceedings of the national conference on artificial intelligence", "volume": "21", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Rada Mihalcea, Courtney Corley, and Carlo Strapparava. 2006. Corpus-based and knowledge-based measures of text semantic similarity. In Proceedings of the na- tional conference on artificial intelligence, volume 21, page 775. Menlo Park, CA; Cambridge, MA; London;", "links": null }, "BIBREF33": { "ref_id": "b33", "title": "Vector-based models of semantic composition", "authors": [ { "first": "Jeff", "middle": [], "last": "Mitchell", "suffix": "" }, { "first": "Mirella", "middle": [], "last": "Lapata", "suffix": "" } ], "year": 2008, "venue": "Proceedings of ACL", "volume": "", "issue": "", "pages": "236--244", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jeff Mitchell and Mirella Lapata. 2008. Vector-based models of semantic composition. In Proceedings of ACL, pages 236-244.", "links": null }, "BIBREF34": { "ref_id": "b34", "title": "Composition in distributional models of semantics", "authors": [ { "first": "Jeff", "middle": [], "last": "Mitchell", "suffix": "" }, { "first": "Mirella", "middle": [], "last": "Lapata", "suffix": "" } ], "year": 2010, "venue": "Cognitive Science", "volume": "34", "issue": "8", "pages": "1388--1429", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jeff Mitchell and Mirella Lapata. 2010. Composition in distributional models of semantics. 
Cognitive Science, 34(8):1388-1429.", "links": null }, "BIBREF35": { "ref_id": "b35", "title": "Universal grammar", "authors": [ { "first": "Richard", "middle": [], "last": "Montague", "suffix": "" } ], "year": 1970, "venue": "Theoria", "volume": "36", "issue": "", "pages": "7--27", "other_ids": {}, "num": null, "urls": [], "raw_text": "Richard Montague. 1970. Universal grammar. Theoria, 36:373-398. Reprinted in Thomason (1974), pp 7-27.", "links": null }, "BIBREF36": { "ref_id": "b36", "title": "Exploiting causal independence in markov logic networks: Combining undirected and directed models", "authors": [ { "first": "Tushar", "middle": [], "last": "Sriraam Natarajan", "suffix": "" }, { "first": "Daniel", "middle": [], "last": "Khot", "suffix": "" }, { "first": "Prasad", "middle": [], "last": "Lowd", "suffix": "" }, { "first": "Kristian", "middle": [], "last": "Tadepalli", "suffix": "" }, { "first": "Jude", "middle": [], "last": "Kersting", "suffix": "" }, { "first": "", "middle": [], "last": "Shavlik", "suffix": "" } ], "year": 2010, "venue": "Proceedings of European Conference in Machine Learning (ECML)", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Sriraam Natarajan, Tushar Khot, Daniel Lowd, Prasad Tadepalli, Kristian Kersting, and Jude Shavlik. 2010. Exploiting causal independence in markov logic net- works: Combining undirected and directed models. 
In Proceedings of European Conference in Machine Learning (ECML), Barcelona, Spain.", "links": null }, "BIBREF37": { "ref_id": "b37", "title": "Dependency hashing for n-best ccg parsing", "authors": [ { "first": "Dominick", "middle": [], "last": "Ng", "suffix": "" }, { "first": "", "middle": [], "last": "James R Curran", "suffix": "" } ], "year": 2012, "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Dominick Ng and James R Curran. 2012. Dependency hashing for n-best ccg parsing. In Proceedings of the 50th Annual Meeting of the Association for Com- putational Linguistics.", "links": null }, "BIBREF38": { "ref_id": "b38", "title": "Robust textual inference via learning and abductive reasoning", "authors": [ { "first": "Rajat", "middle": [], "last": "Raina", "suffix": "" }, { "first": "Andrew", "middle": [ "Y" ], "last": "Ng", "suffix": "" }, { "first": "Christopher", "middle": [ "D" ], "last": "Manning", "suffix": "" } ], "year": 2005, "venue": "Proceedings of AAAI", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Rajat Raina, Andrew Y. Ng, and Christopher D. Man- ning. 2005. Robust textual inference via learning and abductive reasoning. In Proceedings of AAAI.", "links": null }, "BIBREF40": { "ref_id": "b40", "title": "Markov logic networks", "authors": [], "year": null, "venue": "Machine Learning", "volume": "62", "issue": "", "pages": "107--136", "other_ids": {}, "num": null, "urls": [], "raw_text": "Markov logic networks. 
Machine Learning, 62:107- 136.", "links": null }, "BIBREF41": { "ref_id": "b41", "title": "Automatic word sense discrimination", "authors": [ { "first": "Hinrich", "middle": [], "last": "Sch\u00fctze", "suffix": "" } ], "year": 1998, "venue": "Computational Linguistics", "volume": "", "issue": "1", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Hinrich Sch\u00fctze. 1998. Automatic word sense discrimi- nation. Computational Linguistics, 24(1).", "links": null }, "BIBREF42": { "ref_id": "b42", "title": "Dynamic pooling and unfolding recursive autoencoders for paraphrase detection", "authors": [ { "first": "Richard", "middle": [], "last": "Socher", "suffix": "" }, { "first": "Eric", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Jeffrey", "middle": [], "last": "Pennin", "suffix": "" }, { "first": "Andrew", "middle": [], "last": "Ng", "suffix": "" }, { "first": "Christopher", "middle": [], "last": "Manning", "suffix": "" } ], "year": 2011, "venue": "Proceedings of NIPS", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Richard Socher, Eric Huang, Jeffrey Pennin, Andrew Ng, and Christopher Manning. 2011. Dynamic pooling and unfolding recursive autoencoders for paraphrase detection. In J. Shawe-Taylor, R.S. Zemel, P. Bartlett, F.C.N. Pereira, and K.Q. 
Weinberger, editors, Pro- ceedings of NIPS.", "links": null }, "BIBREF43": { "ref_id": "b43", "title": "Knowledge and tree-edits in learnable entailment proofs", "authors": [ { "first": "Asher", "middle": [], "last": "Stern", "suffix": "" }, { "first": "Amnon", "middle": [], "last": "Lotan", "suffix": "" }, { "first": "Shachar", "middle": [], "last": "Mirkin", "suffix": "" }, { "first": "Eyal", "middle": [], "last": "Shnarch", "suffix": "" }, { "first": "Lili", "middle": [], "last": "Kotlerman", "suffix": "" }, { "first": "Jonathan", "middle": [], "last": "Berant", "suffix": "" }, { "first": "Ido", "middle": [], "last": "Dagan", "suffix": "" } ], "year": 2011, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Asher Stern, Amnon Lotan, Shachar Mirkin, Eyal Shnarch, Lili Kotlerman, Jonathan Berant, and Ido Da- gan. 2011. Knowledge and tree-edits in learnable en- tailment proofs. In TAC, Gathersburg, MD.", "links": null }, "BIBREF44": { "ref_id": "b44", "title": "Learning entailment rules for unary templates", "authors": [ { "first": "Idan", "middle": [], "last": "Szpektor", "suffix": "" }, { "first": "Ido", "middle": [], "last": "Dagan", "suffix": "" } ], "year": 2008, "venue": "Proceedings of COLING", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Idan Szpektor and Ido Dagan. 2008. Learning entail- ment rules for unary templates. 
In Proceedings of COLING.", "links": null }, "BIBREF45": { "ref_id": "b45", "title": "Contextualizing semantic representations using syntactically enriched vector models", "authors": [ { "first": "Stefan", "middle": [], "last": "Thater", "suffix": "" }, { "first": "Hagen", "middle": [], "last": "F\u00fcrstenau", "suffix": "" }, { "first": "Manfred", "middle": [], "last": "Pinkal", "suffix": "" } ], "year": 2010, "venue": "Proceedings of ACL 2010", "volume": "", "issue": "", "pages": "948--957", "other_ids": {}, "num": null, "urls": [], "raw_text": "Stefan Thater, Hagen F\u00fcrstenau, and Manfred Pinkal. 2010. Contextualizing semantic representations using syntactically enriched vector models. In Proceedings of ACL 2010, pages 948-957, Uppsala, Sweden.", "links": null }, "BIBREF46": { "ref_id": "b46", "title": "Formal Philosophy. Selected Papers of Richard Montague", "authors": [], "year": 1974, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Richmond H. Thomason, editor. 1974. Formal Philoso- phy. Selected Papers of Richard Montague. Yale Uni- versity Press, New Haven.", "links": null }, "BIBREF47": { "ref_id": "b47", "title": "From frequency to meaning: Vector space models of semantics", "authors": [ { "first": "Peter", "middle": [], "last": "Turney", "suffix": "" }, { "first": "Patrick", "middle": [], "last": "Pantel", "suffix": "" } ], "year": 2010, "venue": "Journal of Artificial Intelligence Research", "volume": "37", "issue": "", "pages": "141--188", "other_ids": {}, "num": null, "urls": [], "raw_text": "Peter Turney and Patrick Pantel. 2010. From frequency to meaning: Vector space models of semantics. Jour- nal of Artificial Intelligence Research, 37:141-188.", "links": null } }, "ref_entries": { "FIGREF0": { "type_str": "figure", "num": null, "text": "\u2200x hamster(x) \u2192 gerbil(x) | f(w). Turning distributional similarity into a weighted inference rule", "uris": null } } } }