| { |
| "paper_id": "D18-1007", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:51:10.357944Z" |
| }, |
| "title": "Collecting Diverse Natural Language Inference Problems for Sentence Representation Evaluation", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Poliak", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": { |
| "addrLine": "2 BITS Pilani", |
| "settlement": "Goa Campus", |
| "country": "India" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Aparajita", |
| "middle": [], |
| "last": "Haldar", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": { |
| "addrLine": "2 BITS Pilani", |
| "settlement": "Goa Campus", |
| "country": "India" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Rudinger", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": { |
| "addrLine": "2 BITS Pilani", |
| "settlement": "Goa Campus", |
| "country": "India" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "Edward" |
| ], |
| "last": "Hu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": { |
| "addrLine": "2 BITS Pilani", |
| "settlement": "Goa Campus", |
| "country": "India" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ellie", |
| "middle": [], |
| "last": "Pavlick", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Brown University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [ |
| "Steven" |
| ], |
| "last": "White", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Rochester", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": { |
| "addrLine": "2 BITS Pilani", |
| "settlement": "Goa Campus", |
| "country": "India" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present a large scale collection of diverse natural language inference (NLI) datasets that help provide insight into how well a sentence representation captures distinct types of reasoning. The collection results from recasting 13 existing datasets from 7 semantic phenomena into a common NLI structure, resulting in over half a million labeled context-hypothesis pairs in total. We refer to our collection as the DNC: Diverse Natural Language Inference Collection.", |
| "pdf_parse": { |
| "paper_id": "D18-1007", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present a large scale collection of diverse natural language inference (NLI) datasets that help provide insight into how well a sentence representation captures distinct types of reasoning. The collection results from recasting 13 existing datasets from 7 semantic phenomena into a common NLI structure, resulting in over half a million labeled context-hypothesis pairs in total. We refer to our collection as the DNC: Diverse Natural Language Inference Collection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "A plethora of new natural language inference (NLI) 1 datasets has been created in recent years (Bowman et al., 2015; Williams et al., 2017; Lai et al., 2017; Khot et al., 2018) . However, these datasets do not provide clear insight into what type of reasoning or inference a model may be performing. For example, these datasets cannot be used to evaluate whether competitive NLI models can determine if an event occurred, correctly differentiate between figurative and literal language, or accurately identify and categorize named entities. Consequently, these datasets cannot answer how well sentence representation learning models capture distinct semantic phenomena necessary for general natural language understanding (NLU).", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 116, |
| "text": "(Bowman et al., 2015;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 117, |
| "end": 139, |
| "text": "Williams et al., 2017;", |
| "ref_id": "BIBREF75" |
| }, |
| { |
| "start": 140, |
| "end": 157, |
| "text": "Lai et al., 2017;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 158, |
| "end": 176, |
| "text": "Khot et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To answer these questions, we introduce the Diverse NLI Collection (DNC), a large-scale NLI dataset that tests a model's ability to perform diverse types of reasoning. DNC is a collection of NLI problems, each requiring a model to perform 1 The task of determining if a hypothesis would likely be inferred from a context, or premise; also known as Recognizing Textual Entailment (RTE) (Dagan et al., 2006 (Dagan et al., , 2013 indicates the line is a context and the following line is its corresponding hypothesis. and respectively indicate that the context entails, or does not entail the hypothesis. Appendix A includes more recast examples. a unique type of reasoning. Each NLI dataset contains labeled context-hypothesis pairs that we recast from semantic annotations for specific structured prediction tasks. We extend various prior works on challenge NLI datasets (Zhang et al., 2017) , and define recasting as leveraging existing datasets to create NLI examples (Glickman, 2006; White et al., 2017) . We recast annotations from a total of 13 datasets across 7 NLP tasks into labeled NLI examples. The tasks include event factuality, named entity recognition, gendered anaphora resolution, sentiment analysis, relationship extraction, pun detection, and lexicosyntactic inference. Currently, the DNC contains over half a million labeled examples. Table 1 includes NLI pairs that test specific types of reasoning.", |
| "cite_spans": [ |
| { |
| "start": 239, |
| "end": 240, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 385, |
| "end": 404, |
| "text": "(Dagan et al., 2006", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 405, |
| "end": 426, |
| "text": "(Dagan et al., , 2013", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 870, |
| "end": 890, |
| "text": "(Zhang et al., 2017)", |
| "ref_id": "BIBREF79" |
| }, |
| { |
| "start": 969, |
| "end": 985, |
| "text": "(Glickman, 2006;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 986, |
| "end": 1005, |
| "text": "White et al., 2017)", |
| "ref_id": "BIBREF71" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1353, |
| "end": 1360, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Using a hypothesis-only NLI model, with access to just hypothesis sentences, as a strong baseline (Tsuchiya, 2018; Gururangan et al., 2018; Poliak et al., 2018b) , our experiments demonstrate how DNC can be used to probe a model's ability to capture different types of semantic reasoning necessary for general NLU. In short, this work answers a recent plea to the community to test \"more kinds of inference\" than in previous challenge sets (Chatzikyriakidis et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 98, |
| "end": 114, |
| "text": "(Tsuchiya, 2018;", |
| "ref_id": "BIBREF66" |
| }, |
| { |
| "start": 115, |
| "end": 139, |
| "text": "Gururangan et al., 2018;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 140, |
| "end": 161, |
| "text": "Poliak et al., 2018b)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 440, |
| "end": 471, |
| "text": "(Chatzikyriakidis et al., 2017)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Compared to eliciting NLI datasets directly, i.e. asking humans to author contexts and/or hypothesis sentences, recasting can 1) help determine whether an NLU model performs distinct types of reasoning; 2) limit types of biases observed in previous NLI data; and 3) generate examples cheaply, potentially at large scales.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation & Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "NLU Insights Popular NLI datasets, e.g. Stanford Natural Language Inference (SNLI) (Bowman et al., 2015) and its successor Multi-NLI (Williams et al., 2017) , were created by eliciting hypotheses from humans. Crowd-source workers were tasked with writing one sentence each that is entailed, neutral, and contradicted by a caption extracted from the Flickr30k corpus (Young et al., 2014) . Although these datasets are widely used to train and evaluate sentence representations, a high accuracy is not indicative of what types of reasoning NLI models perform. Workers were free to create any type of hypothesis for each context and label. Such datasets cannot be used to determine how well an NLI model captures many desired capabilities of language understanding systems, e.g. paraphrastic inference, complex anaphora resolution (White et al., 2017), or compositionality (Pavlick and Callison-Burch, 2016; Dasgupta et al., 2018) . By converting prior annotation of a specific phenomenon into NLI examples, recasting allows us to create a diverse NLI benchmark that tests a model's ability to perform distinct types of reasoning.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 104, |
| "text": "(Bowman et al., 2015)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 133, |
| "end": 156, |
| "text": "(Williams et al., 2017)", |
| "ref_id": "BIBREF75" |
| }, |
| { |
| "start": 366, |
| "end": 386, |
| "text": "(Young et al., 2014)", |
| "ref_id": "BIBREF78" |
| }, |
| { |
| "start": 870, |
| "end": 904, |
| "text": "(Pavlick and Callison-Burch, 2016;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 905, |
| "end": 927, |
| "text": "Dasgupta et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation & Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Limit Biases Studies indicate that many NLI datasets contain significant biases. Examples in the early Pascal RTE datasets could be correctly predicted based on syntax alone (Vanderwende and Dolan, 2006; . Statistical irregularities, and annotation artifacts, within class labels allow a hypothesis-only model to significantly outperform the majority baseline on at least six recent NLI datasets (Poliak et al., 2018b) . Class label biases may be attributed to the human-elicited protocol. Moreover, examples in such NLI datasets may contain racial and gendered stereotypes .", |
| "cite_spans": [ |
| { |
| "start": 174, |
| "end": 203, |
| "text": "(Vanderwende and Dolan, 2006;", |
| "ref_id": "BIBREF68" |
| }, |
| { |
| "start": 396, |
| "end": 418, |
| "text": "(Poliak et al., 2018b)", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation & Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We limit some biases by not relying on humans to generate hypotheses. Recast NLI datasets may still contain some biases, e.g. non-uniform distributions over NLI labels caused by the distribution of labels in the original dataset that we recast. 2 Experimental results using Poliak et al. (2018b) 's hypothesis-only model indicate to what degree the recast datasets retain some biases that may be present in the original semantic datasets.", |
| "cite_spans": [ |
| { |
| "start": 274, |
| "end": 295, |
| "text": "Poliak et al. (2018b)", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation & Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "NLI Examples at Large-scale Generating NLI datasets from scratch is costly. Humans must be paid to generate or label natural language text. This linearly scales costs as the amount of generated NLI-pairs increases. Existing annotations for a wide array of semantic NLP tasks are freely available. By leveraging existing semantic annotations already invested in by the community we can generate and label NLI pairs at little cost and create large NLI datasets to train data hungry models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation & Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Why These Semantic Phenomena? A long term goal is to develop NLU systems that can achieve human levels of understanding and reasoning. Investigating how different architectures and training corpora can help a system perform human-level general NLU is an important step in this direction. DNC contains recast NLI pairs that are easily understandable by humans and can be used to evaluate different sentence encoders and NLU systems. These semantic phenomena cover distinct types of reasoning that an NLU system may often encounter in the wild. While higher performance on these benchmarks might not be conclusive proof of a system achieving human-level reasoning, a system that does poorly should not be viewed as performing human-level NLU. We argue that these semantic phenomena play integral roles in NLU. There exist more semantic phenomena integral to NLU (Allen, 1995) and we plan to include them in future versions of the DNC.", |
| "cite_spans": [ |
| { |
| "start": 860, |
| "end": 873, |
| "text": "(Allen, 1995)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation & Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Previous Recast NLI Example sentences in RTE1 (Dagan et al., 2006) were extracted from MT, IE, and QA datasets, with the process referred to as 'recasting' in the thesis by Glickman (2006) . NLU problems were reframed under the NLI framework and candidate sentence pairs were extracted from existing NLP datasets and then labeled under NLI (Dagan et al., 2006) . Years later, this term was independently used by White et al.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 66, |
| "text": "RTE1 (Dagan et al., 2006)", |
| "ref_id": null |
| }, |
| { |
| "start": 173, |
| "end": 188, |
| "text": "Glickman (2006)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 336, |
| "end": 360, |
| "text": "NLI (Dagan et al., 2006)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation & Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "(2017), who proposed to \"leverage existing largescale semantic annotation collections as a source of targeted textual inference examples.\" The term 'recasting' was limited to automatically converting existing semantic annotations into labeled NLI examples without manual intervention. We adopt the broader definition of 'recasting' since our NLI examples were automatically or manually generated from prior NLU datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation & Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Traditionally, NLI has not been viewed as a downstream, applied NLP task. 3 Instead, the community has often used it as \"a generic evaluation framework\" to compare models for distinct downstream tasks (Dagan et al., 2006) or to determine whether a model performs distinct types of reasoning (Cooper et al., 1996) . These two different evaluation goals may affect which datasets are recast. We target both goals as we recast applied tasks and linguistically focused phenomena.", |
| "cite_spans": [ |
| { |
| "start": 201, |
| "end": 221, |
| "text": "(Dagan et al., 2006)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 291, |
| "end": 312, |
| "text": "(Cooper et al., 1996)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Applied Framework versus Inference Probing", |
| "sec_num": null |
| }, |
| { |
| "text": "We describe efforts to recast 7 semantic phenomena from a total of 13 datasets into labeled NLI examples. Many of the recasting methods rely on simple templates that do not include nuances and variances typical of natural language. This allows us to specifically test how sentence representations capture distinct types of reasoning. When recasting, we preserve each dataset's train/dev/test split. If a dataset does not contain such a split, we create a random split with roughly a 80:10:10 ratio. Table 2 reports statistics about each recast dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 499, |
| "end": 506, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Recasting Semantic Phenomena", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Event Factuality (EF) Event factuality prediction is the task of determining whether an event described in text occurred. Determining whether an event occurred enables accurate inferences, e.g. monotonic inferences, based on the event . 4 Incorporating factuality has been shown to improve NLI (Sauri and Pustejovsky, 2007) .", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 238, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 294, |
| "end": 323, |
| "text": "(Sauri and Pustejovsky, 2007)", |
| "ref_id": "BIBREF60" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recasting Semantic Phenomena", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We recast event factuality annotations from UW (Lee et al., 2015) , MEANTIME (Minard et al., 2016) , and Decomp . We use sentences from original datasets as contexts and templates (1a) and (1b) as hypotheses. 5 (1) a. The Event happened b. The Event did not happen If the predicate denoting the Event was annotated as having happened in the factuality dataset, the context paired with (1a) is labeled as ENTAILED and the same context paired with (1b) is labeled as NOT-ENTAILED. Otherwise, we swap the labels.", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 65, |
| "text": "(Lee et al., 2015)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 77, |
| "end": 98, |
| "text": "(Minard et al., 2016)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 209, |
| "end": 210, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recasting Semantic Phenomena", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Named Entity Recognition (NER) Distinct types of entities have different properties and relational objects (Prince, 1978) that can help infer facts from a given context. For example, if a system can detect that an entity is a name of a nation, then that entity likely has a leader, a language, and a culture (Prince, 1978; Van Durme, 2010) . When classifying NLI pairs, a model can determine if an object mentioned in the hypothesis can be a relational object typically associated with the type of entity described in the context. NER tags can also be directly used to determine if a hypothesis is likely to not be entailed by a context, such as when entities in contexts and hypotheses do not share NER tags (Castillo and Alemany, 2008; Sammons et al., 2009; Pakray et al., 2010) . Given a sentence annotated with NER tags, we recast the annotations by preserving the original sentences as contexts and creating hypotheses using the template \"NP is a Label.\" 6 For ENTAILED hypotheses we replace Label with the correct NER label of the NP; for NOT-ENTAILED hypotheses, we choose an incorrect label from the prior distribution of NER tags for the given phrase. This prevents us from adding additional biases besides any class-label statistical irregularities present in the original data. We apply this procedure on the Gronigen Meaning Bank (Bos et al., 2017) and the ConLL-2003 Shared Task (Tjong Kim Sang and De Meulder, 2003) .", |
| "cite_spans": [ |
| { |
| "start": 107, |
| "end": 121, |
| "text": "(Prince, 1978)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 308, |
| "end": 322, |
| "text": "(Prince, 1978;", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 323, |
| "end": 339, |
| "text": "Van Durme, 2010)", |
| "ref_id": "BIBREF67" |
| }, |
| { |
| "start": 709, |
| "end": 737, |
| "text": "(Castillo and Alemany, 2008;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 738, |
| "end": 759, |
| "text": "Sammons et al., 2009;", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 760, |
| "end": 780, |
| "text": "Pakray et al., 2010)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 1342, |
| "end": 1360, |
| "text": "(Bos et al., 2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1392, |
| "end": 1429, |
| "text": "(Tjong Kim Sang and De Meulder, 2003)", |
| "ref_id": "BIBREF65" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recasting Semantic Phenomena", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The ability to perform pronoun resolution is essential to language understanding, in many cases requiring common-sense reasoning about the world (Levesque et al., 2012) . White et al. (2017) show that this task can be directly recast as an NLI problem by transforming Winograd schemas into NLI sentence pairs.", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 168, |
| "text": "(Levesque et al., 2012)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 171, |
| "end": 190, |
| "text": "White et al. (2017)", |
| "ref_id": "BIBREF71" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "Using a similar formula Rudinger et al. (2018a) introduce Winogender schemas, minimal sentence pairs that differ only by pronoun gender. With this Sem. Phenomena Dataset # pairs Automated Decomp 42K (41,888) UW (Lee et al., 2015) 5K (5,094) Event Factuality", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 47, |
| "text": "Rudinger et al. (2018a)", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 211, |
| "end": 229, |
| "text": "(Lee et al., 2015)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "MeanTime (Minard et al., 2016) .7K 738Groningen (Bos et al., 2017) 260K (261,406) Named Entity Recognition CoNLL (Tjong Kim Sang and De Meulder, 2003) 60K 59,970Gendered Anaphora Winogender (Rudinger et al., 2018a) .4K 464VerbCorner ( Table 2 : Statistics summarizing the recast datasets. The first column refers to the original annotation that was recast, the 'Combined' row refers to the combination of our recast datasets. The second column indicates the datasets that were recast, and the 3rd column reports how many labeled NLI pairs were extracted from the corresponding dataset. The last column indicates whether the recasting method was fully-automatic without human involvement (), manual (), or used a semi-automatic method that included human intervention (). The Multi-NLI and SNLI numbers contextualize the scale of our dataset.", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 30, |
| "text": "(Minard et al., 2016)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 48, |
| "end": 66, |
| "text": "(Bos et al., 2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 113, |
| "end": 150, |
| "text": "(Tjong Kim Sang and De Meulder, 2003)", |
| "ref_id": "BIBREF65" |
| }, |
| { |
| "start": 190, |
| "end": 214, |
| "text": "(Rudinger et al., 2018a)", |
| "ref_id": "BIBREF57" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 235, |
| "end": 242, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "adapted pronoun resolution task, they demonstrate the presence of systematic gender bias in coreference resolution systems. We recast Winogender schemas as an NLI task, introducing a potential method of detecting gender bias in NLI systems or sentence embeddings. In recasting, the context is the original, unmodified Winogender sentence; the hypothesis is a short, manually constructed sentence having a correct (ENTAILED) or incorrect (NOT-ENTAILED) pronoun resolution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "Lexicosyntactic Inference (Lex) While many inferences in natural language are triggered by lexical items alone, there exist pervasive inferences that arise from interactions between lexical items and their syntactic contexts. This is particularly apparent among propositional attitude verbs -e.g. think, want, know -which display complex distributional profiles (White and Rawlins, 2016). For instance, the verb remember can take both finite clausal complements and infinitival clausal complements.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "(2) a. Jo didn't remember that she ate b. Jo didn't remember to eat", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "This small change in the syntactic structure gives rise to large changes in the inferences that are licensed: (2a) presupposes that Jo ate while (2b) entails that Jo didn't eat. We recast data from three datasets that are relevant to these sorts of lexicosyntactic interactions. 7 They then asked annotators to answer questions of the form in (3) using three possible responses: yes, maybe or maybe not, and no (Karttunen et al., 2014) .", |
| "cite_spans": [ |
| { |
| "start": 279, |
| "end": 280, |
| "text": "7", |
| "ref_id": null |
| }, |
| { |
| "start": 411, |
| "end": 435, |
| "text": "(Karttunen et al., 2014)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "(3) a. Someone {knew, didn't know} that a particular thing happened. b. Did that thing happen?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the same procedure to annotate sentences containing verbs that take various types of infinitival complement: [NP _ for NP to VP], [NP _ to VP], [NP _ NP to VP], and [NP was _ed to VP]. 8 To recast these annotations, we assign the context sentences like (3a) to the majority class -yes, maybe or maybe not, no -across 10 different annotators, after applying an ordinal model-based normalization to their responses. We then pair each context sentence with three hypotheses. If annotated yes, maybe or maybe not, or no, the pair (3a)-(4a), (3a)-(4b), or (3a)-(4c) is respectively assigned ENTAILED and the other pairings are assigned NOT-ENTAILED; train/dev/test split labels are randomly assigned to every pair that context sentence appears in.", |
| "cite_spans": [ |
| { |
| "start": 192, |
| "end": 193, |
| "text": "8", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "Lex #2: Recasting VerbNet (VN) We create additional lexicosyntactic NLI examples from Verb-Net (Schuler, 2005) . VerbNet contains classes of verbs that each can have multiple frames. Each frame contains a mapping from syntactic arguments to thematic roles, which are used as arguments in Neo-Davidsonian first-order logical predicates (5b) that describe the frame's semantics. Each frame additionally contains an example sentence (5a) that we use as our NLI context and we create templates (5c) from the most frequent semantic predicates to generate hypotheses (5d). We use the Berkeley Parser (Petrov et al., 2006) to match tokens in an example sentence with the thematic roles and then fill in the templates with the matched tokens (5d). We also decompose multi-argument predicates into unary predicates to increase the number of hypotheses we generate. On average, each context is paired with 4.5 hypotheses. We generate NOT-ENTAILED hypotheses by filling in templates with incorrect thematic roles. 9 We partition the recast NLI examples into train/development/test splits such that all example sentences from a VerbNet class (which we use a NLI hypothesis) appear in only one partition of our dataset. In turn, the recast VerbNet dataset's partition is not exactly 80:10:10. et al., 2013) . Each sentence in VC is judged based on the decomposed semantic properties. We convert each semantic property into declarative statements 10 to create hypotheses and pair them with the original sentences which we preserve as contexts. The NLI pair is ENTAILED or NOT-ENTAILED depending on the given sentence's semantic judgment. (Reyes et al., 2012) . Puns are prime examples of figurative language that may perplex general NLU systems as they are one of the more regular uses of linguistic ambiguity (Binsted, 1996) and rely on a wide-range of phonetic, morphological, syntactic, and semantic ambiguity (Pepicello and Green, 1984; Binsted, 1996; Bekinschtein et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 110, |
| "text": "(Schuler, 2005)", |
| "ref_id": "BIBREF62" |
| }, |
| { |
| "start": 594, |
| "end": 615, |
| "text": "(Petrov et al., 2006)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 1003, |
| "end": 1004, |
| "text": "9", |
| "ref_id": null |
| }, |
| { |
| "start": 1280, |
| "end": 1293, |
| "text": "et al., 2013)", |
| "ref_id": null |
| }, |
| { |
| "start": 1624, |
| "end": 1644, |
| "text": "(Reyes et al., 2012)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 1796, |
| "end": 1811, |
| "text": "(Binsted, 1996)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1899, |
| "end": 1926, |
| "text": "(Pepicello and Green, 1984;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 1927, |
| "end": 1941, |
| "text": "Binsted, 1996;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1942, |
| "end": 1968, |
| "text": "Bekinschtein et al., 2011)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "We recast puns from Yang et al. 2015and Miller et al. (2017) using templates to generate contexts (6a) and hypotheses (6b), (6c). We replace Name with names sampled from a distribution based on US census data, 11 and Pun with the original sentence. If the original sentence was labeled as containing a pun, the (6a)-(6b) pair is labeled as ENTAILED and (6a)-(6c) is labeled as NOT-ENTAILED, otherwise we swap the labels. Relation Extraction (RE) The goal of the relation extraction (RE) task is to infer the real-world relationships between pairs of entities from natural language text. The task is \"grounded\" in the sense that the input is natural language text and the output is entity1, relation, entity2 tuples defined in the schema of some knowledge base. RE requires a system to understand the many different surface forms which may entail the same underlying relation, and to distinguish those from surface forms which involve the same entities but do not entail the relation of interest. For example, (7a) is entailed by (7b) and (7c) but not by (7d).", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 60, |
| "text": "Miller et al. (2017)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 210, |
| "end": 212, |
| "text": "11", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "(7) a. Name was born in Place b. Name is from Place c. Name, a Place native, . . .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gendered Anaphora Resolution (GAR)", |
| "sec_num": null |
| }, |
| { |
| "text": "Natural language surface forms are often used in RE in a weak-supervision setting (Mintz et al., 2009; Hoffmann et al., 2011; Riedel et al., 2013) . That is, if entity1 and entity2 are known to be related by relation, it is assumed that every sentence observed which mentions both entity1 and entity2 is assumed to be a realization of relation: i.e. (7d) would (falsely) be taken as evidence of the birthPlace relation.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 102, |
| "text": "(Mintz et al., 2009;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 103, |
| "end": 125, |
| "text": "Hoffmann et al., 2011;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 126, |
| "end": 146, |
| "text": "Riedel et al., 2013)", |
| "ref_id": "BIBREF55" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "d. Name visited Place", |
| "sec_num": null |
| }, |
| { |
| "text": "Here we first generate hypotheses and then corresponding contexts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "d. Name visited Place", |
| "sec_num": null |
| }, |
| { |
| "text": "To generate hypotheses, we begin with entity-relation triples extracted from DBPedia infoboxes: e.g. Barack Obama, birthPlace, Hawaii . These relation predicates were extracted directly from Wikipedia infoboxes and are not cleaned. As a result, many relations are redundant with one another (birthPlace, hometown) and some relations do not correspond to obvious natural language glosses based on the name alone (demographics1Info). Thus, we construct a template for each predicate p by manually inspecting 1) a sample of entities which are related by p 2) a sample of sentences in which those entities co-occur and 3) the most frequent natural language strings which join entities related by p according to a OpenIE triple database (Schmitz et al., 2012; Fader et al., 2011) extracted from a large text corpus. We then manually write a simple template (e.g. Mention1 was born in Mention2) for p, ignoring any unclear relations. In total, we end up with 574 unique relations, expressed by 354 unique templates.", |
| "cite_spans": [ |
| { |
| "start": 732, |
| "end": 754, |
| "text": "(Schmitz et al., 2012;", |
| "ref_id": "BIBREF61" |
| }, |
| { |
| "start": 755, |
| "end": 774, |
| "text": "Fader et al., 2011)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "d. Name visited Place", |
| "sec_num": null |
| }, |
| { |
| "text": "For each such hypothesis generated, we create a number of contexts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "d. Name visited Place", |
| "sec_num": null |
| }, |
| { |
| "text": "We begin with the FACC1 corpus (Gabrilovich et al., 2013) which contains natural language sentences from ClueWeb in which entities have been automatically linked to disambiguated Freebase entities, when possible.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 57, |
| "text": "(Gabrilovich et al., 2013)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "d. Name visited Place", |
| "sec_num": null |
| }, |
| { |
| "text": "Then, given a tuple entity1, relation, entity2 , we find every sentence which contains both entity1 and entity2. Since many of these sentences are false positives (7d), we have human annotators vet each context/hypothesis pair, using the ordinal entailment scale described in Zhang et al. (2017) . We include optional binary labels by converting pairs labeled as 1 \u2212 4 and 5 to ENTAILED and NOT-ENTAILED respectively. 12 We apply pruning methods (described in Appendix B.4) to combat issues related to noisy, ungrammatical hypotheses and disagreement between multiple annotators. Subjectivity (Sentiment) Some of the previously discussed semantic phenomena deal with objective information -did an event occur or what type of entities does a specific name represent. Subjective information is often expressed differently (Wiebe et al., 2005) , making it important to use other tests to probe whether an NLU system understands language that expresses subjective information. We are interested in determining whether general NLU models capture 'subjective clues' that can help identify and understand emotions, opinions, and sentiment within a subjective text (Wilson et al., 2006) .", |
| "cite_spans": [ |
| { |
| "start": 276, |
| "end": 295, |
| "text": "Zhang et al. (2017)", |
| "ref_id": "BIBREF79" |
| }, |
| { |
| "start": 418, |
| "end": 420, |
| "text": "12", |
| "ref_id": null |
| }, |
| { |
| "start": 820, |
| "end": 840, |
| "text": "(Wiebe et al., 2005)", |
| "ref_id": "BIBREF74" |
| }, |
| { |
| "start": 1157, |
| "end": 1178, |
| "text": "(Wilson et al., 2006)", |
| "ref_id": "BIBREF76" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "d. Name visited Place", |
| "sec_num": null |
| }, |
| { |
| "text": "We recast a sentiment analysis dataset since the task is the \"expression of subjectivity as either a positive or negative opinion\" (Taboada, 2016) . We extract sentences from product, movie, and restaurant reviews labeled as containing positive or negative sentiment (Kotzias et al., 2015) . Contexts (8a) and hypotheses (8b), (8c) are generated using the following templates: Item is replaced with either \"product\", \"movie\", or \"restaurant\", and the Name is sampled as previously discussed. If the original sentence contained positive (negative) sentiment, the (8a)-(8b) pair is labeled as ENTAILED (NOT-ENTAILED) and (8a)-(8c) is labeled as NOT-ENTAILED (ENTAILED).", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 146, |
| "text": "(Taboada, 2016)", |
| "ref_id": "BIBREF64" |
| }, |
| { |
| "start": 267, |
| "end": 289, |
| "text": "(Kotzias et al., 2015)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "d. Name visited Place", |
| "sec_num": null |
| }, |
| { |
| "text": "Recasting can create noisy NLI examples that may potentially enable a model to achieve a high accuracy by learning dataset specific characteristics that are unrelated to NLU. For example, Poliak et al. (2018a,b) previously noted the association between ungrammaticality and NOT-ENTAILED examples based on how White et al. (2017) recast the FrameNet+ dataset (Pavlick et al., 2015) . refers to a model that was initialized with pre-trained parameters and then re-trained on the corresponding recast data. (fixed) refers to a model that was trained and then evaluated on these data sets. Bold numbers in each column indicate which settings were responsible for the highest accuracy on the specific recast dataset.", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 211, |
| "text": "Poliak et al. (2018a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 358, |
| "end": 380, |
| "text": "(Pavlick et al., 2015)", |
| "ref_id": "BIBREF46" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Noise in Recast Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In the DNC, most of the noisy examples are in the recast VerbNet and Relation Extraction portions. In recast VerbNet, some examples are noisy because of incorrect subject-verb agreement. 13 Since more noisy examples appeared in the Relation Extraction set, we relied on Amazon Mechanical Turk workers to flag ungrammatical hypotheses in the recast dataset, and we remove NLI pairs with ungrammatical hypotheses. 14", |
| "cite_spans": [ |
| { |
| "start": 187, |
| "end": 189, |
| "text": "13", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Noise in Recast Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our experiments demonstrate how these recast datasets may be used to evaluate how well models capture different types of semantic reasoning necessary for general language understanding. We also include results from a hypothesis-only model as a strong baseline. This may reveal whether the recast datasets retain statistical irregularities from the original, task-specific annotations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For demonstrating how well an NLI model performs these fine-grained types of reasoning, we use InferSent (Conneau et al., 2017) . InferSent independently encodes a context and hypothesis with a bi-directional LSTM and combines the sentence representations by concatenating the individual sentence representations, 13 \"Her teeth was cared for\" or \"Floss were used\". 14 See Appendix B.4 for details.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 127, |
| "text": "(Conneau et al., 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 314, |
| "end": 316, |
| "text": "13", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "their element-wise subtraction and product. The combined representation is then fed into a MLP with a single hidden layer. The hypothesis-only model is a modified version of InferSent that only accesses hypotheses (Poliak et al., 2018b) . We report experimental details in Appendix C. Table 3 reports the models' accuracies across the recast NLI datasets. Even though we categorize VerbNet, MegaVeridicality, and VerbCorner as lexicosyntatic inference, we train and evaluate models separately on these three datasets because we use different strategies to individually recast them. When evaluating NLI models, our baseline is the maximum between the accuracies of the hypothesis-only model and the majority class label (MAJ). In six of the eight recast datasets that we use to train our models the hypothesisonly model outperforms MAJ. The two datasets where the hypothesis-only model does not outperform MAJ are Sentiment and VN, each of which contain less than 10K examples. 15 We do not train on GAR because of its small size.", |
| "cite_spans": [ |
| { |
| "start": 214, |
| "end": 236, |
| "text": "(Poliak et al., 2018b)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 977, |
| "end": 979, |
| "text": "15", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 285, |
| "end": 292, |
| "text": "Table 3", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Our results suggest that InferSent, when not pre-trained on any other data, might capture specific semantic phenomena better than other seman-tic phenomena. InferSent seems to learn the most about determining if an event occurred, since the difference between its accuracy and that of the hypothesis-only baseline (+13.93) is largest on the recast EF dataset compared to the other recast annotations. The model seems to similarly learn to perform (or detect) the type of lexicosyntactic inference present in VC and MV. Interestingly, the hypothesis-only model outperforms InferSent on the recast RE.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The hypothesis-only model can demonstrate how likely it is that an NLI label applies to a hypothesis, regardless of its context and indicates how well each recast dataset tests a model's ability to perform each specific type of reasoning when performing NLI. The high hypothesis-only accuracy on the recast NER dataset may demonstrate that the hypothesis-only model is able to detect that the distribution of class labels for a given word may be peaky. For example, Hong Kong appears 130 times in the training set and is always labeled as a location. Based on this, in future work we may consider different methods to recast NER annotations into labeled NLI examples, or limit the dataset's training size.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hypothesis Only Baseline", |
| "sec_num": null |
| }, |
| { |
| "text": "Pre-training models on DNC We would like to know whether initializing models with pre-trained parameters improves scores. We notice that when we pre-train our models on DNC, for the larger datasets, a pre-trained model does not seem to significantly outperform randomly initializing the parameters. For the smaller datasets, specifically Puns, Sentiment and VN, a pre-trained model significantly outperforms random initialization. 16 We are also interested to know whether finetuning these pre-trained models on each category (update) improves a model's ability to perform well on the category compared to keeping the pre-trained models' parameters static (fixed). Across all of the recast datasets, updating the pretrained model's parameters during training improves InferSent's accuracies more than keeping the model's parameters fixed. When updating a model pre-trained on the entire DNC, we see the largest improvements on VN (+9.15). Williams et al. (2017) argue that Multi-NLI \"[makes] it possible to evaluate systems on nearly the full complexity of the language.\" However, how well does Multi-NLI test a model's capability to understand the diverse semantic phenomena captured in DNC? We posit that if a model, trained on and performing well on Multi-NLI, does not perform well on our recast datasets, then Multi-NLI might not evaluate a model's ability to understand the \"full complexity\" of language as argued. 17 When trained on Multi-NLI, our InferSent model achieves an accuracy of 70.22% on (matched) Multi-NLI. 18 When we test the model on the recast datasets (without updating the parameters), we see significant drops. 19 On the datasets testing a model's lexicosyntactic inference capabilities, the model performs below the majority class baseline. On the NER, EF, and Puns datasets its performs below the hypothesis-only baseline. We also notice that on three of the datasets (EF, Puns, and VN), the fixed hypothesis-only model outperforms the fixed InferSent model.", |
| "cite_spans": [ |
| { |
| "start": 431, |
| "end": 433, |
| "text": "16", |
| "ref_id": null |
| }, |
| { |
| "start": 939, |
| "end": 961, |
| "text": "Williams et al. (2017)", |
| "ref_id": "BIBREF75" |
| }, |
| { |
| "start": 1421, |
| "end": 1423, |
| "text": "17", |
| "ref_id": null |
| }, |
| { |
| "start": 1526, |
| "end": 1528, |
| "text": "18", |
| "ref_id": null |
| }, |
| { |
| "start": 1636, |
| "end": 1638, |
| "text": "19", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hypothesis Only Baseline", |
| "sec_num": null |
| }, |
| { |
| "text": "These results might suggest that Multi-NLI does not evaluate whether sentence representations capture these distinct semantic phenomena. This is a bit surprising for some of the recast phenomena. We would expect Multi-NLI's fiction section (especially its humor subset) in the training set to contain some figurative language that might be similar to puns, and the travel guides (and possibly telephone conversations) to contain text related to sentiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models trained on Multi-NLI", |
| "sec_num": null |
| }, |
| { |
| "text": "Pre-training on DNC or Multi-NLI? Initializing a model with parameters pre-trained on DNC or Multi-NLI often outperforms random initialization. 20 Is it better to pre-train on DNC or Multi-NLI? On five of the recast datasets, using a model pre-trained on DNC outperforms a model pre-trained on Multi-NLI. The results are flipped on the two datasets focused on downstream tasks (Sentiment and RE) and MV. However, the differences between pre-training on the DNC or Multi-NLI are small. From this, it is unclear whether pre-training on DNC is better than Multi-NLI.", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 146, |
| "text": "20", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models trained on Multi-NLI", |
| "sec_num": null |
| }, |
| { |
| "text": "Size of Pre-trained DNC Data We randomly sample 10K and 20K examples from each datasets' training set to investigate what happens if we train our models on a subsample of each training set instead of the entire DNC. Although we noticed a slight decrease across each recast test set, the decrease was not significant. We leave this investigating for a future thorough study.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models trained on Multi-NLI", |
| "sec_num": null |
| }, |
| { |
| "text": "Exploring what linguistic phenomena neural models learn Many tests have been used to probe how well neural models learn different linguistic phenomena. Linzen et al. (2016) use \"number agreement in English subject-verb dependencies\" to show that LSTMs learn about syntaxsensitive dependencies. In addition to syntax (Shi et al., 2016) , researchers have used other labeling tasks to investigate whether neural machine translation (NMT) models learn different linguistic phenomena (Belinkov et al., 2017a,b; Dalvi et al., 2017; Marvin and Koehn, 2018) . Recently, Poliak et al. (2018a) used recast NLI datasets to investigate semantics captured by NMT encoders.", |
| "cite_spans": [ |
| { |
| "start": 152, |
| "end": 172, |
| "text": "Linzen et al. (2016)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 316, |
| "end": 334, |
| "text": "(Shi et al., 2016)", |
| "ref_id": "BIBREF63" |
| }, |
| { |
| "start": 480, |
| "end": 506, |
| "text": "(Belinkov et al., 2017a,b;", |
| "ref_id": null |
| }, |
| { |
| "start": 507, |
| "end": 526, |
| "text": "Dalvi et al., 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 527, |
| "end": 550, |
| "text": "Marvin and Koehn, 2018)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 563, |
| "end": 584, |
| "text": "Poliak et al. (2018a)", |
| "ref_id": "BIBREF50" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Targeted Tests for Natural Language Understanding We follow a long line of work focused on building datasets to test how well NLU systems perform distinct types of semantic reasoning. FraCaS uses a limited number of sentencepairs to test whether systems understand semantic phenomena, e.g. generalized quantifiers, temporal references, and (nominal) anaphora (Cooper et al., 1996) . FraCas cannot be used to train neural models -it includes just roughly 300 highquality instances manually created by linguists. MacCartney (2009) created the FraCaS textual inference test suite by automatically \"convert [ing] each FraCaS question into a declarative hypothesis.\" Levesque et al. (2012)'s Winograd Schema Challenge forces a model to choose between two possible answers for a question based on a sentence describing an event.", |
| "cite_spans": [ |
| { |
| "start": 359, |
| "end": 380, |
| "text": "(Cooper et al., 1996)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 603, |
| "end": 608, |
| "text": "[ing]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Recent benchmarks test whether NLI models handle adjective-noun composition (Pavlick and Callison-Burch, 2016) , other types of composition (Dasgupta et al., 2018) , paraphrastic inference, anaphora resolution, and semantic protoroles (White et al., 2017) . Concurrently, Conneau et al. (2018)'s benchmark can be used to probe whether sentence representations capture many linguistic properties. It includes syntactic and surface form tests but does not focus on as a wide range of semantic phenomena as in the DNC. Glockner et al. (2018) introduce a modified version of SNLI to test how well NLI models perform when requiring lexical and world knowledge. Wang et al. (2018) 's GLUE dataset is intended to evaluate and potentially train a sentence representation to perform well across different NLP tasks. This continues an aspect of the initial RTE collection, designed to be representative of downstream tasks like QA, MT, and IR (Dagan et al., 2010) . While GLUE is therefore concerned with applied tasks, DNC, as well as Naik et al. (2018) 's NLI stress tests, is concerned with probing the capabilities of NLU models to capture explicitly distinguished aspects of meaning. While one may conjecture that the latter is needed to be \"solved\" to eventually \"solve\" the former, it may be that these goals only partially overlap. Some NLP researchers might focus on probing for semantic phenomena in sentence representations while others may be more interested in developing single sentence representations that can help models perform well on a wide array of downstream tasks.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 110, |
| "text": "(Pavlick and Callison-Burch, 2016)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 140, |
| "end": 163, |
| "text": "(Dasgupta et al., 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 235, |
| "end": 255, |
| "text": "(White et al., 2017)", |
| "ref_id": "BIBREF71" |
| }, |
| { |
| "start": 516, |
| "end": 538, |
| "text": "Glockner et al. (2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 656, |
| "end": 674, |
| "text": "Wang et al. (2018)", |
| "ref_id": "BIBREF70" |
| }, |
| { |
| "start": 922, |
| "end": 953, |
| "text": "MT, and IR (Dagan et al., 2010)", |
| "ref_id": null |
| }, |
| { |
| "start": 1026, |
| "end": 1044, |
| "text": "Naik et al. (2018)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We described how we recast a wide range of semantic phenomena from many NLP datasets into labeled NLI sentence pairs. These examples serve as a diverse NLI framework that may help diagnose whether NLU models capture and perform distinct types of reasoning. Our experiments demonstrate how to use this framework as an NLU benchmark. The DNC is actively growing as we continue recasting more datasets into labeled NLI examples. We encourage dataset creators to recast their datasets in NLI and invite them to add their recast datasets into the DNC. The collection, along with baselines and trained models are available online at http://www.decomp.net. Table 4 includes examples from all of the recast NLI datasets. We include one ENTAILED and one NOT-ENTAILED example from each dataset that tests a distinct type of reasoning.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 650, |
| "end": 657, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Here we add secondary information about the original datasets and our recasting efforts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Recasting Semantic Phenomena", |
| "sec_num": null |
| }, |
| { |
| "text": "We demonstrate how determining whether an event occurred can enable accurate inferences based on the event. Consider the following sentences:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Event Factuality", |
| "sec_num": null |
| }, |
| { |
| "text": "(9) a. She walked a beagle b. She walked a dog c. She walked a brown beagle", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Event Factuality", |
| "sec_num": null |
| }, |
| { |
| "text": "If the walking occurred, (9a) entails (9b) but not (9c). If we negate the action in sentences (9a), (9b), and (9c) to respectively become:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Event Factuality", |
| "sec_num": null |
| }, |
| { |
| "text": "(10) a. She did not walk a beagle b. She did not walk a dog c. She did not walk a brown beagle", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Event Factuality", |
| "sec_num": null |
| }, |
| { |
| "text": "The new hypothesis (10c) is now entailed by the context (10a) while (10b) is not.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Event Factuality", |
| "sec_num": null |
| }, |
| { |
| "text": "When recasting VerbCorner, we use the following templates for hypotheses, assigning them as EN-TAILED and NOT-ENTAILED based on the positive or negative answers to the annotation task questions about the context sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.2.1 VerbCorner", |
| "sec_num": null |
| }, |
| { |
| "text": "(11) a. Someone {moved/did not move} from their location b. Something touched another thing / Nothing touched anything else c. Someone or something {applied/did not apply} force onto something d. Someone or something {changed/did not change} physically e. Someone {changed/did not change} their thoughts, feelings, or beliefs f. Something {good/neutral/bad} happened", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.2.1 VerbCorner", |
| "sec_num": null |
| }, |
| { |
| "text": "Puns in Yang et al. (2015) were originally extracted from punsoftheday.com, and sentences without puns came from newswire and proverbs. The sentences are labeled as containing a pun or not. Puns in Miller et al. (2017) were sampled from prior pun detection datasets (Miller and Gurevych, 2015; Miller and Turkovi\u0107, 2016) and includes new examples generated from scratch for the shared task; the original labels denote whether the sentences contain homographic, heterographic, or no pun at all. Here, we are only interested in whether a sentence contains a pun or not instead of discriminating between homographic and heterographic puns.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 26, |
| "text": "Yang et al. (2015)", |
| "ref_id": "BIBREF77" |
| }, |
| { |
| "start": 198, |
| "end": 218, |
| "text": "Miller et al. (2017)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 266, |
| "end": 293, |
| "text": "(Miller and Gurevych, 2015;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 294, |
| "end": 320, |
| "text": "Miller and Turkovi\u0107, 2016)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.3 Figurative Language", |
| "sec_num": null |
| }, |
| { |
| "text": "Since hypotheses were automatically generated from Wikipedia infoboxes, many examples are noisy and ungrammatical. We presented hypotheses (independent of their corresponding contexts) to Mechanical Turk workers and asked them to label each sentence as containing no grammatical error, minor grammatical issues, or major grammatical issues. We removed the 2, 056 NLI examples with hypothesis containing major grammatical issues, resulting in 28, 041 labeled pairs. Interestingly, almost 70% of those examples where labeled between 1 \u2212 4, which we view as NOT-ENTAILED. We release the ungrammatical NLI examples as supplementary data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.4 Relation Extraction", |
| "sec_num": null |
| }, |
| { |
| "text": "A second source of noise in the recast relation extraction dataset can be caused by disagreement amongst multiple annotators. Examples in our training and development sets are annotated by a single annotator while we use 3to 5-way redundancy to annotate the test examples. To guarantee high-quality test examples, we only include examples with 100% inner-annotator agreement. Additionally, we remove the 16 examples labeled with 4 from our NOT-ENTAILED examples in this pruned test set since some of these examples are arguably entailments. Consequently, the test set contains 761 examples, out of the original 3, 670 test examples. Nevertheless, we separately release all 3, 670 test examples and include the original annotations as well, enabling others to consider other methods to collapse the multi-way annotations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.4 Relation Extraction", |
| "sec_num": null |
| }, |
| { |
| "text": "In a corpus with part-of-speech tags, the distribution of labels for the word \"the\" will likely peak at the Det tag.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "This changed as large NLI datasets have recently been used to train, or pre-train, models to perform NLI, or other tasks(Conneau et al., 2017;Pasunuru and Bansal, 2017).4 Appendix B.1 provides an example.5 We replace Event with the event described in the context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We ensure grammatical hypotheses by appropriately conjugating \"is a\" when needed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "NP is always instantiated by someone; and S is always instantiated by a particular thing happened.8 NP is always instantiated by either someone, a particular person, or a particular thing; and VP is always instantiated by happen, do a particular thing, or have a particular thing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "This is similar to Aharon et al. (2010)'s template matching to generate entailment rules fromFrameNet (Baker et al., 1998).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We list the declarative statements in Appendix B.2.1. 11 http://www.ssa.gov/oact/babynames/ names.zip", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Following the label set in SNLI,Zhang et al. (2017) converted pairs labeled with 1 as CONTRADICTION, 2 \u2212 4 as NEUTRAL and 5 to ENTAILMENT. Since here we are generally interested in binary classification, we merge the CON-TRADICTION and NEUTRAL examples as NOT-ENTAILED.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "This is similar to Poliak et al. (2018b)'s results where a hypothesis-only model did not outperform MAJ on datasets with \u2264 10K examples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "By 32.81, 31.00, and 30.83 points respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We treat Multi-NLI's NEUTRAL and CONTRADICTION labels as equivalent to the DNC's NOT-ENTAILED label.18 Although this is about 10 points below SoTA, we believe that the pre-trained model performs well enough to evaluate whether Multi-NLI tests a model's capability to understand the diverse semantic phenomena in the DNC.19 InferSent (pre-trained, fixed) inTable 3.20 Pre-training does not improve accuracies on NER or MV.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.yelp.com/dataset_challenge divided by 5. As described in Poliak et al. (2018b), our hypothesis-only model feeds the hypotheses' encoded representation directly into the MLP.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Diyi Yang for help with the Pun-sOfTheDay dataset, the JSALT \"Sentence Representation\" team for insightful discussions, and three anonymous reviewers for feedback. This work was supported by the JHU HLT-COE, DARPA LORELEI and AIDA, NSF-BCS (1748969/1749025), and NSF-GRFP (1232825). The views and conclusions contained in this publication are those of the authors and should not be interpreted as representing official policies or endorsements of DARPA or the U.S. Government.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "In all our experiments, we use pre-computed GloVe embeddings (Pennington et al., 2014) and use the OOV vector for words that do not have a defined embedding. We follow Conneau et al. (2017) 's procedure to train our models. During training, our models are optimized with SGD. Our initial learning rate is 0.1 with a decay rate of 0.99. Our models train for at most 20 epochs and can optionally terminate early when the learning rate is less than 10 \u22125 . If the accuracy deceases on the development set in any epoch, the learning rate is", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 86, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 168, |
| "end": 189, |
| "text": "Conneau et al. (2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C Experimental Details", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Generating entailment rules from framenet", |
| "authors": [ |
| { |
| "first": "Roni", |
| "middle": [], |
| "last": "Ben Aharon", |
| "suffix": "" |
| }, |
| { |
| "first": "Idan", |
| "middle": [], |
| "last": "Szpektor", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the ACL 2010 Conference Short Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "241--246", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roni Ben Aharon, Idan Szpektor, and Ido Dagan. 2010. Generating entailment rules from framenet. In Pro- ceedings of the ACL 2010 Conference Short Papers, pages 241-246. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Natural language understanding", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Allen", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Allen. 1995. Natural language understanding. Pearson.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The berkeley framenet project", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Collin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Baker", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Charles", |
| "suffix": "" |
| }, |
| { |
| "first": "John B", |
| "middle": [], |
| "last": "Fillmore", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of the 17th international conference on Computational linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "86--90", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Collin F Baker, Charles J Fillmore, and John B Lowe. 1998. The berkeley framenet project. In Proceed- ings of the 17th international conference on Compu- tational linguistics-Volume 1, pages 86-90. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Why clowns taste funny: the relationship between humor and semantic ambiguity", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Tristan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bekinschtein", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [ |
| "M" |
| ], |
| "last": "Davis", |
| "suffix": "" |
| }, |
| { |
| "first": "Adrian", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rodd", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Owen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Neuroscience", |
| "volume": "31", |
| "issue": "26", |
| "pages": "9665--9671", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tristan A Bekinschtein, Matthew H Davis, Jennifer M Rodd, and Adrian M Owen. 2011. Why clowns taste funny: the relationship between humor and semantic ambiguity. Journal of Neuroscience, 31(26):9665- 9671.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "What do neural machine translation models learn about morphology?", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "861--872", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov, Nadir Durrani, Fahim Dalvi, Has- san Sajjad, and James Glass. 2017a. What do neu- ral machine translation models learn about morphol- ogy? In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 861-872. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Evaluating layers of representation in neural machine translation on part-of-speech and semantic tagging tasks", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov, Llu\u00eds M\u00e0rquez, Hassan Sajjad, Nadir Durrani, Fahim Dalvi, and James Glass. 2017b. Evaluating layers of representation in neural machine translation on part-of-speech and seman- tic tagging tasks. In Proceedings of the Eighth In- ternational Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1-10, Taipei, Taiwan. Asian Federation of Natural Lan- guage Processing.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Machine humour: An implemented model of puns", |
| "authors": [ |
| { |
| "first": "Kim", |
| "middle": [], |
| "last": "Binsted", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kim Binsted. 1996. Machine humour: An implemented model of puns. Ph.D. thesis, University of Edin- burgh, Edinburgh, Scotland.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "The groningen meaning bank", |
| "authors": [ |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [], |
| "last": "Evang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Noortje", |
| "suffix": "" |
| }, |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Venhuizen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bjerva", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Handbook of Linguistic Annotation", |
| "volume": "", |
| "issue": "", |
| "pages": "463--496", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johan Bos, Valerio Basile, Kilian Evang, Noortje J Venhuizen, and Johannes Bjerva. 2017. The gronin- gen meaning bank. In Handbook of Linguistic An- notation, pages 463-496. Springer.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A large annotated corpus for learning natural language inference", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large an- notated corpus for learning natural language infer- ence. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "An approach using named entities for recognizing textual entailment", |
| "authors": [ |
| { |
| "first": "Julio", |
| "middle": [ |
| "Javier" |
| ], |
| "last": "Castillo", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [ |
| "Alonso" |
| ], |
| "last": "Alemany", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Notebook Papers of the Text Analysis Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julio Javier Castillo and Laura Alonso Alemany. 2008. An approach using named entities for recognizing textual entailment. In Notebook Papers of the Text Analysis Conference, TAC Workshop.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "An overview of natural language inference data collection: The way forward?", |
| "authors": [ |
| { |
| "first": "Stergios", |
| "middle": [], |
| "last": "Chatzikyriakidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Cooper", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Computing Natural Language Inference Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stergios Chatzikyriakidis, Robin Cooper, Simon Dob- nik, and Staffan Larsson. 2017. An overview of natural language inference data collection: The way forward? In Proceedings of the Computing Natural Language Inference Workshop.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Supervised learning of universal sentence representations from natural language inference data", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "670--680", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo\u00efc Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing, pages 670-680, Copen- hagen, Denmark. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "What you can cram into a single $&!#* vector: Probing sentence embeddings for linguistic properties", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Germ\u00e3\u010d\u00e2\u0105n", |
| "middle": [], |
| "last": "Kruszewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00e3\u010d\u00e2\u0155c", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2126--2136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Germ\u00c3\u010c\u00c2\u0105n Kruszewski, Guillaume Lample, Lo\u00c3\u010c\u00c2\u0155c Barrault, and Marco Baroni. 2018. What you can cram into a single $&!#* vector: Probing sentence embeddings for linguistic properties. In Proceedings of the 56th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2126-2136, Mel- bourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Using the framework", |
| "authors": [ |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Cooper", |
| "suffix": "" |
| }, |
| { |
| "first": "Dick", |
| "middle": [], |
| "last": "Crouch", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Van Eijck", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Fox", |
| "suffix": "" |
| }, |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Van Genabith", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Jaspars", |
| "suffix": "" |
| }, |
| { |
| "first": "Hans", |
| "middle": [], |
| "last": "Kamp", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Milward", |
| "suffix": "" |
| }, |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Pinkal", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robin Cooper, Dick Crouch, Jan Van Eijck, Chris Fox, Johan Van Genabith, Jan Jaspars, Hans Kamp, David Milward, Manfred Pinkal, Massimo Poesio, et al. 1996. Using the framework. Technical report.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Recognizing textual entailment: Rational, evaluation and approaches-erratum", |
| "authors": [ |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Ido Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernardo", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [ |
| "Roth" |
| ], |
| "last": "Magnini", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Natural Language Engineering", |
| "volume": "16", |
| "issue": "1", |
| "pages": "105--105", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ido Dagan, Bill Dolan, Bernardo Magnini, and Dan Roth. 2010. Recognizing textual entailment: Ra- tional, evaluation and approaches-erratum. Natural Language Engineering, 16(1):105-105.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "The pascal recognising textual entailment challenge", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Ido Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernardo", |
| "middle": [], |
| "last": "Glickman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Magnini", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Machine learning challenges. evaluating predictive uncertainty, visual object classification, and recognising tectual entailment", |
| "volume": "", |
| "issue": "", |
| "pages": "177--190", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2006. The pascal recognising textual entailment challenge. In Machine learning challenges. evalu- ating predictive uncertainty, visual object classifica- tion, and recognising tectual entailment, pages 177- 190. Springer.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Recognizing textual entailment: Models and applications", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Ido Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [ |
| "Massimo" |
| ], |
| "last": "Sammons", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zanzotto", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Synthesis Lectures on Human Language Technologies", |
| "volume": "6", |
| "issue": "4", |
| "pages": "1--220", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ido Dagan, Dan Roth, Mark Sammons, and Fabio Mas- simo Zanzotto. 2013. Recognizing textual entail- ment: Models and applications. Synthesis Lectures on Human Language Technologies, 6(4):1-220.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Understanding and improving morphological learning in the neural machine translation decoder", |
| "authors": [ |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Vogel", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "142--151", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fahim Dalvi, Nadir Durrani, Hassan Sajjad, Yonatan Belinkov, and Stephan Vogel. 2017. Understanding and improving morphological learning in the neu- ral machine translation decoder. In Proceedings of the Eighth International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 142-151, Taipei, Taiwan. Asian Federation of Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Evaluating Compositionality in Sentence Embeddings", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Dasgupta", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Stuhlm\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gershman", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "D" |
| ], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "I. Dasgupta, D. Guo, A. Stuhlm\u00fcller, S. J. Gershman, and N. D. Goodman. 2018. Evaluating Composi- tionality in Sentence Embeddings. ArXiv e-prints.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Identifying relations for open information extraction", |
| "authors": [ |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Fader", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Soderland", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the conference on empirical methods in natural language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1535--1545", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anthony Fader, Stephen Soderland, and Oren Etzioni. 2011. Identifying relations for open information ex- traction. In Proceedings of the conference on empir- ical methods in natural language processing, pages 1535-1545. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Facc1: Freebase annotation of clueweb corpora, version 1 (release date 2013-06-26, format version 1, correction level 0)", |
| "authors": [ |
| { |
| "first": "Evgeniy", |
| "middle": [], |
| "last": "Gabrilovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Ringgaard", |
| "suffix": "" |
| }, |
| { |
| "first": "Amarnag", |
| "middle": [], |
| "last": "Subramanya", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evgeniy Gabrilovich, Michael Ringgaard, and Amar- nag Subramanya. 2013. Facc1: Freebase an- notation of clueweb corpora, version 1 (re- lease date 2013-06-26, format version 1, cor- rection level 0). Note: http://lemurproject. org/clueweb09/FACC1/Cited by, 5.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Applied textual entailment", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Glickman", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oren Glickman. 2006. Applied textual entailment. Ph.D. thesis, Bar Ilan University.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Breaking nli systems with sentences that require simple lexical inferences", |
| "authors": [ |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Glockner", |
| "suffix": "" |
| }, |
| { |
| "first": "Vered", |
| "middle": [], |
| "last": "Shwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "650--655", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Max Glockner, Vered Shwartz, and Yoav Goldberg. 2018. Breaking nli systems with sentences that re- quire simple lexical inferences. In Proceedings of the 56th Annual Meeting of the Association for Com- putational Linguistics (Volume 2: Short Papers), pages 650-655, Melbourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Annotation artifacts in natural language inference data", |
| "authors": [ |
| { |
| "first": "Swabha", |
| "middle": [], |
| "last": "Suchin Gururangan", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Swayamdipta", |
| "suffix": "" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "107--112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. 2018. Annotation artifacts in natural lan- guage inference data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 107-112, New Orleans, Louisiana. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "The verbcorner project: Toward an empirically-based semantic decomposition of verbs", |
| "authors": [ |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Joshua K Hartshorne", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Bonial", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1438--1442", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joshua K Hartshorne, Claire Bonial, and Martha Palmer. 2013. The verbcorner project: Toward an empirically-based semantic decomposition of verbs. In Proceedings of the 2013 Conference on Empiri- cal Methods in Natural Language Processing, pages 1438-1442.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Knowledge-based weak supervision for information extraction of overlapping relations", |
| "authors": [ |
| { |
| "first": "Raphael", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Congle", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "541--550", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raphael Hoffmann, Congle Zhang, Xiao Ling, Luke Zettlemoyer, and Daniel S. Weld. 2011. Knowledge-based weak supervision for information extraction of overlapping relations. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Tech- nologies, pages 541-550, Portland, Oregon, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The Chameleon-like Nature of Evaluative Adjectives", |
| "authors": [ |
| { |
| "first": "Lauri", |
| "middle": [], |
| "last": "Karttunen", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanley", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Annie", |
| "middle": [], |
| "last": "Zaenen", |
| "suffix": "" |
| }, |
| { |
| "first": "Cleo", |
| "middle": [], |
| "last": "Condoravdi", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "In Empirical Issues in Syntax and Semantics", |
| "volume": "10", |
| "issue": "", |
| "pages": "233--250", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lauri Karttunen, Stanley Peters, Annie Zaenen, and Cleo Condoravdi. 2014. The Chameleon-like Na- ture of Evaluative Adjectives. In Empirical Issues in Syntax and Semantics 10, pages 233-250. CSSP- CNRS.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "SciTail: A textual entailment dataset from science question answering", |
| "authors": [ |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tushar Khot, Ashish Sabharwal, and Peter Clark. 2018. SciTail: A textual entailment dataset from science question answering. In AAAI.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "From group to individual labels using deep features", |
| "authors": [ |
| { |
| "first": "Dimitrios", |
| "middle": [], |
| "last": "Kotzias", |
| "suffix": "" |
| }, |
| { |
| "first": "Misha", |
| "middle": [], |
| "last": "Denil", |
| "suffix": "" |
| }, |
| { |
| "first": "Padhraic", |
| "middle": [], |
| "last": "Nando De Freitas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smyth", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 21th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "597--606", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitrios Kotzias, Misha Denil, Nando De Freitas, and Padhraic Smyth. 2015. From group to individual la- bels using deep features. In Proceedings of the 21th ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining, pages 597-606. ACM.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Natural language inference from multiple premises", |
| "authors": [ |
| { |
| "first": "Alice", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hockenmaier", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "100--109", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alice Lai, Yonatan Bisk, and Julia Hockenmaier. 2017. Natural language inference from multiple premises. In Proceedings of the Eighth International Joint Conference on Natural Language Processing (Vol- ume 1: Long Papers), pages 100-109, Taipei, Tai- wan. Asian Federation of Natural Language Pro- cessing.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Event detection and factuality assessment with non-expert supervision", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1643--1648", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Yoav Artzi, Yejin Choi, and Luke Zettle- moyer. 2015. Event detection and factuality assess- ment with non-expert supervision. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1643-1648.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "The winograd schema challenge", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Hector", |
| "suffix": "" |
| }, |
| { |
| "first": "Ernest", |
| "middle": [], |
| "last": "Levesque", |
| "suffix": "" |
| }, |
| { |
| "first": "Leora", |
| "middle": [], |
| "last": "Davis", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Morgenstern", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Thirteenth International Conference on Principles of Knowledge Representation and Reasoning", |
| "volume": "", |
| "issue": "", |
| "pages": "552--561", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hector J Levesque, Ernest Davis, and Leora Morgen- stern. 2012. The winograd schema challenge. In Proceedings of the Thirteenth International Con- ference on Principles of Knowledge Representation and Reasoning, pages 552-561. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Assessing the ability of LSTMs to learn syntax-sensitive dependencies", |
| "authors": [ |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Dupoux", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "4", |
| "issue": "", |
| "pages": "521--535", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tal Linzen, Emmanuel Dupoux, and Yoav Goldberg. 2016. Assessing the ability of LSTMs to learn syntax-sensitive dependencies. Transactions of the Association for Computational Linguistics, 4:521- 535.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Learning word vectors for sentiment analysis", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "E" |
| ], |
| "last": "Maas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Daly", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th annual meeting of the association for computational linguistics: Human language technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "142--150", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew L Maas, Raymond E Daly, Peter T Pham, Dan Huang, Andrew Y Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analysis. In Proceedings of the 49th annual meeting of the as- sociation for computational linguistics: Human lan- guage technologies-volume 1, pages 142-150. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Natural language inference", |
| "authors": [ |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Maccartney", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bill MacCartney. 2009. Natural language inference. Ph.D. thesis, Stanford University.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Exploring Word Sense Disambiguation Abilities of Neural Machine Translation Systems", |
| "authors": [ |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Marvin", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 13th Conference of The Association for Machine Translation in the Americas", |
| "volume": "1", |
| "issue": "", |
| "pages": "125--131", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rebecca Marvin and Philipp Koehn. 2018. Explor- ing Word Sense Disambiguation Abilities of Neu- ral Machine Translation Systems. In Proceedings of the 13th Conference of The Association for Ma- chine Translation in the Americas (Volume 1: Re- search Track, pages 125-131.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Hidden factors and hidden topics: understanding rating dimensions with review text", |
| "authors": [ |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Mcauley", |
| "suffix": "" |
| }, |
| { |
| "first": "Jure", |
| "middle": [], |
| "last": "Leskovec", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 7th ACM conference on Recommender systems", |
| "volume": "", |
| "issue": "", |
| "pages": "165--172", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julian McAuley and Jure Leskovec. 2013. Hidden fac- tors and hidden topics: understanding rating dimen- sions with review text. In Proceedings of the 7th ACM conference on Recommender systems, pages 165-172. ACM.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Automatic disambiguation of english puns", |
| "authors": [ |
| { |
| "first": "Tristan", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "719--729", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tristan Miller and Iryna Gurevych. 2015. Automatic disambiguation of english puns. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), volume 1, pages 719-729.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Semeval-2017 task 7: Detection and interpretation of english puns", |
| "authors": [ |
| { |
| "first": "Tristan", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Hempelmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)", |
| "volume": "", |
| "issue": "", |
| "pages": "58--68", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tristan Miller, Christian Hempelmann, and Iryna Gurevych. 2017. Semeval-2017 task 7: Detection and interpretation of english puns. In Proceedings of the 11th International Workshop on Semantic Eval- uation (SemEval-2017), pages 58-68, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Towards the automatic detection and identification of english puns", |
| "authors": [ |
| { |
| "first": "Tristan", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Mladen", |
| "middle": [], |
| "last": "Turkovi\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "The European Journal of Humour Research", |
| "volume": "4", |
| "issue": "1", |
| "pages": "59--75", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tristan Miller and Mladen Turkovi\u0107. 2016. Towards the automatic detection and identification of english puns. The European Journal of Humour Research, 4(1):59-75.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Meantime, the newsreader multilingual event and time corpus", |
| "authors": [ |
| { |
| "first": "Anne-Lyse", |
| "middle": [], |
| "last": "Minard", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Speranza", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruben", |
| "middle": [], |
| "last": "Urizar", |
| "suffix": "" |
| }, |
| { |
| "first": "Begona", |
| "middle": [], |
| "last": "Altuna", |
| "suffix": "" |
| }, |
| { |
| "first": "Anneleen", |
| "middle": [], |
| "last": "Marieke Van Erp", |
| "suffix": "" |
| }, |
| { |
| "first": "Chantal", |
| "middle": [], |
| "last": "Schoen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Van Son", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Language Resources and Evaluation Conference (LREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anne-Lyse Minard, Manuela Speranza, Ruben Urizar, Begona Altuna, Marieke van Erp, Anneleen Schoen, and Chantal van Son. 2016. Meantime, the news- reader multilingual event and time corpus. In Language Resources and Evaluation Conference (LREC).", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Distant supervision for relation extraction without labeled data", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Mintz", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bills", |
| "suffix": "" |
| }, |
| { |
| "first": "Rion", |
| "middle": [], |
| "last": "Snow", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1003--1011", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Mintz, Steven Bills, Rion Snow, and Daniel Ju- rafsky. 2009. Distant supervision for relation ex- traction without labeled data. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP, pages 1003-1011, Suntec, Singapore. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Stress test evaluation for natural language inference", |
| "authors": [ |
| { |
| "first": "Aakanksha", |
| "middle": [], |
| "last": "Naik", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhilasha", |
| "middle": [], |
| "last": "Ravichander", |
| "suffix": "" |
| }, |
| { |
| "first": "Norman", |
| "middle": [], |
| "last": "Sadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Carolyn", |
| "middle": [], |
| "last": "Rose", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2340--2353", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aakanksha Naik, Abhilasha Ravichander, Norman Sadeh, Carolyn Rose, and Graham Neubig. 2018. Stress test evaluation for natural language inference. In Proceedings of the 27th International Conference on Computational Linguistics, pages 2340-2353, Santa Fe, New Mexico, USA. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Ju_cse_tac: Textual entailment recognition system at tac rte-6", |
| "authors": [ |
| { |
| "first": "Partha", |
| "middle": [], |
| "last": "Pakray", |
| "suffix": "" |
| }, |
| { |
| "first": "Santanu", |
| "middle": [], |
| "last": "Pal", |
| "suffix": "" |
| }, |
| { |
| "first": "Soujanya", |
| "middle": [], |
| "last": "Poria", |
| "suffix": "" |
| }, |
| { |
| "first": "Sivaji", |
| "middle": [], |
| "last": "Bandyopadhyay", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "F" |
| ], |
| "last": "Gelbukh", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Partha Pakray, Santanu Pal, Soujanya Poria, Sivaji Bandyopadhyay, and Alexander F Gelbukh. 2010. Ju_cse_tac: Textual entailment recognition system at tac rte-6. In TAC Workshop.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Multitask video captioning with video and entailment generation", |
| "authors": [ |
| { |
| "first": "Ramakanth", |
| "middle": [], |
| "last": "Pasunuru", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1273--1283", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramakanth Pasunuru and Mohit Bansal. 2017. Multi- task video captioning with video and entailment generation. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 1273- 1283.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Most \"babies\" are \"little\" and most \"problems\" are \"huge\": Compositional entailment in adjectivenouns", |
| "authors": [ |
| { |
| "first": "Ellie", |
| "middle": [], |
| "last": "Pavlick", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2164--2173", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellie Pavlick and Chris Callison-Burch. 2016. Most \"babies\" are \"little\" and most \"problems\" are \"huge\": Compositional entailment in adjective- nouns. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2164-2173. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Framenet+: Fast paraphrastic tripling of framenet", |
| "authors": [ |
| { |
| "first": "Ellie", |
| "middle": [], |
| "last": "Pavlick", |
| "suffix": "" |
| }, |
| { |
| "first": "Travis", |
| "middle": [], |
| "last": "Wolfe", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushpendre", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "408--413", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellie Pavlick, Travis Wolfe, Pushpendre Rastogi, Chris Callison-Burch, Mark Dredze, and Benjamin Van Durme. 2015. Framenet+: Fast paraphrastic tripling of framenet. In Proceedings of the 53rd An- nual Meeting of the Association for Computational Linguistics and the 7th International Joint Confer- ence on Natural Language Processing (Volume 2: Short Papers), pages 408-413, Beijing, China. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christo- pher D. Manning. 2014. Glove: Global vectors for word representation. In Empirical Methods in Nat- ural Language Processing (EMNLP), pages 1532- 1543.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Language of riddles: new perspectives", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas A", |
| "middle": [], |
| "last": "Pepicello", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Green", |
| "suffix": "" |
| } |
| ], |
| "year": 1984, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William J Pepicello and Thomas A Green. 1984. Lan- guage of riddles: new perspectives. The Ohio State University Press.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Learning accurate, compact, and interpretable tree annotation", |
| "authors": [ |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Barrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Romain", |
| "middle": [], |
| "last": "Thibaux", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "433--440", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Slav Petrov, Leon Barrett, Romain Thibaux, and Dan Klein. 2006. Learning accurate, compact, and inter- pretable tree annotation. In Proceedings of the 21st International Conference on Computational Lin- guistics and the 44th annual meeting of the Associa- tion for Computational Linguistics, pages 433-440. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "On the evaluation of semantic phenomena in neural machine translation using natural language inference", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Poliak", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "513--523", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Poliak, Yonatan Belinkov, James Glass, and Benjamin Van Durme. 2018a. On the evaluation of semantic phenomena in neural machine transla- tion using natural language inference. In Proceed- ings of the 2018 Conference of the North Ameri- can Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 2 (Short Papers), pages 513-523, New Orleans, Louisiana. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Hypothesis only baselines in natural language inference", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Poliak", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Naradowsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Aparajita", |
| "middle": [], |
| "last": "Haldar", |
| "suffix": "" |
| }, |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Rudinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
| "volume": "", |
| "issue": "", |
| "pages": "180--191", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Poliak, Jason Naradowsky, Aparajita Haldar, Rachel Rudinger, and Benjamin Van Durme. 2018b. Hypothesis only baselines in natural language in- ference. In Proceedings of the Seventh Joint Con- ference on Lexical and Computational Semantics, pages 180-191, New Orleans, Louisiana. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "On the function of existential presupposition in discourse", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Ellen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Prince", |
| "suffix": "" |
| } |
| ], |
| "year": 1978, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen F Prince. 1978. On the function of existential presupposition in discourse. In Papers from the...", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "From humor recognition to irony detection: The figurative language of social media", |
| "authors": [ |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Reyes", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "Davide", |
| "middle": [], |
| "last": "Buscaldi", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Data & Knowledge Engineering", |
| "volume": "74", |
| "issue": "", |
| "pages": "1--12", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antonio Reyes, Paolo Rosso, and Davide Buscaldi. 2012. From humor recognition to irony detection: The figurative language of social media. Data & Knowledge Engineering, 74:1-12.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Relation extraction with matrix factorization and universal schemas", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Limin", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [ |
| "M" |
| ], |
| "last": "Marlin", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "74--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Riedel, Limin Yao, Andrew McCallum, and Benjamin M. Marlin. 2013. Relation extraction with matrix factorization and universal schemas. In Pro- ceedings of the 2013 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 74-84, Atlanta, Georgia. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Social bias in elicited natural language inferences", |
| "authors": [ |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Rudinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandler", |
| "middle": [], |
| "last": "May", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First ACL Workshop on Ethics in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "74--79", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rachel Rudinger, Chandler May, and Benjamin Van Durme. 2017. Social bias in elicited natural lan- guage inferences. In Proceedings of the First ACL Workshop on Ethics in Natural Language Process- ing, pages 74-79, Valencia, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Gender bias in coreference resolution", |
| "authors": [ |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Rudinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Naradowsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Leonard", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "8--14", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018a. Gender bias in coreference resolution. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 8-14, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Neural models of factuality", |
| "authors": [ |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Rudinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [ |
| "Steven" |
| ], |
| "last": "White", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "731--744", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rachel Rudinger, Aaron Steven White, and Benjamin Van Durme. 2018b. Neural models of factuality. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Compu- tational Linguistics: Human Language Technolo- gies, Volume 1 (Long Papers), pages 731-744, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Relation alignment for textual entailment recognition", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Sammons", |
| "suffix": "" |
| }, |
| { |
| "first": "Vinod", |
| "middle": [], |
| "last": "Vg", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Vydiswaran", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Vieira", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Johri", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Vivek", |
| "middle": [], |
| "last": "Goldwasser", |
| "suffix": "" |
| }, |
| { |
| "first": "Gourab", |
| "middle": [], |
| "last": "Srikumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuancheng", |
| "middle": [], |
| "last": "Kundu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Small", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "TAC Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Sammons, VG Vinod Vydiswaran, Tim Vieira, Nikhil Johri, Ming-Wei Chang, Dan Goldwasser, Vivek Srikumar, Gourab Kundu, Yuancheng Tu, Kevin Small, et al. 2009. Relation alignment for tex- tual entailment recognition. In TAC Workshop.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Determining modality and factuality for text entailment", |
| "authors": [ |
| { |
| "first": "Roser", |
| "middle": [], |
| "last": "Sauri", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Semantic Computing, 2007. ICSC 2007. International Conference on", |
| "volume": "", |
| "issue": "", |
| "pages": "509--516", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roser Sauri and James Pustejovsky. 2007. Determin- ing modality and factuality for text entailment. In Semantic Computing, 2007. ICSC 2007. Interna- tional Conference on, pages 509-516. IEEE.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Open language learning for information extraction", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Schmitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Bart", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Soderland", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "523--534", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Schmitz, Robert Bart, Stephen Soderland, Oren Etzioni, et al. 2012. Open language learn- ing for information extraction. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 523-534. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "Verbnet: A broadcoverage, comprehensive verb lexicon", |
| "authors": [ |
| { |
| "first": "Karin Kipper", |
| "middle": [], |
| "last": "Schuler", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karin Kipper Schuler. 2005. Verbnet: A broad- coverage, comprehensive verb lexicon.", |
| "links": null |
| }, |
| "BIBREF63": { |
| "ref_id": "b63", |
| "title": "Does string-based neural mt learn source syntax?", |
| "authors": [ |
| { |
| "first": "Xing", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Inkit", |
| "middle": [], |
| "last": "Padhi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1526--1534", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xing Shi, Inkit Padhi, and Kevin Knight. 2016. Does string-based neural mt learn source syntax? In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing, pages 1526- 1534, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF64": { |
| "ref_id": "b64", |
| "title": "Sentiment analysis: an overview from linguistics", |
| "authors": [ |
| { |
| "first": "Maite", |
| "middle": [], |
| "last": "Taboada", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Annual Review of Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "325--347", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maite Taboada. 2016. Sentiment analysis: an overview from linguistics. Annual Review of Linguistics, 2:325-347.", |
| "links": null |
| }, |
| "BIBREF65": { |
| "ref_id": "b65", |
| "title": "Introduction to the conll-2003 shared task: Language-independent named entity recognition", |
| "authors": [ |
| { |
| "first": "Erik", |
| "middle": [ |
| "F" |
| ], |
| "last": "Tjong", |
| "suffix": "" |
| }, |
| { |
| "first": "Kim", |
| "middle": [], |
| "last": "Sang", |
| "suffix": "" |
| }, |
| { |
| "first": "Fien", |
| "middle": [], |
| "last": "De Meulder", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003", |
| "volume": "4", |
| "issue": "", |
| "pages": "142--147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task: Language-independent named entity recognition. In Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003 -Volume 4, CONLL '03, pages 142-147, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF66": { |
| "ref_id": "b66", |
| "title": "Performance impact caused by hidden bias of training data for recognizing textual entailment", |
| "authors": [ |
| { |
| "first": "Masatoshi", |
| "middle": [], |
| "last": "Tsuchiya", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "11th International Conference on Language Resources and Evaluation (LREC2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Masatoshi Tsuchiya. 2018. Performance impact caused by hidden bias of training data for recog- nizing textual entailment. In 11th International Conference on Language Resources and Evaluation (LREC2018).", |
| "links": null |
| }, |
| "BIBREF67": { |
| "ref_id": "b67", |
| "title": "Extracting Implicit Knowledge from Text", |
| "authors": [ |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benjamin Van Durme. 2010. Extracting Implicit Knowledge from Text. Ph.D. thesis, University of Rochester, Rochester, NY 14627.", |
| "links": null |
| }, |
| "BIBREF68": { |
| "ref_id": "b68", |
| "title": "What syntax can contribute in the entailment task", |
| "authors": [ |
| { |
| "first": "Lucy", |
| "middle": [], |
| "last": "Vanderwende", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "William B Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Machine Learning Challenges. Evaluating Predictive Uncertainty, Visual Object Classification, and Recognising Tectual Entailment", |
| "volume": "", |
| "issue": "", |
| "pages": "205--216", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucy Vanderwende and William B Dolan. 2006. What syntax can contribute in the entailment task. In Machine Learning Challenges. Evaluating Predic- tive Uncertainty, Visual Object Classification, and Recognising Tectual Entailment, pages 205-216. Springer.", |
| "links": null |
| }, |
| "BIBREF69": { |
| "ref_id": "b69", |
| "title": "Microsoft research at rte-2: Syntactic contributions in the entailment task: an implementation", |
| "authors": [ |
| { |
| "first": "Lucy", |
| "middle": [], |
| "last": "Vanderwende", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Second PASCAL Challenges Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucy Vanderwende, Arul Menezes, and Rion Snow. 2006. Microsoft research at rte-2: Syntactic con- tributions in the entailment task: an implementation. In Second PASCAL Challenges Workshop.", |
| "links": null |
| }, |
| "BIBREF70": { |
| "ref_id": "b70", |
| "title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amapreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel R", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.07461" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amapreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461.", |
| "links": null |
| }, |
| "BIBREF71": { |
| "ref_id": "b71", |
| "title": "Inference is everything: Recasting semantic resources into a unified evaluation framework", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Steven White", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushpendre", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "996--1005", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Steven White, Pushpendre Rastogi, Kevin Duh, and Benjamin Van Durme. 2017. Inference is ev- erything: Recasting semantic resources into a uni- fied evaluation framework. In Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 996-1005, Taipei, Taiwan. Asian Federation of Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF72": { |
| "ref_id": "b72", |
| "title": "A computational model of s-selection", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Steven White", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Rawlins", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Semantics and linguistic theory", |
| "volume": "26", |
| "issue": "", |
| "pages": "641--663", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Steven White and Kyle Rawlins. 2016. A com- putational model of s-selection. In Semantics and linguistic theory, volume 26, pages 641-663.", |
| "links": null |
| }, |
| "BIBREF73": { |
| "ref_id": "b73", |
| "title": "The role of veridicality and factivity in clause selection", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Steven White", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Rawlins", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 48th Annual Meeting of the North East Linguistic Society", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Steven White and Kyle Rawlins. 2018. The role of veridicality and factivity in clause selection. In Proceedings of the 48th Annual Meeting of the North East Linguistic Society, page to appear, Amherst, MA. GLSA Publications.", |
| "links": null |
| }, |
| "BIBREF74": { |
| "ref_id": "b74", |
| "title": "Annotating expressions of opinions and emotions in language. Language resources and evaluation", |
| "authors": [ |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "39", |
| "issue": "", |
| "pages": "165--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Janyce Wiebe, Theresa Wilson, and Claire Cardie. 2005. Annotating expressions of opinions and emo- tions in language. Language resources and evalua- tion, 39(2-3):165-210.", |
| "links": null |
| }, |
| "BIBREF75": { |
| "ref_id": "b75", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel R", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1704.05426" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel R Bow- man. 2017. A broad-coverage challenge corpus for sentence understanding through inference. arXiv preprint arXiv:1704.05426.", |
| "links": null |
| }, |
| "BIBREF76": { |
| "ref_id": "b76", |
| "title": "Recognizing strong and weak opinion clauses", |
| "authors": [ |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Hwa", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Computational intelligence", |
| "volume": "22", |
| "issue": "2", |
| "pages": "73--99", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theresa Wilson, Janyce Wiebe, and Rebecca Hwa. 2006. Recognizing strong and weak opinion clauses. Computational intelligence, 22(2):73-99.", |
| "links": null |
| }, |
| "BIBREF77": { |
| "ref_id": "b77", |
| "title": "Humor recognition and humor anchor extraction", |
| "authors": [ |
| { |
| "first": "Diyi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2367--2376", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diyi Yang, Alon Lavie, Chris Dyer, and Eduard Hovy. 2015. Humor recognition and humor anchor ex- traction. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Pro- cessing, pages 2367-2376. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF78": { |
| "ref_id": "b78", |
| "title": "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| }, |
| { |
| "first": "Alice", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Micah", |
| "middle": [], |
| "last": "Hodosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hockenmaier", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "67--78", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. 2014. From image descriptions to visual denotations: New similarity metrics for se- mantic inference over event descriptions. Transac- tions of the Association for Computational Linguis- tics, 2:67-78.", |
| "links": null |
| }, |
| "BIBREF79": { |
| "ref_id": "b79", |
| "title": "Ordinal common-sense inference", |
| "authors": [ |
| { |
| "first": "Sheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Rudinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association of Computational Linguistics", |
| "volume": "5", |
| "issue": "1", |
| "pages": "379--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sheng Zhang, Rachel Rudinger, Kevin Duh, and Ben- jamin Van Durme. 2017. Ordinal common-sense in- ference. Transactions of the Association of Compu- tational Linguistics, 5(1):379-395.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "(4) a. That thing happened b. That thing may or may not have happened c. That thing didn't happen", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "(5) a. Michael swatted the fly b. cause(E, Agent) c. Agent caused the E d. Michael caused the swatting", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "Language (Puns) Figurative language demonstrates natural language's expressiveness and wide variations. Understanding and recognizing figurative language \"entail[s] cognitive capabilities to abstract and meta-represent meanings beyond physical words\"", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "num": null, |
| "text": "(6) a. Name heard that Pun b. Name heard a pun c. Name did not hear a pun", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF4": { |
| "num": null, |
| "text": "(8) a. When asked about Item, Name said Review b. Name liked the Item c. Name did not like the Item", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF1": { |
| "num": null, |
| "type_str": "table", |
| "text": "Example sentence pairs for different semantic phenomena.", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF6": { |
| "num": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |