ACL-OCL / Base_JSON /prefixR /json /R15 /R15-1025.json
Benjamin Aw
Add updated pkl file v3
6fa4bc9
{
"paper_id": "R15-1025",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T14:57:38.194485Z"
},
"title": "Weakly Supervised Definition Extraction *",
"authors": [
{
"first": "Luis",
"middle": [],
"last": "Espinosa-Anke",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Francesco",
"middle": [],
"last": "Ronzano",
"suffix": "",
"affiliation": {},
"email": "francesco.ronzano@upf.edu"
},
{
"first": "Horacio",
"middle": [],
"last": "Saggion",
"suffix": "",
"affiliation": {},
"email": "horacio.saggion@upf.edu"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Definition Extraction (DE) is the task to extract textual definitions from naturally occurring text. It is gaining popularity as a prior step for constructing taxonomies, ontologies, automatic glossaries or dictionary entries. These fields of application motivate greater interest in well-formed encyclopedic text from which to extract definitions, and therefore DE for academic or lay discourse has received less attention. In this paper we propose a weakly supervised bootstrapping approach for identifying textual definitions with higher linguistic variability than the classic encyclopedic genus-et-differentia definition, and take the domain of Natural Language Processing as a use case. We also introduce a novel set of features for DE and explore their relevance. Evaluation is carried out on two datasets that reflect opposed ways of expressing definitional knowledge.",
"pdf_parse": {
"paper_id": "R15-1025",
"_pdf_hash": "",
"abstract": [
{
"text": "Definition Extraction (DE) is the task to extract textual definitions from naturally occurring text. It is gaining popularity as a prior step for constructing taxonomies, ontologies, automatic glossaries or dictionary entries. These fields of application motivate greater interest in well-formed encyclopedic text from which to extract definitions, and therefore DE for academic or lay discourse has received less attention. In this paper we propose a weakly supervised bootstrapping approach for identifying textual definitions with higher linguistic variability than the classic encyclopedic genus-et-differentia definition, and take the domain of Natural Language Processing as a use case. We also introduce a novel set of features for DE and explore their relevance. Evaluation is carried out on two datasets that reflect opposed ways of expressing definitional knowledge.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Definition Extraction (DE) is the task to automatically extract textual definitions from text . It has received notorious attention for its potential application to glossary generation (Muresan and Klavans, 2002; Park et al., 2002) , terminological databases (Nakamura and Nagao, 1988) , question answering systems (Saggion and Gaizauskas, 2004; Cui et al., 2005) , for supporting terminological applications (Meyer, 2001; Sierra et al., 2006) , e-learning (Westerhout and Monachesi, 2007) , and more recently for multilingual paraphrase extraction (Yan et al., 2013) , ontology learning (Velardi et al., 2013) or hypernym discovery (Flati et al., 2014) .",
"cite_spans": [
{
"start": 185,
"end": 212,
"text": "(Muresan and Klavans, 2002;",
"ref_id": "BIBREF23"
},
{
"start": 213,
"end": 231,
"text": "Park et al., 2002)",
"ref_id": "BIBREF27"
},
{
"start": 259,
"end": 285,
"text": "(Nakamura and Nagao, 1988)",
"ref_id": "BIBREF24"
},
{
"start": 315,
"end": 345,
"text": "(Saggion and Gaizauskas, 2004;",
"ref_id": "BIBREF32"
},
{
"start": 346,
"end": 363,
"text": "Cui et al., 2005)",
"ref_id": "BIBREF7"
},
{
"start": 409,
"end": 422,
"text": "(Meyer, 2001;",
"ref_id": "BIBREF21"
},
{
"start": 423,
"end": 443,
"text": "Sierra et al., 2006)",
"ref_id": "BIBREF37"
},
{
"start": 457,
"end": 489,
"text": "(Westerhout and Monachesi, 2007)",
"ref_id": "BIBREF42"
},
{
"start": 549,
"end": 567,
"text": "(Yan et al., 2013)",
"ref_id": "BIBREF44"
},
{
"start": 588,
"end": 610,
"text": "(Velardi et al., 2013)",
"ref_id": "BIBREF41"
},
{
"start": 633,
"end": 653,
"text": "(Flati et al., 2014)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The corpora that have been used for evaluating DE systems are varied, although in general efforts have been greatly focused on academic and encyclopedic genres. Some prominent examples include German technical texts (Storrer and Wellinghoff, 2006) , the IULA Technical Corpus (in Spanish) (Alarc\u00f3n et al., 2009) , the ACL Anthology (Jin et al., 2013; Reiplinger et al., 2012) , the BNC corpus (Rodr\u00edguez, 2004) , Wikipedia , ensembles of domain glossaries and Web documents (Velardi et al., 2008) , or technical texts in various languages (Westerhout and Monachesi, 2007; Przepi\u00f3rkowski et al., 2007; Borg et al., 2009; Deg\u00f3rski et al., 2008; Del Gaudio et al., 2013) .",
"cite_spans": [
{
"start": 216,
"end": 247,
"text": "(Storrer and Wellinghoff, 2006)",
"ref_id": "BIBREF38"
},
{
"start": 289,
"end": 311,
"text": "(Alarc\u00f3n et al., 2009)",
"ref_id": "BIBREF0"
},
{
"start": 332,
"end": 350,
"text": "(Jin et al., 2013;",
"ref_id": "BIBREF17"
},
{
"start": 351,
"end": 375,
"text": "Reiplinger et al., 2012)",
"ref_id": "BIBREF30"
},
{
"start": 393,
"end": 410,
"text": "(Rodr\u00edguez, 2004)",
"ref_id": "BIBREF31"
},
{
"start": 474,
"end": 496,
"text": "(Velardi et al., 2008)",
"ref_id": "BIBREF40"
},
{
"start": 539,
"end": 571,
"text": "(Westerhout and Monachesi, 2007;",
"ref_id": "BIBREF42"
},
{
"start": 572,
"end": 600,
"text": "Przepi\u00f3rkowski et al., 2007;",
"ref_id": "BIBREF28"
},
{
"start": 601,
"end": 619,
"text": "Borg et al., 2009;",
"ref_id": "BIBREF4"
},
{
"start": 620,
"end": 642,
"text": "Deg\u00f3rski et al., 2008;",
"ref_id": "BIBREF9"
},
{
"start": 643,
"end": 667,
"text": "Del Gaudio et al., 2013)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We propose a DE approach which, from a starting set of encyclopedic definition seeds, self-trains iteratively and gradually fits its classification capability to a target domain-specific test set. Evaluation is carried out on two corpora: First, a set of 50 abstracts of papers in the field of NLP 1 . Here, the target term is defined in the first sentence, and additional information may appear in the form of \"syntactically plausible false definitions\", i.e. sentences where the target term is also present, relevant information is provided, but do not constitute a definition . Second, the W00 corpus (Jin et al., 2013) , a subset of the ACL Anthology manually annotated with definitions, and which includes highly variable definitions both in terms of content and syntax. We achieve competitive results in both corpora.",
"cite_spans": [
{
"start": 604,
"end": 622,
"text": "(Jin et al., 2013)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The main contributions of our paper are: (1) A set of experiments demonstrating the soundness of our approach for DE in two different linguistic registers;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "(2) A novel set of features and an exploration of their influence in the learning process; and (3) A small, focused benchmarking dataset for DE evaluation in the NLP domain.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The remainder of this paper is structured as follows: Section 2 reviews prominent work in DE; Section 3 provides a detailed description of the datasets used; Section 4 presents the features used in our classification procedure and describes the bootstrapping algorithm; Section 5 shows the performance of our approach; Section 6 lists the best features at important iterations and discusses these findings; and finally Section 7 summarizes the main ideas contained in this paper and outlines potential directions for future work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Definitions are a well-studied topic, which traces back to the Aristotelian genus et differentia model of a definition, where the defined term (definiendum) is described by mentioning its immediate superordinate, usually a hypernym (genus), and the cluster of words that differentiate such definiendum from others of its class (definiens). Furthermore, additional research has elaborated on different criteria to take into consideration when deciding what is a definition: either by looking at their degree of formality (Trimble, 1985) , the extent to which they are specific to an instance of an object or to the object itself (Sepp\u00e4l\u00e4, 2009) , the semantic relations holding between definiendum and concepts included in the definiens (Alarc\u00f3n et al., 2009; Schumann, 2011) , the fitness of a definition for target users (Bergenholtz and Tarp, 2003; Fuertes-Olivera, 2010) or their stylistic and domain features (Velardi et al., 2008) . In this work we elaborate on some ideas from the latter, especially on their domain and stylistic filters, which motivated the design of statistically-motivated features to describe a word's salience in terms of definitional knowledge (cf. Section 4).",
"cite_spans": [
{
"start": 520,
"end": 535,
"text": "(Trimble, 1985)",
"ref_id": "BIBREF39"
},
{
"start": 628,
"end": 643,
"text": "(Sepp\u00e4l\u00e4, 2009)",
"ref_id": "BIBREF36"
},
{
"start": 736,
"end": 758,
"text": "(Alarc\u00f3n et al., 2009;",
"ref_id": "BIBREF0"
},
{
"start": 759,
"end": 774,
"text": "Schumann, 2011)",
"ref_id": "BIBREF35"
},
{
"start": 822,
"end": 850,
"text": "(Bergenholtz and Tarp, 2003;",
"ref_id": "BIBREF1"
},
{
"start": 851,
"end": 873,
"text": "Fuertes-Olivera, 2010)",
"ref_id": "BIBREF16"
},
{
"start": 913,
"end": 935,
"text": "(Velardi et al., 2008)",
"ref_id": "BIBREF40"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Background",
"sec_num": "2"
},
{
"text": "Regarding DE, the earliest attempts focused on lexico-syntactic pattern-matching, either by looking at cue verbs (Rebeyrolle and Tanguy, 2000; Saggion and Gaizauskas, 2004; Sarmento et al., 2006; Storrer and Wellinghoff, 2006) , or other features like punctuation or layout (Muresan and Klavans, 2002; Malais\u00e9 et al., 2004; S\u00e1nchez and M\u00e1rquez, 2005; Przepi\u00f3rkowski et al., 2007; Monachesi and Westerhout, 2008) . As for supervised settings, let us refer to , who propose a generalization of word lattices for identifying definitional components and ultimately identifying definitional text fragments. Finally, more complex morphosyntactic patterns were used by (Boella et al., 2014) , who model single tokens as relations over the sentence syntactic dependencies.",
"cite_spans": [
{
"start": 113,
"end": 142,
"text": "(Rebeyrolle and Tanguy, 2000;",
"ref_id": "BIBREF29"
},
{
"start": 143,
"end": 172,
"text": "Saggion and Gaizauskas, 2004;",
"ref_id": "BIBREF32"
},
{
"start": 173,
"end": 195,
"text": "Sarmento et al., 2006;",
"ref_id": "BIBREF34"
},
{
"start": 196,
"end": 226,
"text": "Storrer and Wellinghoff, 2006)",
"ref_id": "BIBREF38"
},
{
"start": 274,
"end": 301,
"text": "(Muresan and Klavans, 2002;",
"ref_id": "BIBREF23"
},
{
"start": 302,
"end": 323,
"text": "Malais\u00e9 et al., 2004;",
"ref_id": "BIBREF20"
},
{
"start": 324,
"end": 350,
"text": "S\u00e1nchez and M\u00e1rquez, 2005;",
"ref_id": "BIBREF33"
},
{
"start": 351,
"end": 379,
"text": "Przepi\u00f3rkowski et al., 2007;",
"ref_id": "BIBREF28"
},
{
"start": 380,
"end": 411,
"text": "Monachesi and Westerhout, 2008)",
"ref_id": "BIBREF22"
},
{
"start": 662,
"end": 683,
"text": "(Boella et al., 2014)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Background",
"sec_num": "2"
},
{
"text": "We refer now to unsupervised approaches to DE. (Reiplinger et al., 2012) benefit from hand crafted definitional patterns. Starting from a set of seed terms and patterns, term/definition pairs are iteratively acquired, together with bootstrapped new patterns. These are obtained via a generalization approach over part-of-speech and term wildcards. Additionally, two interconnected works are (De Benedictis et al., 2013) and , in that both bootstrap the web for acquiring large multilingual domain glossaries starting with a few seeds for term and gloss. While both systems behave similarly in extracting glosses and learning new patterns by exploiting html tags, they are substantially different in how acquired glosses are ranked. Specifically, the former exploits the bag-of-words representation of each extracted gloss and its intersection with the domain terminology, while the latter leverages Probabilistic Topic Models (PTM) by estimating the probability of words and term/gloss pairs to be pertinent to the domain.",
"cite_spans": [
{
"start": 47,
"end": 72,
"text": "(Reiplinger et al., 2012)",
"ref_id": "BIBREF30"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Background",
"sec_num": "2"
},
{
"text": "Our weakly supervised DE approach requires: (1) A general-domain (encyclopedic) set of seeds of textual definitions (T S) and (2) A domain-specific development set, e.g. a collection of papers (DS).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3"
},
{
"text": "For our experiments, we use as T S the WCL Corpus , a subset of Wikipedia manually annotated with definitions and hypernyms. This dataset is constructed under the intuition that the first sentence of a Wikipedia article constitutes its textual definition. It is important to highlight that, while this dataset includes semantic information manually annotated such as definiendum or hypernym, we do not exploit any of it, which makes the seed-construction step highly flexible as it only requires the sentence definition/non-definition class. We use as DS a subset of the ACL ARC corpus (Bird et al., 2008) , processed with ParsCit (Councill et al., 2008) . In this dataset, a well-formedness confidence score is given to each sentence (as these come from pdf parsing and noise is introduced in the process). We exploit this information and keep 500k sentences with a score of over .95.",
"cite_spans": [
{
"start": 586,
"end": 605,
"text": "(Bird et al., 2008)",
"ref_id": "BIBREF2"
},
{
"start": 631,
"end": 654,
"text": "(Councill et al., 2008)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3"
},
{
"text": "For evaluation, we use two datasets: The MSR-NLP 2 and the W00 corpus. The MSR-NLP is a manually constructed small list of 50 abstracts in the NLP field, amounting to 304 sentences: 49 definitions and 255 non-definitions. They are extracted from the Microsoft Academic Research website 3 , where abstracts including a definition provide a \"Definition Context\" section. This small dataset complies with the stylistic requirements of academic abstract writing, i.e. the use of well-developed, unified, coherent and concise language, and understandability to a wide audience 4 . A different register can be found in the W00 dataset, which includes many definitional sentences that are highly domain-specific, sometimes including the definition of a very specific concept, and showing higher linguistic variability (e.g. the definiendum might not appear at the beginning of the sentence, and unlike most abstracts, citations might be present). We illustrate this difference with two sentences containing a definition from the MSR-NLP (1) and the W00 (2) corpora:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3"
},
{
"text": "(1) The Hidden Markov Model (HMM) is a probabilistic model used widely in the fields of Bioinformatics and Speech Recognition .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3"
},
{
"text": "(2) This corpus is collected and annotated for the GNOME project (Poesio, 2000) , which aims at developing general algorithms for generating nominal expressions",
"cite_spans": [
{
"start": 65,
"end": 79,
"text": "(Poesio, 2000)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3"
},
{
"text": "Note that in the case of (2), only the sequence \"GNOME project aims at developing general algorithms for generating nominal expressions\" is labelled as definition in the original dataset. In this work a definitional sentence is generalized as being or containing a definition, which enables casting the task as a sentence-classification problem, which is common practice in DE Boella et al., 2014; Espinosa-Anke and Saggion, 2014) .",
"cite_spans": [
{
"start": 377,
"end": 397,
"text": "Boella et al., 2014;",
"ref_id": "BIBREF3"
},
{
"start": 398,
"end": 430,
"text": "Espinosa-Anke and Saggion, 2014)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3"
},
{
"text": "Intuitively, we would expect a general-purpose DE system to be more likely to label sentence (1), as it includes the required elements for a canonical genus-et-differentia definition. This motivates our experiments, where we attempt to fit a model iteratively to be able to perform better in sentences like (2).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3"
},
{
"text": "As mentioned in Section 3, we approach the DE task as a sentence classification problem, where a sentence can be either a definition (def ) or not (nodef ). However, instead of modelling sentence-level features like sentence length or depth of the parse tree, we rather encode word-level features in order to exploit individual items' characteristics in terms of position within the sentence, frequency or relevance in a definition corpus. These word-level features are used for classifying each word in a sentence (def |nodef ).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Modelling the Data",
"sec_num": "4"
},
{
"text": "We adopt two extraction strategies depending on whether we operate over DS or any of the two evaluation corpora (MSR-NLP and W00). In the case DS, the goal is to extract complete high-quality definitional and non-definitional sentences. Therefore, we only consider as potential candidates for bootstrapping those sentences where all the words have the same label (i.e. discarding, for example, a 10word sentence where nine are tagged as def and one as nodef ). This is in fact the most frequent case by a large margin, so we are confident that there are very few potentially relevant sentences being left out. Since evaluation is carried out at word level, this constraint does not apply.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Modelling the Data",
"sec_num": "4"
},
{
"text": "We exploit the potential of the Conditional Random Fields 5 algorithm (Lafferty et al., 2001) to encode prior and posterior contextual information of a given element in a sequence (in our case, a word in a sentence). Specifically, we consider a context window of [-2,2] . For each word, we generate a feature vector consisting on the following features: 1. sur: Surface form of the current token without stemming.",
"cite_spans": [
{
"start": 70,
"end": 93,
"text": "(Lafferty et al., 2001)",
"ref_id": "BIBREF19"
},
{
"start": 263,
"end": 269,
"text": "[-2,2]",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Modelling the Data",
"sec_num": "4"
},
{
"text": "2. lem: Lemma of the current token.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Modelling the Data",
"sec_num": "4"
},
{
"text": "3. pos: Part-of-speech of the current token.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Modelling the Data",
"sec_num": "4"
},
{
"text": "4. bio-np: Whether the current word is at the beginning (B), inside (I) or outside (O) a noun phrase. Noun phrases are obtained with the following regular expression over part-of-speech tags: [JN]*N.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Modelling the Data",
"sec_num": "4"
},
{
"text": "Dependency relation between the current token and its head.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "6. head-id: The index of the head-word (or governor) in the syntactic dependency tree.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "7. bio-def: An extension of the bio-np feature that also takes into account the definition-wise position. We perform this na\u00efvely by finding the first verb of the sentence, and tagging all words before it as definiendum and the rest as definiens. We illustrate this feature below, where each word's NP-chunking comes from the bio-np feature, D refers to definiendum and d refers to definiens.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "The o-D Abwehr b-D was o-d a o-d German b-d intelligence i-d organization i-d from o-d 1921 o-d to o-d 1944 o-d . 8",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": ". termhood: This metric determines the importance of a candidate token to be a terminological unit by looking at its frequency in general and domain-specific corpora (Kit and Liu, 2008) . It is obtained as follows:",
"cite_spans": [
{
"start": 166,
"end": 185,
"text": "(Kit and Liu, 2008)",
"ref_id": "BIBREF18"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "Termhood(w) = r D (w) |V D | \u2212 r B (w) |V B | 5",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "We use the CRF++ toolkit: http://crfpp.googlecode.com/svn/trunk/doc/index.html Where r D is the frequency-wise ranking of word w in a domain corpus (in our case, T S), and r B is the frequency-wise ranking of such word in a general corpus, namely the Brown corpus (Francis and Kucera, 1979) . Denominators refer to the token-level size of each corpus. If word w only appears in the general corpus, we set the value of Termhood(w) to \u2212\u221e, and to \u221e in the opposite case. 9. tf-gen: Frequency of the current word in the general-domain corpus r B (Brown Corpus).",
"cite_spans": [
{
"start": 264,
"end": 290,
"text": "(Francis and Kucera, 1979)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "10. tf-dom: Frequency of the current word in the domain-specific corpus r D (T S).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "11. tfidf: Tf-idf of the current word over the training set, where each sentence is considered a separate document.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "12. def prom: We introduce the notion of Definitional Prominence aiming at establishing the probability of a word w to appear in a definitional sentence (s = def ). For this, we consider its frequency in definitions and nondefinitions in the T S as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "DefProm(w) = DF |Defs| \u2212 NF |Nodefs| where DF = i=n i=0 (s i = def \u2227w \u2208 s i ) and NF = i=n i=0 (s i = nodef \u2227w \u2208 s i ).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "Similarly as with the termhood feature, in cases where a word w is only found in definitional sentences, we set the DefProm(w) value to \u221e, and to \u2212\u221e if it was only seen in non-definitional sentences.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "dep:",
"sec_num": "5."
},
{
"text": "Prominence in order to model our intuition that a word appearing more often in position of potential definiendum might reveal its role as a definitional keyword. This feature is computed as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D prom: We also introduce Definiendum",
"sec_num": "13."
},
{
"text": "DP(w) = i=n i=0 w i \u2208 term D |DT |",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D prom: We also introduce Definiendum",
"sec_num": "13."
},
{
"text": "where term D is a noun phrase (i.e. a term candidate) appearing in potential definiendum po-sition and |DT| refers to the size of the candidate term corpus in candidate definienda position.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D prom: We also introduce Definiendum",
"sec_num": "13."
},
{
"text": "Similarly computed as D prom, but considering position of potential definiens.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "d prom:",
"sec_num": "14."
},
{
"text": "As noted in Section 3, the initial T S consists of the WCL dataset, which makes our model suitable for DE in well-formed encyclopedic texts. However, our hypothesis that it would perform poorly in a linguistically more complex setting (e.g. in a corpus like the W00 dataset) is confirmed by the results at iteration 1 (see Table 1 ). Our bootstrapping approach is aimed at gradually obtaining a better fit model for W00, starting from our generic baseline trained exclusively on the WCL corpus. The following description of our approach is summarized in Algorithm 1. As mentioned above, T S is a manually labelled dataset where each sentence s \u2208 S is given a label d \u2208 D = {def, nodef }. Likewise, DS is an unlabelled subset of the ACL-ARC corpus, which amounts to 500k sentences. The first step is to initialize (1) The training set vocabulary V , which simply contains all the words in T S; and (2) The feature set F associated to each word w \u2208 V . Then, for each iteration until we reach 200, the algorithm extracts the best-scoring sentences as predicted by our CRF-based classififer (recall that only sentences where all words are assigned the same label are considered) for both labels def and nodef (s and s respectively), and uses them to increase the initial feature set and vocabulary. Next, it removes s and s from DS, trains and evaluates a model on both the MSR-NLP and the W00 datasets, and repeats until it reaches our manually set end point: iteration 200th.",
"cite_spans": [],
"ref_spans": [
{
"start": 323,
"end": 330,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "One important aspect to consider is that increasing the size of the training data does not have an effect of the features associated to a word. Incorporating definitions having concepts related to the target domain (NLP in our case) is a step forward, but their definitional salience (expressed by def prom, D prom and d prom) remains the same, as they were calculated before firing the bootstrapping algorithm. For this reason, we include a feature update step at iteration 100, our sole motivation being that, for evaluation purposes, we will have the same number of iterations before and after such step. It consists in resetting F to \u2205 and recalculating it. We hypothesize that the new feature values can reflect better the linguistic idiosyncrasies of a domain-specific definitional corpus. After 200 iterations, our bootstrapped dataset T S boot includes the original training data and 400 new sentences: 200 definitions and 200 nondefinitions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "As the bootstrapping process advances, s and s show greater linguistic variability because the training data includes more non-canonical definitions (Table 1) . V := {w :",
"cite_spans": [],
"ref_spans": [
{
"start": 149,
"end": 158,
"text": "(Table 1)",
"ref_id": null
}
],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "\u2203 (s, d) \u2208 T S \u2227 w \u2208 s} F := {f T S (w) : w \u2208 V } 1: for i = 0, i < 200, i + + do s = argmax s\u2208DS P (s = def ) s = argmax s\u2208DS P (s = nodef ) 2: for w \u2208 s \u222a s do 3: if w / \u2208 V then F = F \u222a {f T S (w)} V = V \u222a {w} 4:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "end if 5:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "end for T S = T S \u222a {(s , def ) , (s , nodef )} DS = DS \\ {(s , def ) , (s , nodef )} 6: if i = 100 then F = \u2205 7: for w \u2208 V do F = F \u222a {f T S (w)} 8:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "end for 9: Table 1 : Definitions extracted throughout the bootstrapping process from the ACL ARC corpus and P/R/F results at that iteration on the two evaluation corpora (without post-classification heuristics). Note the gradual increase in syntactic and terminological variability in the extracted definitions.",
"cite_spans": [],
"ref_spans": [
{
"start": 11,
"end": 18,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "end if model i = trainM odel (T S i , F i ) evaluateM odel (model i , {MSR-NLP,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "2009). It consists in a set of rules for labelswitching aimed at increasing the recall and ideally without hurting precision significantly. Let w i be a word classified as not being part of a definition (nodef ) at iteration i, we can rectify its class (w new i ) to being part of a definition (def ) as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "w new i = def if P (w i ) = def > \u03b8 def if P (w i ) = nodef < \u03bb, w syn i = P",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "Where w syn i refers to the dependency relation of the word examined at iteration i, and P is the predicative syntactic function of the word.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "Our goal is to increase the number of def words in a sentence in cases where they were discarded by a small margin. We hypothesize that this could be particularly useful in \"borderline\" cases (some words classified in a sentence as def, some as nodef ), where this heuristics helps our algorithm to make a decision always favouring definition labelling over non-definition. As for the constants, \u03b8 and \u03bb are empirically set to .35 and .8 respectively after experimenting with several thresholds and inspecting manually the resulting classification.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bootstrapping",
"sec_num": "4.1"
},
{
"text": "We evaluate the performance of our approach at each iteration on both datasets (MSR-NLP and W00) using the classic Precision, Recall and F-Measure scores. All the scores reported in this article are at word-level.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
{
"text": "The learning curves shown in Figure 1 demonstrate that our approach is suitable for fitting a model to a domain-specific dataset starting from generalpurpose encyclopedic seeds. Unsurprisingly, performance on the MSR-NLP corpus drops soon after reaching its peak due to the fact that the training set gradually becomes less standard. Interestingly, the feature-update step has a dramatic influence in performance in both corpora: On one hand, the performance peak in a dataset with less linguistic variability (MSR-NLP) is reached early, and after iteration 100, where the feature update step occurs, Precision decreases, while Recall remains the same. On the other hand, the numbers in the W00 dataset are fairly stable until iteration 100, where a significant improvement in both Precision and Recall is achieved.",
"cite_spans": [],
"ref_spans": [
{
"start": 29,
"end": 37,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
{
"text": "Let us look first at the results without applying recall-boosting post-classification heuristics: The performance of our models decreases in the MSR-NLP corpus after a few iterations (our best model is reached at iteration 23, where F=76.23), and this situation is unsurprisingly aggravated by the feature update step. However, our results improve significantly in the W00 dataset 6 after feature updating. Our best-performing model reaches F=70.72 at iteration 198.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
{
"text": "Moreover, we observed a minor improvement after incorporating the label-switching heuristics in both corpora. Specifically, for the MSR-NLP corpus the improvement was from the aforementioned F=76.34 to F=77.46, while in the W00 dataset, it improved from F=70.72 to F=71.85. Tables 2 and 3 show Precision, Recall and F-Score for our best models in both datasets.",
"cite_spans": [],
"ref_spans": [
{
"start": 274,
"end": 289,
"text": "Tables 2 and 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
{
"text": "These numbers confirm that we are able to generate a domain and genre-sensitive model provided we have a development set available of similar characteristics. The discrepancy in terms of performance as the bootstrapping algorithm advances is an indicator that the models we obtain become more tailored towards the specific corpus, and therefore less apt for performing well in the encyclopedic genre. Our approach seems suitable for partially alleviating the lack of manually labelled domain-specific data in the DE field.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
{
"text": "Let us also refer to the importance of having a development set as close as possible to the target corpus in terms of register and domain, and with a reasonable level of quality. In relation to this, we also performed experiments with a development set automatically constructed from the Web, but due to lack of preprocessing for noise filtering, results were unsatisfactory and therefore unreported in this paper.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
{
"text": "As for comparative evaluation, we cannot contrast our results directly with the ones reported in (Jin et 6 Note that since the W00 corpus is also a subset of the ACL ARC dataset, we first confirmed that it did not overlap with our dev-set. Table 3 : Best results for both the MSR-NLP dataset before (Pre-PCH) and after (Post-PCH) applying the post-classification heuristics.",
"cite_spans": [
{
"start": 97,
"end": 106,
"text": "(Jin et 6",
"ref_id": null
}
],
"ref_spans": [
{
"start": 240,
"end": 247,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
{
"text": "al., 2013), since while in both cases word-level evaluation is carried out, in our case we generalized all the words inside a sentence containing a definition to the label def. In addition, as it is pointed out in (Jin et al., 2013) , only in (Reiplinger et al., 2012) there is an attempt to extract definitions from the ACL ARC corpus, but their evaluation relies on human judgement, and their reported coverage refers to a pre-defined list of terms. In general, the results reported in this article are consistent with the ones obtained in previous work for similar tasks. For instance, prior experiments on the WCL dataset showed results ranging from F=54.42 to F=75.16 Boella et al., 2014) . In the case of the W00 dataset, (Jin et al., 2013) reported numbers between F=40 and F=56 for different configurations. Since the availability of manually labelled gold standard is scarce, other authors evaluated Glossary/Definition Extraction systems in terms of manually assessed precision (Reiplinger et al., 2012; De Benedictis et al., 2013) .",
"cite_spans": [
{
"start": 214,
"end": 232,
"text": "(Jin et al., 2013)",
"ref_id": "BIBREF17"
},
{
"start": 243,
"end": 268,
"text": "(Reiplinger et al., 2012)",
"ref_id": "BIBREF30"
},
{
"start": 673,
"end": 693,
"text": "Boella et al., 2014)",
"ref_id": "BIBREF3"
},
{
"start": 728,
"end": 746,
"text": "(Jin et al., 2013)",
"ref_id": "BIBREF17"
},
{
"start": 988,
"end": 1013,
"text": "(Reiplinger et al., 2012;",
"ref_id": "BIBREF30"
},
{
"start": 1014,
"end": 1041,
"text": "De Benedictis et al., 2013)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
{
"text": "In order to understand the discriminative power of the features designed for our experiments, we computed Information Gain, which measures the decrease in entropy when the feature is present vs. ab- sent (Forman, 2003) , using the Weka toolkit (Witten and Frank, 2005) . We did this for the original training set T S and the training set resulting at iteration 200 T S boot . Then, we captured the top 30 features in T S boot , and averaged their Information Gain score over all the available contexts. Finally, we compare these features in both datasets T S and T S boot (see Figure 2 ).",
"cite_spans": [
{
"start": 204,
"end": 218,
"text": "(Forman, 2003)",
"ref_id": "BIBREF14"
},
{
"start": 244,
"end": 268,
"text": "(Witten and Frank, 2005)",
"ref_id": "BIBREF43"
}
],
"ref_spans": [
{
"start": 577,
"end": 585,
"text": "Figure 2",
"ref_id": "FIGREF3"
}
],
"eq_spans": [],
"section": "Feature Analysis",
"sec_num": "6"
},
{
"text": "We observe an improvement of definitionallymotivated features after iteration 100, which combined with the gradual improvement in performance in the W00 dataset, suggests that def prom and d prom contribute decisively to domain-specific DE, while D prom proved less relevant. Note that in our setting, we do not focus in term/definition pairs, but rather a full-sentence definition. Therefore, we do not know a priori which term is the definiendum, and thus we do not perform a generalization step to convert it to a wildcard, which is common practice in the DE literature Reiplinger et al., 2012; Jin et al., 2013; Boella et al., 2014) . This provokes high sparsity in D prom and we hypothesize that this may be the reason for this feature to not gain predictive power after many iterations or the feature update step. ",
"cite_spans": [
{
"start": 573,
"end": 597,
"text": "Reiplinger et al., 2012;",
"ref_id": "BIBREF30"
},
{
"start": 598,
"end": 615,
"text": "Jin et al., 2013;",
"ref_id": "BIBREF17"
},
{
"start": 616,
"end": 636,
"text": "Boella et al., 2014)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Feature Analysis",
"sec_num": "6"
},
{
"text": "We have presented a weakly supervised DE approach that gradually increments the size of the training set with high quality definitions and clear examples of non-definitions. Two main conclusions can be drawn: (1) The definition-aware features we introduce show, in general, high informativeness for the task of DE; and (2) Our approach is valid for generating genre and domain specific training data capable of fitting corpora, even though this differs greatly in terms of content and register from the encyclopedic genre.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "7"
},
{
"text": "In addition, a small and focused benchmarking dataset of real-world definitions in the NLP domain has been released, which can be used both for linguistic and stylistic purposes and for evaluating DE systems.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "7"
},
{
"text": "These results motivate us to extend our experiments to several domains and textual genres, and to perform a longer iterative cycle where feature update is carried out more frequently. We believe that another interesting avenue for future work is multilingual definition extraction, which could benefit significantly from existing multilingual semantic networks and knowledge bases.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "7"
},
{
"text": "Henceforth, we refer to this corpus as the MSR-NLP dataset.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Available at http://www.taln.upf.edu/MSR-NLP RANLP20153 http://academic.research.microsoft.com/ 4 http://www.cameron.edu/\u02dccarolynk/Abstracts.html",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Description and evaluation of a definition extraction system for spanish language",
"authors": [
{
"first": "Rodrigo",
"middle": [],
"last": "Alarc\u00f3n",
"suffix": ""
},
{
"first": "Gerardo",
"middle": [],
"last": "Sierra",
"suffix": ""
},
{
"first": "Carme",
"middle": [],
"last": "Bach",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 1st Workshop on Definition Extraction, WDE '09",
"volume": "",
"issue": "",
"pages": "7--13",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rodrigo Alarc\u00f3n, Gerardo Sierra, and Carme Bach. 2009. Description and evaluation of a definition ex- traction system for spanish language. In Proceedings of the 1st Workshop on Definition Extraction, WDE '09, pages 7-13, Stroudsburg, PA, USA. Association for Computational Linguistics.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Two opposing theories: On h.e. wiegand's recent discovery of lexicographic functions",
"authors": [
{
"first": "Henning",
"middle": [],
"last": "Bergenholtz",
"suffix": ""
},
{
"first": "Sven",
"middle": [],
"last": "Tarp",
"suffix": ""
}
],
"year": 2003,
"venue": "Hermes, Journal of Linguistics",
"volume": "",
"issue": "",
"pages": "171--196",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Henning Bergenholtz and Sven Tarp. 2003. Two op- posing theories: On h.e. wiegand's recent discovery of lexicographic functions. Hermes, Journal of Linguis- tics, 3-1:171-196.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "The acl anthology reference corpus: A reference dataset for bibliographic research in computational linguistics",
"authors": [
{
"first": "Steven",
"middle": [],
"last": "Bird",
"suffix": ""
},
{
"first": "Robert",
"middle": [],
"last": "Dale",
"suffix": ""
},
{
"first": "Bonnie",
"middle": [],
"last": "Dorr",
"suffix": ""
},
{
"first": "Bryan",
"middle": [],
"last": "Gibson",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Joseph",
"suffix": ""
},
{
"first": "Min-Yen",
"middle": [],
"last": "Kan",
"suffix": ""
},
{
"first": "Dongwon",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "Brett",
"middle": [],
"last": "Powley",
"suffix": ""
},
{
"first": "Dragomir",
"middle": [],
"last": "Radev",
"suffix": ""
},
{
"first": "Yee",
"middle": [
"Fan"
],
"last": "Tan",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC-08), Marrakech",
"volume": "",
"issue": "",
"pages": "8--1005",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Steven Bird, Robert Dale, Bonnie Dorr, Bryan Gibson, Mark Joseph, Min-Yen Kan, Dongwon Lee, Brett Powley, Dragomir Radev, and Yee Fan Tan. 2008. The acl anthology reference corpus: A reference dataset for bibliographic research in computational linguistics. In Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC-08), Mar- rakech, Morocco, May. European Language Resources Association (ELRA). ACL Anthology Identifier: L08- 1005.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Learning from syntax generalizations for automatic semantic annotation",
"authors": [
{
"first": "Guido",
"middle": [],
"last": "Boella",
"suffix": ""
},
{
"first": "Luigi",
"middle": [
"Di"
],
"last": "Caro",
"suffix": ""
},
{
"first": "Alice",
"middle": [],
"last": "Ruggeri",
"suffix": ""
},
{
"first": "Livio",
"middle": [],
"last": "Robaldo",
"suffix": ""
}
],
"year": 2014,
"venue": "Journal of Intelligent Information Systems",
"volume": "",
"issue": "",
"pages": "1--16",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Guido Boella, Luigi Di Caro, Alice Ruggeri, and Livio Robaldo. 2014. Learning from syntax generalizations for automatic semantic annotation. Journal of Intelli- gent Information Systems, pages 1-16.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Evolutionary algorithms for definition extraction",
"authors": [
{
"first": "Claudia",
"middle": [],
"last": "Borg",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Rosner",
"suffix": ""
},
{
"first": "Gordon",
"middle": [],
"last": "Pace",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 1st Workshop in Definition Extraction",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Claudia Borg, Michael Rosner, and Gordon Pace. 2009. Evolutionary algorithms for definition extraction. In Proceedings of the 1st Workshop in Definition Extrac- tion.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Named entity recognition in italian using crf",
"authors": [
{
"first": "Peng",
"middle": [],
"last": "Cai",
"suffix": ""
},
{
"first": "Hangzai",
"middle": [],
"last": "Luo",
"suffix": ""
},
{
"first": "Aoying",
"middle": [],
"last": "Zhou",
"suffix": ""
}
],
"year": 2009,
"venue": "EVALITA",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Peng Cai, HangZai Luo, and AoYing Zhou. 2009. Named entity recognition in italian using crf. In EVALITA.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Parscit: an open-source crf reference string parsing package",
"authors": [
{
"first": "Lee",
"middle": [],
"last": "Isaac G Councill",
"suffix": ""
},
{
"first": "Min-Yen",
"middle": [],
"last": "Giles",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Kan",
"suffix": ""
}
],
"year": 2008,
"venue": "LREC",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Isaac G Councill, C Lee Giles, and Min-Yen Kan. 2008. Parscit: an open-source crf reference string parsing package. In LREC.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Generic soft pattern models for definitional question answering",
"authors": [
{
"first": "Hang",
"middle": [],
"last": "Cui",
"suffix": ""
},
{
"first": "Min-Yen",
"middle": [],
"last": "Kan",
"suffix": ""
},
{
"first": "Tat-Seng",
"middle": [],
"last": "Chua",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 28th annual international ACM SIGIR conference on Research and development in information retrieval",
"volume": "",
"issue": "",
"pages": "384--391",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hang Cui, Min-Yen Kan, and Tat-Seng Chua. 2005. Generic soft pattern models for definitional question answering. In Proceedings of the 28th annual in- ternational ACM SIGIR conference on Research and development in information retrieval, pages 384-391. ACM.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Glossboot: Bootstrapping multilingual domain glossaries from the web",
"authors": [
{
"first": "Stefano",
"middle": [],
"last": "Flavio De Benedictis",
"suffix": ""
},
{
"first": "Roberto",
"middle": [],
"last": "Faralli",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Navigli",
"suffix": ""
}
],
"year": 2013,
"venue": "ACL (1)",
"volume": "",
"issue": "",
"pages": "528--538",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Flavio De Benedictis, Stefano Faralli, Roberto Navigli, et al. 2013. Glossboot: Bootstrapping multilingual domain glossaries from the web. In ACL (1), pages 528-538.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Definition extraction using a sequential combination of baseline grammars and machine learning classifiers",
"authors": [
{
"first": "Lukasz",
"middle": [],
"last": "Deg\u00f3rski",
"suffix": ""
},
{
"first": "Micha",
"middle": [],
"last": "Marci\u0144czuk",
"suffix": ""
},
{
"first": "Adam",
"middle": [],
"last": "Przepi\u00f3rkowski",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 6th International Conference on Language Resources and Evaluation (LREC)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lukasz Deg\u00f3rski, Micha Marci\u0144czuk, and Adam Przepi\u00f3rkowski. 2008. Definition extraction using a sequential combination of baseline grammars and ma- chine learning classifiers. In Proceedings of the 6th International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, may.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Coping with highly imbalanced datasets: A case study with definition extraction in a multilingual setting",
"authors": [
{
"first": "Rosa",
"middle": [],
"last": "Del Gaudio",
"suffix": ""
},
{
"first": "Gustavo",
"middle": [],
"last": "Batista",
"suffix": ""
},
{
"first": "Ant\u00f3nio",
"middle": [],
"last": "Branco",
"suffix": ""
}
],
"year": 2013,
"venue": "Natural Language Engineering",
"volume": "",
"issue": "",
"pages": "1--33",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rosa Del Gaudio, Gustavo Batista, and Ant\u00f3nio Branco. 2013. Coping with highly imbalanced datasets: A case study with definition extraction in a multilingual set- ting. Natural Language Engineering, pages 1-33.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Applying dependency relations to definition extraction",
"authors": [
{
"first": "Luis",
"middle": [],
"last": "Espinosa-Anke",
"suffix": ""
},
{
"first": "Horacio",
"middle": [],
"last": "Saggion",
"suffix": ""
}
],
"year": 2014,
"venue": "Natural Language Processing and Information Systems",
"volume": "",
"issue": "",
"pages": "63--74",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Luis Espinosa-Anke and Horacio Saggion. 2014. Ap- plying dependency relations to definition extraction. In Natural Language Processing and Information Sys- tems, pages 63-74. Springer.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Growing multi-domain glossaries from a few seeds using probabilistic topic models",
"authors": [
{
"first": "Stefano",
"middle": [],
"last": "Faralli",
"suffix": ""
},
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
}
],
"year": 2013,
"venue": "EMNLP",
"volume": "",
"issue": "",
"pages": "170--181",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stefano Faralli and Roberto Navigli. 2013. Growing multi-domain glossaries from a few seeds using prob- abilistic topic models. In EMNLP, pages 170-181.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Two is bigger (and better) than one: the wikipedia bitaxonomy project",
"authors": [
{
"first": "Tiziano",
"middle": [],
"last": "Flati",
"suffix": ""
},
{
"first": "Daniele",
"middle": [],
"last": "Vannella",
"suffix": ""
},
{
"first": "Tommaso",
"middle": [],
"last": "Pasini",
"suffix": ""
},
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
}
],
"year": 2014,
"venue": "Proc. of the 52nd Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tiziano Flati, Daniele Vannella, Tommaso Pasini, and Roberto Navigli. 2014. Two is bigger (and better) than one: the wikipedia bitaxonomy project. In Proc. of the 52nd Annual Meeting of the Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "An extensive empirical study of feature selection metrics for text classification",
"authors": [
{
"first": "George",
"middle": [],
"last": "Forman",
"suffix": ""
}
],
"year": 2003,
"venue": "The Journal of machine learning research",
"volume": "3",
"issue": "",
"pages": "1289--1305",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George Forman. 2003. An extensive empirical study of feature selection metrics for text classification. The Journal of machine learning research, 3:1289-1305.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Brown corpus manual",
"authors": [
{
"first": "Nelson",
"middle": [],
"last": "Francis",
"suffix": ""
},
{
"first": "Henry",
"middle": [],
"last": "Kucera",
"suffix": ""
}
],
"year": 1979,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "W Nelson Francis and Henry Kucera. 1979. Brown cor- pus manual. Brown University.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Specialised Dictionaries for Learners",
"authors": [
{
"first": "Pedro",
"middle": [],
"last": "Fuertes-Olivera",
"suffix": ""
}
],
"year": 2010,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pedro Fuertes-Olivera. 2010. Specialised Dictionaries for Learners. Berlin/New York: De Gruyter. Lexico- graphica Series Maior, 136.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Mining scientific terms and their definitions: A study of the ACL anthology",
"authors": [
{
"first": "Yiping",
"middle": [],
"last": "Jin",
"suffix": ""
},
{
"first": "Min-Yen",
"middle": [],
"last": "Kan",
"suffix": ""
},
{
"first": "Jun-Ping",
"middle": [],
"last": "Ng",
"suffix": ""
},
{
"first": "Xiangnan",
"middle": [],
"last": "He",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "780--790",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yiping Jin, Min-Yen Kan, Jun-Ping Ng, and Xiangnan He. 2013. Mining scientific terms and their defini- tions: A study of the ACL anthology. In Proceed- ings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 780-790, Seat- tle, Washington, USA, October. Association for Com- putational Linguistics.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Measuring monoword termhood by rank difference via corpus comparison",
"authors": [
{
"first": "Chunyu",
"middle": [],
"last": "Kit",
"suffix": ""
},
{
"first": "Xiaoyue",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2008,
"venue": "Terminology",
"volume": "14",
"issue": "2",
"pages": "204--229",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chunyu Kit and Xiaoyue Liu. 2008. Measuring mono- word termhood by rank difference via corpus compar- ison. Terminology, 14(2):204-229.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Conditional Random Fields: Probabilistic Models for Segmenting and Labeling Sequence Data",
"authors": [
{
"first": "John",
"middle": [
"D"
],
"last": "Lafferty",
"suffix": ""
},
{
"first": "Andrew",
"middle": [],
"last": "Mccallum",
"suffix": ""
},
{
"first": "Fernando",
"middle": [
"C N"
],
"last": "Pereira",
"suffix": ""
}
],
"year": 2001,
"venue": "Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01",
"volume": "",
"issue": "",
"pages": "282--289",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional Random Fields: Proba- bilistic Models for Segmenting and Labeling Sequence Data. In Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01, pages 282-289, San Francisco, CA, USA. Morgan Kauf- mann Publishers Inc.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Detecting semantic relations between terms in definitions",
"authors": [
{
"first": "V\u00e9ronique",
"middle": [],
"last": "Malais\u00e9",
"suffix": ""
},
{
"first": "Pierre",
"middle": [],
"last": "Zweigenbaum",
"suffix": ""
},
{
"first": "Bruno",
"middle": [],
"last": "Bachimont",
"suffix": ""
}
],
"year": 2004,
"venue": "International Conference on Computational Linguistics (COLING 2004) -CompuTerm 2004: 3rd International Workshop on Computational Terminology",
"volume": "",
"issue": "",
"pages": "55--62",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "V\u00e9ronique Malais\u00e9, Pierre Zweigenbaum, and Bruno Bachimont. 2004. Detecting semantic relations be- tween terms in definitions. In Sophia Ananadiou and Pierre Zweigenbaum, editors, International Confer- ence on Computational Linguistics (COLING 2004) -CompuTerm 2004: 3rd International Workshop on Computational Terminology, pages 55-62, Geneva, Switzerland, August 29.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Extracting knowledge-rich contexts for terminography",
"authors": [
{
"first": "Ingrid",
"middle": [],
"last": "Meyer",
"suffix": ""
}
],
"year": 2001,
"venue": "",
"volume": "2",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ingrid Meyer. 2001. Extracting knowledge-rich contexts for terminography. Recent advances in computational terminology, 2:279.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "What can NLP techniques do for eLearning",
"authors": [
{
"first": "Paola",
"middle": [],
"last": "Monachesi",
"suffix": ""
},
{
"first": "Eline",
"middle": [],
"last": "Westerhout",
"suffix": ""
}
],
"year": 2008,
"venue": "International Conference on Informatics and Systems (IN-FOS08)",
"volume": "",
"issue": "",
"pages": "150--156",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Paola. Monachesi and Eline. Westerhout. 2008. What can NLP techniques do for eLearning? In Inter- national Conference on Informatics and Systems (IN- FOS08), pages 150-156.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "A method for automatically building and evaluating dictionary resources",
"authors": [
{
"first": "A",
"middle": [],
"last": "Muresan",
"suffix": ""
},
{
"first": "Judith",
"middle": [],
"last": "Klavans",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the Language Resources and Evaluation Conference",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A Muresan and Judith Klavans. 2002. A method for automatically building and evaluating dictionary re- sources. In Proceedings of the Language Resources and Evaluation Conference (LREC.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Extraction of semantic information from an ordinary english dictionary and its evaluation",
"authors": [
{
"first": "Jun-Ichi",
"middle": [],
"last": "Nakamura",
"suffix": ""
},
{
"first": "Makoto",
"middle": [],
"last": "Nagao",
"suffix": ""
}
],
"year": 1988,
"venue": "Proceedings of the 12th Conference on Computational Linguistics",
"volume": "2",
"issue": "",
"pages": "459--464",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jun-ichi Nakamura and Makoto Nagao. 1988. Extraction of semantic information from an ordinary english dic- tionary and its evaluation. In Proceedings of the 12th Conference on Computational Linguistics -Volume 2, COLING '88, pages 459-464, Stroudsburg, PA, USA. Association for Computational Linguistics.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Learning word-class lattices for definition and hypernym extraction",
"authors": [
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
},
{
"first": "Paola",
"middle": [],
"last": "Velardi",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, ACL '10",
"volume": "",
"issue": "",
"pages": "1318--1327",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Roberto Navigli and Paola Velardi. 2010. Learning word-class lattices for definition and hypernym extrac- tion. In Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, ACL '10, pages 1318-1327, Stroudsburg, PA, USA. Association for Computational Linguistics.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "An annotated dataset for extracting definitions and hypernyms from the web",
"authors": [
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
},
{
"first": "Paola",
"middle": [],
"last": "Velardi",
"suffix": ""
},
{
"first": "Juana Mar\u00eda Ruiz-Mart\u00ednez. ; Khalid",
"middle": [],
"last": "Choukri",
"suffix": ""
},
{
"first": "Bente",
"middle": [],
"last": "Maegaard",
"suffix": ""
},
{
"first": "Joseph",
"middle": [],
"last": "Mariani",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Roberto Navigli, Paola Velardi, and Juana Mar\u00eda Ruiz- Mart\u00ednez. 2010. An annotated dataset for extracting definitions and hypernyms from the web. In Nico- letta Calzolari (Conference Chair), Khalid Choukri, Bente Maegaard, Joseph Mariani, Jan Odijk, Stelios Piperidis, Mike Rosner, and Daniel Tapias, editors, Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10), Valletta, Malta, may. European Language Resources Association (ELRA).",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Automatic Glossary Extraction: Beyond Terminology Identification",
"authors": [
{
"first": "Youngja",
"middle": [],
"last": "Park",
"suffix": ""
},
{
"first": "Roy",
"middle": [
"J"
],
"last": "Byrd",
"suffix": ""
},
{
"first": "Branimir",
"middle": [
"K"
],
"last": "Boguraev",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the 19th International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1--7",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Youngja Park, Roy J. Byrd, and Branimir K. Boguraev. 2002. Automatic Glossary Extraction: Beyond Ter- minology Identification. In Proceedings of the 19th International Conference on Computational Linguis- tics, pages 1-7, Morristown, NJ, USA. Association for Computational Linguistics.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Towards the automatic extraction of definitions in Slavic",
"authors": [
{
"first": "Adam",
"middle": [],
"last": "Przepi\u00f3rkowski",
"suffix": ""
},
{
"first": "Miroslav",
"middle": [],
"last": "Spousta",
"suffix": ""
},
{
"first": "Kiril",
"middle": [],
"last": "Simov",
"suffix": ""
},
{
"first": "Petya",
"middle": [],
"last": "Osenova",
"suffix": ""
},
{
"first": "Lothar",
"middle": [],
"last": "Lemnitzer",
"suffix": ""
},
{
"first": "Vladislav",
"middle": [],
"last": "Kubo",
"suffix": ""
},
{
"first": "Beata",
"middle": [],
"last": "W\u00f3jtowicz",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings ofo the BSNLP workshop at ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Adam Przepi\u00f3rkowski, Miroslav Spousta, Kiril Simov, Petya Osenova, Lothar Lemnitzer, Vladislav Kubo, and Beata W\u00f3jtowicz. 2007. Towards the automatic extraction of definitions in Slavic. In Proceedings ofo the BSNLP workshop at ACL 2007.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "Rep\u00e9rage automatique de structures linguistiques en corpus : le cas des\u00e9nonc\u00e9s d\u00e9finitoires",
"authors": [
{
"first": "Josette",
"middle": [],
"last": "Rebeyrolle",
"suffix": ""
},
{
"first": "Ludovic",
"middle": [],
"last": "Tanguy",
"suffix": ""
}
],
"year": 2000,
"venue": "Cahiers de Grammaire",
"volume": "25",
"issue": "",
"pages": "153--174",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Josette Rebeyrolle and Ludovic Tanguy. 2000. Rep\u00e9rage automatique de structures linguistiques en corpus : le cas des\u00e9nonc\u00e9s d\u00e9finitoires. Cahiers de Grammaire, 25:153-174.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Extracting glossary sentences from scholarly articles: A comparative evaluation of pattern bootstrapping and deep analysis",
"authors": [
{
"first": "Melanie",
"middle": [],
"last": "Reiplinger",
"suffix": ""
},
{
"first": "Ulrich",
"middle": [],
"last": "Sch\u00e4fer",
"suffix": ""
},
{
"first": "Magdalena",
"middle": [],
"last": "Wolska",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the ACL-2012 Special Workshop on Rediscovering 50 Years of Discoveries",
"volume": "",
"issue": "",
"pages": "55--65",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Melanie Reiplinger, Ulrich Sch\u00e4fer, and Magdalena Wol- ska. 2012. Extracting glossary sentences from schol- arly articles: A comparative evaluation of pattern bootstrapping and deep analysis. In Proceedings of the ACL-2012 Special Workshop on Rediscovering 50 Years of Discoveries, pages 55-65, Jeju Island, Korea, July. Association for Computational Linguistics.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "Metalinguistic Information Extraction from Specialized Texts to Enrich Computational Lexicons",
"authors": [
{
"first": "Carlos",
"middle": [],
"last": "Rodr\u00edguez",
"suffix": ""
}
],
"year": 2004,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Carlos Rodr\u00edguez. 2004. Metalinguistic Information Extraction from Specialized Texts to Enrich Compu- tational Lexicons. Ph.D. thesis, Universitat Pompeu Fabra.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Mining on-line sources for definition knowledge",
"authors": [
{
"first": "Horacio",
"middle": [],
"last": "Saggion",
"suffix": ""
},
{
"first": "Robert",
"middle": [],
"last": "Gaizauskas",
"suffix": ""
}
],
"year": 2004,
"venue": "17th FLAIRS",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Horacio Saggion and Robert Gaizauskas. 2004. Min- ing on-line sources for definition knowledge. In 17th FLAIRS, Miami Bearch, Florida.",
"links": null
},
"BIBREF33": {
"ref_id": "b33",
"title": "Hacia un sistema de extracci\u00f3n de definiciones en textos jur\u00eddicos",
"authors": [
{
"first": "A",
"middle": [],
"last": "S\u00e1nchez",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "M\u00e1rquez",
"suffix": ""
}
],
"year": 2005,
"venue": "Actas de la 1er Jornada Venezolana de Investigaci\u00f3n en Ling\u00fc\u00edstica e Inform\u00e1tica",
"volume": "",
"issue": "",
"pages": "1--10",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. S\u00e1nchez and J. M\u00e1rquez. 2005. Hacia un sistema de extracci\u00f3n de definiciones en textos jur\u00eddicos. In Actas de la 1er Jornada Venezolana de Investigaci\u00f3n en Ling\u00fc\u00edstica e Inform\u00e1tica, pages 1-10.",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "Corp\u00f3grafo V3 From Terminological Aid to Semi-automatic Knowledge Engineering",
"authors": [
{
"first": "Lu\u00eds",
"middle": [],
"last": "Sarmento",
"suffix": ""
},
{
"first": "Belinda",
"middle": [],
"last": "Maia",
"suffix": ""
},
{
"first": "Diana",
"middle": [],
"last": "Santos",
"suffix": ""
},
{
"first": "Ana",
"middle": [],
"last": "Pinto",
"suffix": ""
},
{
"first": "Lu\u00eds",
"middle": [],
"last": "Cabral",
"suffix": ""
}
],
"year": 2006,
"venue": "5th International Conference on Language Resources and Evaluation (LREC'06)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lu\u00eds Sarmento, Belinda Maia, Diana Santos, Ana Pinto, and Lu\u00eds Cabral. 2006. Corp\u00f3grafo V3 From Ter- minological Aid to Semi-automatic Knowledge Engi- neering. In 5th International Conference on Language Resources and Evaluation (LREC'06), Geneva.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "A bilingual study of knowledge -rich context extraction in russian and german",
"authors": [
{
"first": "Anne-Kathrin",
"middle": [],
"last": "Schumann",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the Fifth Language and Technology Conference",
"volume": "",
"issue": "",
"pages": "516--520",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Anne-Kathrin Schumann. 2011. A bilingual study of knowledge -rich context extraction in russian and ger- man. In Proceedings of the Fifth Language and Tech- nology Conference, pages 516-520.",
"links": null
},
"BIBREF36": {
"ref_id": "b36",
"title": "A proposal for a framework to evaluate feature relevance for terminographic definitions",
"authors": [
{
"first": "Selja",
"middle": [],
"last": "Sepp\u00e4l\u00e4",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 1st Workshop on Definition Extraction, WDE '09",
"volume": "",
"issue": "",
"pages": "47--53",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Selja Sepp\u00e4l\u00e4. 2009. A proposal for a framework to evaluate feature relevance for terminographic defini- tions. In Proceedings of the 1st Workshop on Defini- tion Extraction, WDE '09, pages 47-53, Stroudsburg, PA, USA. Association for Computational Linguistics.",
"links": null
},
"BIBREF37": {
"ref_id": "b37",
"title": "Towards the building of a corpus of definitional contexts",
"authors": [
{
"first": "Gerardo",
"middle": [],
"last": "Sierra",
"suffix": ""
},
{
"first": "Rodrigo",
"middle": [],
"last": "Alarc\u00f3n",
"suffix": ""
},
{
"first": "C\u00e9sar",
"middle": [],
"last": "Aguilar",
"suffix": ""
},
{
"first": "Alberto",
"middle": [],
"last": "Barr\u00f3n",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceeding of the 12th EU-RALEX International Congress",
"volume": "",
"issue": "",
"pages": "229--269",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gerardo Sierra, Rodrigo Alarc\u00f3n, C\u00e9sar Aguilar, and Al- berto Barr\u00f3n. 2006. Towards the building of a corpus of definitional contexts. In Proceeding of the 12th EU- RALEX International Congress, Torino, Italy, pages 229-40.",
"links": null
},
"BIBREF38": {
"ref_id": "b38",
"title": "Automated detection and annotation of term definitions in German text corpora",
"authors": [
{
"first": "Angelika",
"middle": [],
"last": "Storrer",
"suffix": ""
},
{
"first": "Sandra",
"middle": [],
"last": "Wellinghoff",
"suffix": ""
}
],
"year": 2006,
"venue": "Conference on Language Resources and Evaluation (LREC)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Angelika Storrer and Sandra Wellinghoff. 2006. Auto- mated detection and annotation of term definitions in German text corpora. In Conference on Language Re- sources and Evaluation (LREC).",
"links": null
},
"BIBREF39": {
"ref_id": "b39",
"title": "English for Science and Technology: A Discourse Approach. Cambridge Language Teaching Library",
"authors": [
{
"first": "L",
"middle": [],
"last": "Trimble",
"suffix": ""
}
],
"year": 1985,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "L. Trimble. 1985. English for Science and Technology: A Discourse Approach. Cambridge Language Teach- ing Library.",
"links": null
},
"BIBREF40": {
"ref_id": "b40",
"title": "Mining the web to create specialized glossaries",
"authors": [
{
"first": "Paola",
"middle": [],
"last": "Velardi",
"suffix": ""
},
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
},
{
"first": "Pierluigi D'",
"middle": [],
"last": "Amadio",
"suffix": ""
}
],
"year": 2008,
"venue": "IEEE Intelligent Systems",
"volume": "23",
"issue": "5",
"pages": "18--25",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Paola Velardi, Roberto Navigli, and Pierluigi D'Amadio. 2008. Mining the web to create specialized glossaries. IEEE Intelligent Systems, 23(5):18-25, September.",
"links": null
},
"BIBREF41": {
"ref_id": "b41",
"title": "Ontolearn reloaded: A graph-based algorithm for taxonomy induction",
"authors": [
{
"first": "Paola",
"middle": [],
"last": "Velardi",
"suffix": ""
},
{
"first": "Stefano",
"middle": [],
"last": "Faralli",
"suffix": ""
},
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
}
],
"year": 2013,
"venue": "Computational Linguistics",
"volume": "39",
"issue": "3",
"pages": "665--707",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Paola Velardi, Stefano Faralli, and Roberto Navigli. 2013. Ontolearn reloaded: A graph-based algorithm for taxonomy induction. Computational Linguistics, 39(3):665-707.",
"links": null
},
"BIBREF42": {
"ref_id": "b42",
"title": "Extraction of Dutch definitory contexts for elearning purposes",
"authors": [
{
"first": "Eline",
"middle": [],
"last": "Westerhout",
"suffix": ""
},
{
"first": "Paola",
"middle": [],
"last": "Monachesi",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the Computational Linguistics in the Netherlands (CLIN 2007)",
"volume": "",
"issue": "",
"pages": "219--253",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Eline Westerhout and Paola Monachesi. 2007. Extrac- tion of Dutch definitory contexts for elearning pur- poses. Proceedings of the Computational Linguistics in the Netherlands (CLIN 2007), Nijmegen, Nether- lands, pages 219-34.",
"links": null
},
"BIBREF43": {
"ref_id": "b43",
"title": "Data Mining: Practical machine learning tools and techniques",
"authors": [
{
"first": "Ian",
"middle": [
"H"
],
"last": "Witten",
"suffix": ""
},
{
"first": "Eibe",
"middle": [],
"last": "Frank",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ian H Witten and Eibe Frank. 2005. Data Mining: Prac- tical machine learning tools and techniques. Morgan Kaufmann.",
"links": null
},
"BIBREF44": {
"ref_id": "b44",
"title": "Minimally supervised method for multilingual paraphrase extraction from definition sentences on the web",
"authors": [
{
"first": "Yulan",
"middle": [],
"last": "Yan",
"suffix": ""
},
{
"first": "Chikara",
"middle": [],
"last": "Hashimoto",
"suffix": ""
},
{
"first": "Kentaro",
"middle": [],
"last": "Torisawa",
"suffix": ""
},
{
"first": "Takao",
"middle": [],
"last": "Kawai",
"suffix": ""
},
{
"first": "Jun'ichi",
"middle": [],
"last": "Kazama",
"suffix": ""
},
{
"first": "Stijn",
"middle": [
"De"
],
"last": "Saeger",
"suffix": ""
}
],
"year": 2013,
"venue": "HLT-NAACL",
"volume": "",
"issue": "",
"pages": "63--73",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yulan Yan, Chikara Hashimoto, Kentaro Torisawa, Takao Kawai, Jun'ichi Kazama, and Stijn De Saeger. 2013. Minimally supervised method for multilingual para- phrase extraction from definition sentences on the web. In HLT-NAACL, pages 63-73.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "Bootstrapping for DE Require: T S = {(S, d \u2208 D)} Initial labelled train seeds. DS = {S} Subset of the ACL-ARC corpus. MSR-NLP: Test set 1. W00: Test set 2."
},
"FIGREF1": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "and Figure 2 present the overall system configuration and data flow of the integrated system 23.34 96.72 37.6 62.27 78.45 69.43"
},
"FIGREF2": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "F-Score against iteration on the MSR-NLP (top row) and W00 datasets (bottom row), with bootstrapping + post-classification heuristics (left column) and only bootstrapping (right column)."
},
"FIGREF3": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "Information Gain for the best features at the end of the bootstrapping process. Note the substantial improvement in def prom (definitional prominence)."
}
}
}
}