| { |
| "paper_id": "E17-1010", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:51:52.941324Z" |
| }, |
| "title": "Word Sense Disambiguation: A Unified Evaluation Framework and Empirical Comparison", |
| "authors": [ |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Raganato", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Sapienza University of Rome", |
| "location": {} |
| }, |
| "email": "raganato@di.uniroma1.it" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Sapienza University of Rome", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Sapienza University of Rome", |
| "location": {} |
| }, |
| "email": "navigli@di.uniroma1.it" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Word Sense Disambiguation is a longstanding task in Natural Language Processing, lying at the core of human language understanding. However, the evaluation of automatic systems has been problematic, mainly due to the lack of a reliable evaluation framework. In this paper we develop a unified evaluation framework and analyze the performance of various Word Sense Disambiguation systems in a fair setup. The results show that supervised systems clearly outperform knowledge-based models. Among the supervised systems, a linear classifier trained on conventional local features still proves to be a hard baseline to beat. Nonetheless, recent approaches exploiting neural networks on unlabeled corpora achieve promising results, surpassing this hard baseline in most test sets.", |
| "pdf_parse": { |
| "paper_id": "E17-1010", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Word Sense Disambiguation is a longstanding task in Natural Language Processing, lying at the core of human language understanding. However, the evaluation of automatic systems has been problematic, mainly due to the lack of a reliable evaluation framework. In this paper we develop a unified evaluation framework and analyze the performance of various Word Sense Disambiguation systems in a fair setup. The results show that supervised systems clearly outperform knowledge-based models. Among the supervised systems, a linear classifier trained on conventional local features still proves to be a hard baseline to beat. Nonetheless, recent approaches exploiting neural networks on unlabeled corpora achieve promising results, surpassing this hard baseline in most test sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Word Sense Disambiguation (WSD) has been a long-standing task in Natural Language Processing (NLP) . It lies at the core of language understanding and has already been studied from many different angles (Navigli, 2009; Navigli, 2012) . However, the field seems to be slowing down due to the lack of groundbreaking improvements and the difficulty of integrating current WSD systems into downstream NLP applications (de Lacalle and Agirre, 2015) . In general the field does not have a clear path, partially owing to the fact that identifying real improvements over existing approaches becomes a hard task with current evaluation benchmarks. This is mainly due to the lack of a unified framework, which prevents direct and fair comparison among systems. Even though many evaluation datasets have been constructed for the task (Edmonds and Cotton, 2001; Snyder and Palmer, 2004; Navigli et al., 2007; Pradhan et al., 2007; Agirre et al., 2010a; Navigli et al., 2013; Moro and Navigli, 2015, inter alia) , they tend to differ in format, construction guidelines and underlying sense inventory. In the case of the datasets annotated using WordNet (Miller, 1995) , the de facto sense inventory for WSD, we encounter the additional barrier of having text annotated with different versions. These divergences are in the main solved individually by using or constructing automatic mappings. The quality check of such mapping, however, tends to be impractical and this leads to mapping errors which give rise to additional system inconsistencies in the experimental setting. This issue is directly extensible to the training corpora used by supervised systems. In fact, results obtained by supervised or semi-supervised systems reported in the literature are not completely reliable, because the systems may not necessarily have been trained on the same corpus, or the corpus was preprocessed differently, or annotated with a sense inventory different from the test data. 
Thus, together, the foregoing issues prevent us from drawing reliable conclusions on different models, as in some cases ostensible improvements may have been obtained as a consequence of the nature of the training corpus, the preprocessing pipeline or the version of the underlying sense inventory, rather than of the model itself. Moreover, because of these divergences, current systems tend to report results on a few datasets only, making it hard to perform a direct quantitative confrontation.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 98, |
| "text": "(NLP)", |
| "ref_id": null |
| }, |
| { |
| "start": 203, |
| "end": 218, |
| "text": "(Navigli, 2009;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 219, |
| "end": 233, |
| "text": "Navigli, 2012)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 430, |
| "end": 443, |
| "text": "Agirre, 2015)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 823, |
| "end": 849, |
| "text": "(Edmonds and Cotton, 2001;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 850, |
| "end": 874, |
| "text": "Snyder and Palmer, 2004;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 875, |
| "end": 896, |
| "text": "Navigli et al., 2007;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 897, |
| "end": 918, |
| "text": "Pradhan et al., 2007;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 919, |
| "end": 940, |
| "text": "Agirre et al., 2010a;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 941, |
| "end": 962, |
| "text": "Navigli et al., 2013;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 963, |
| "end": 998, |
| "text": "Moro and Navigli, 2015, inter alia)", |
| "ref_id": null |
| }, |
| { |
| "start": 1140, |
| "end": 1154, |
| "text": "(Miller, 1995)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper offers two main contributions. First, we provide a complete evaluation framework for all-words Word Sense Disambiguation overcoming all the aforementioned limitations by (1) standardizing the WSD datasets and training corpora into a unified format, (2) semi-automatically converting annotations from any dataset to WordNet 3.0, and (3) preprocessing the datasets by consistently using the same pipeline. Second, we use this evaluation framework to perform a fair quantitative and qualitative empirical comparison of the main techniques proposed in the WSD literature, including the latest advances based on neural networks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The task of Word Sense Disambiguation consists of associating words in context with the most suitable entry in a pre-defined sense inventory. Depending on their nature, WSD systems are divided into two main groups: supervised and knowledgebased. In what follows we summarize the current state of these two types of approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Supervised models train different features extracted from manually sense-annotated corpora. These features have been mostly based on the information provided by the surroundings words of the target word (Keok and Ng, 2002; Navigli, 2009) and its collocations. Recently, more complex features based on word embeddings trained on unlabeled corpora have also been explored (Taghipour and Ng, 2015b; Rothe and Sch\u00fctze, 2015; Iacobacci et al., 2016) . These features are generally taken as input to train a linear classifier (Zhong and Ng, 2010; Shen et al., 2013) . In addition to these conventional approaches, the latest developments in neural language models have motivated some researchers to include them in their WSD architectures (K\u00e5geb\u00e4ck and Salomonsson, 2016; Melamud et al., 2016; Yuan et al., 2016) . Supervised models have traditionally been able to outperform knowledge-based systems (Navigli, 2009) . However, obtaining sense-annotated corpora is highly expensive, and in many cases such corpora are not available for specific domains. This is the reason why some of these supervised methods have started to rely on unlabeled corpora as well. These approaches, which are often classified as semi-supervised, are targeted at overcoming the knowledge acquisition bottleneck of conventional supervised models (Pilehvar and Navigli, 2014) . In fact, there is a line of research specifically aimed at automatically obtaining large amounts of high-quality sense-annotated corpora (Taghipour and Ng, 2015a; Raganato et al., 2016; Camacho-Collados et al., 2016a) .", |
| "cite_spans": [ |
| { |
| "start": 203, |
| "end": 222, |
| "text": "(Keok and Ng, 2002;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 223, |
| "end": 237, |
| "text": "Navigli, 2009)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 370, |
| "end": 395, |
| "text": "(Taghipour and Ng, 2015b;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 396, |
| "end": 420, |
| "text": "Rothe and Sch\u00fctze, 2015;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 421, |
| "end": 444, |
| "text": "Iacobacci et al., 2016)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 520, |
| "end": 540, |
| "text": "(Zhong and Ng, 2010;", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 541, |
| "end": 559, |
| "text": "Shen et al., 2013)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 733, |
| "end": 765, |
| "text": "(K\u00e5geb\u00e4ck and Salomonsson, 2016;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 766, |
| "end": 787, |
| "text": "Melamud et al., 2016;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 788, |
| "end": 806, |
| "text": "Yuan et al., 2016)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 894, |
| "end": 909, |
| "text": "(Navigli, 2009)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 1317, |
| "end": 1345, |
| "text": "(Pilehvar and Navigli, 2014)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 1485, |
| "end": 1510, |
| "text": "(Taghipour and Ng, 2015a;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 1511, |
| "end": 1533, |
| "text": "Raganato et al., 2016;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 1534, |
| "end": 1565, |
| "text": "Camacho-Collados et al., 2016a)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Supervised WSD", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In this work we compare supervised systems and study the role of their underlying senseannotated training corpus. Since semi-supervised models have been shown to outperform fully supervised systems in some settings (Taghipour and Ng, 2015b; Ba\u015fkaya and Jurgens, 2016; Iacobacci et al., 2016; Yuan et al., 2016) , we evaluate and compare models using both manually-curated and automatically-constructed sense-annotated corpora for training.", |
| "cite_spans": [ |
| { |
| "start": 215, |
| "end": 240, |
| "text": "(Taghipour and Ng, 2015b;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 241, |
| "end": 267, |
| "text": "Ba\u015fkaya and Jurgens, 2016;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 268, |
| "end": 291, |
| "text": "Iacobacci et al., 2016;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 292, |
| "end": 310, |
| "text": "Yuan et al., 2016)", |
| "ref_id": "BIBREF54" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Supervised WSD", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In contrast to supervised systems, knowledgebased WSD techniques do not require any senseannotated corpus. Instead, these approaches rely on the structure or content of manually-curated knowledge resources for disambiguation. One of the first approaches of this kind was Lesk (1986) , which in its original version consisted of calculating the overlap between the context of the target word and its definitions as given by the sense inventory. Based on the same principle, various works have adapted the original algorithm by also taking into account definitions from related words (Banerjee and Pedersen, 2003) , or by calculating the distributional similarity between definitions and the context of the target word (Basile et al., 2014; Chen et al., 2014) . Distributional similarity has also been exploited in different settings in various works (Miller et al., 2012; Camacho-Collados et al., 2015; Camacho-Collados et al., 2016b) . In addition to these approaches based on distributional similarity, an important branch of knowledge-based systems found their techniques on the structural properties of semantic graphs from lexical resources (Agirre and Soroa, 2009; Guo and Diab, 2010; Ponzetto and Navigli, 2010; Agirre et al., 2014; Moro et al., 2014; Weissenborn et al., 2015; Tripodi and Pelillo, 2016) . Generally, these graph-based WSD systems first create a graph representation of the input text and then exploit different graph-based algorithms over the given representation (e.g., PageRank) to perform WSD.", |
| "cite_spans": [ |
| { |
| "start": 271, |
| "end": 282, |
| "text": "Lesk (1986)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 582, |
| "end": 611, |
| "text": "(Banerjee and Pedersen, 2003)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 717, |
| "end": 738, |
| "text": "(Basile et al., 2014;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 739, |
| "end": 757, |
| "text": "Chen et al., 2014)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 849, |
| "end": 870, |
| "text": "(Miller et al., 2012;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 871, |
| "end": 901, |
| "text": "Camacho-Collados et al., 2015;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 902, |
| "end": 933, |
| "text": "Camacho-Collados et al., 2016b)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1145, |
| "end": 1169, |
| "text": "(Agirre and Soroa, 2009;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1170, |
| "end": 1189, |
| "text": "Guo and Diab, 2010;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1190, |
| "end": 1217, |
| "text": "Ponzetto and Navigli, 2010;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 1218, |
| "end": 1238, |
| "text": "Agirre et al., 2014;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1239, |
| "end": 1257, |
| "text": "Moro et al., 2014;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1258, |
| "end": 1283, |
| "text": "Weissenborn et al., 2015;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 1284, |
| "end": 1310, |
| "text": "Tripodi and Pelillo, 2016)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-based WSD", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In this section we explain our pipeline for transforming any given evaluation dataset or senseannotated corpus into a preprocessed unified for- mat. In our pipeline we do not make any distinction between evaluation datasets and senseannotated training corpora, as the pipeline can be applied equally to both types. For simplicity we will refer to both evaluation datasets and training corpora as WSD datasets. Figure 1 summarizes our pipeline to standardize a WSD dataset. The process consists of four steps:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 410, |
| "end": 418, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Standardization of WSD datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1. Most WSD datasets in the literature use a similar XML format, but they have some divergences on how to encode the information. For instance, the SemEval-15 dataset (Moro and Navigli, 2015) was developed for both WSD and Entity Linking and its format was especially designed for this latter task. Therefore, we decided to convert all datasets to a unified format. As unified format we use the XML scheme used for the SemEval-13 allwords WSD task (Navigli et al., 2013) , where preprocessing information of a given corpus is also encoded.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 191, |
| "text": "(Moro and Navigli, 2015)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 448, |
| "end": 470, |
| "text": "(Navigli et al., 2013)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Standardization of WSD datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "2. Once the dataset is converted to a unified format, we map the sense annotations from its original WordNet version to 3.0, which is the latest version of WordNet used in evaluation datasets. This mapping is carried out semiautomatically. First, we use automaticallyconstructed WordNet mappings 1 (Daude et al., 2003) . These mappings provide confidence values which we use to initially map senses whose mapping confidence is 100%. Then, the annotations of the remaining senses are manually checked, and re-annotated or removed whenever necessary 2 . Additionally, in this step we decided to remove all annotations of auxiliary verbs, following the annotation guidelines of the latest WSD datasets.", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 318, |
| "text": "(Daude et al., 2003)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Standardization of WSD datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "CoreNLP toolkit (Manning et al., 2014) for Part-of-Speech (PoS) tagging 3 and lemmatization. This step is performed in order to ensure that all systems use the same preprocessed data.", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 38, |
| "text": "(Manning et al., 2014)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Standardization of WSD datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "4. Finally, we developed a script to check that the final dataset conforms to the aforementioned guidelines. In this final verification we also ensured that the sense annotations match the lemma and the PoS tag provided by Stanford CoreNLP by automatically fixing all divergences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Standardization of WSD datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this section we summarize the WSD datasets used in the evaluation framework. To all these datasets we apply the standardization pipeline described in Section 3. First, we enumerate all the datasets used for the evaluation (Section 4.1). Second, we describe the sense-annotated corpora used for training (Section 4.2). Finally, we show some relevant statistics extracted from these resources (Section 4.3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For our evaluation framework we considered five standard all-words fine-grained WSD datasets from the Senseval and SemEval competitions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "WSD evaluation datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Senseval-2 (Edmonds and Cotton, 2001 ). This dataset was originally annotated with WordNet 1.7. After standardization, it consists of 2282 sense annotations, including nouns, verbs, adverbs and adjectives.", |
| "cite_spans": [ |
| { |
| "start": 13, |
| "end": 38, |
| "text": "(Edmonds and Cotton, 2001", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "WSD evaluation datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Senseval-3 task 1 (Snyder and Palmer, 2004) . The WordNet version of this dataset was 1.7.1. It consists of three documents from three different domains (editorial, news story and fiction), totaling 1850 sense annotations. Table 1 : Statistics of the WSD datasets used in the evaluation framework (after standardization).", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 45, |
| "text": "(Snyder and Palmer, 2004)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 225, |
| "end": 232, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "WSD evaluation datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 SemEval-07 task 17 (Pradhan et al., 2007) . This is the smallest among the five datasets, containing 455 sense annotations for nouns and verbs only. It was originally annotated using WordNet 2.1 sense inventory.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 43, |
| "text": "(Pradhan et al., 2007)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "WSD evaluation datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 SemEval-13 task 12 (Navigli et al., 2013) . This dataset includes thirteen documents from various domains. In this case the original sense inventory was WordNet 3.0, which is the same as the one that we use for all datasets. The number of sense annotations is 1644, although only nouns are considered.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 43, |
| "text": "(Navigli et al., 2013)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "WSD evaluation datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 SemEval-15 task 13 (Moro and Navigli, 2015) . This is the most recent WSD dataset available to date, annotated with WordNet 3.0. It consists of 1022 sense annotations in four documents coming from three heterogeneous domains: biomedical, mathematics/computing and social issues.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 45, |
| "text": "(Moro and Navigli, 2015)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "WSD evaluation datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We now describe the two WordNet senseannotated corpora used for training the supervised systems in our evaluation framework:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense-annotated training corpora", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 SemCor (Miller et al., 1994) . SemCor 4 is a manually sense-annotated corpus divided into 352 documents for a total of 226,040 sense annotations. It was originally tagged with senses from the WordNet 1.4 sense inventory. SemCor is, to our knowledge, the largest corpus manually annotated with WordNet senses, and is the main corpus used in the literature to train supervised WSD systems (Agirre et al., 2010b; Zhong and Ng, 2010) .", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 30, |
| "text": "(Miller et al., 1994)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 389, |
| "end": 411, |
| "text": "(Agirre et al., 2010b;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 412, |
| "end": 431, |
| "text": "Zhong and Ng, 2010)", |
| "ref_id": "BIBREF55" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense-annotated training corpora", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 OMSTI (Taghipour and Ng, 2015a) . OM-STI (One Million Sense-Tagged Instances) is a large corpus annotated with senses from the WordNet 3.0 inventory. It was automatically constructed by using an alignmentbased WSD approach (Chan and Ng, 2005) on a large English-Chinese parallel corpus (Eisele and Chen, 2010, MultiUN corpus) . OMSTI 5 has already shown its potential as a training corpus by improving the performance of supervised systems which add it to existing training data (Taghipour and Ng, 2015a; Iacobacci et al., 2016) . Table 1 shows some statistics 6 of the WSD datasets and training corpora which we use in the evaluation framework. The number of sense annotations varies across datasets, ranging from 455 annotations in the SemEval-07 dataset, to 2,282 annotations in the Senseval-2 dataset. As regards sense-annotated corpora, OMSTI is made up of almost 1M sense annotations, a considerable increase over the number of sense annotations of SemCor. However, SemCor is much more balanced in terms of unique senses covered (3,730 covered by OMSTI in contrast to over 33K covered by SemCor). Additionally, while OMSTI was constructed automatically, SemCor was manually built and, hence, its quality is expected to be higher. Finally, we calculated the ambiguity level of each dataset, computed as the total number of can-didate senses (i.e., senses sharing the surface form of the target word) divided by the number of sense annotations. The highest ambiguity is found on OMSTI, which, despite being constructed automatically, contains a high coverage of ambiguous words. As far as the evaluation competition datasets are concerned, the ambiguity may give a hint as to how difficult a given dataset may be. In this case, SemEval-07 displays the highest ambiguity level among all evaluation datasets.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 33, |
| "text": "(Taghipour and Ng, 2015a)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 288, |
| "end": 327, |
| "text": "(Eisele and Chen, 2010, MultiUN corpus)", |
| "ref_id": null |
| }, |
| { |
| "start": 481, |
| "end": 506, |
| "text": "(Taghipour and Ng, 2015a;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 507, |
| "end": 530, |
| "text": "Iacobacci et al., 2016)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 533, |
| "end": 540, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sense-annotated training corpora", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The evaluation framework consists of the WSD evaluation datasets described in Section 4.1. In this section we use this framework to perform an empirical comparison among a set of heterogeneous WSD systems. The systems used in the evaluation are described in detail in Section 5.1, the results are shown in Section 5.2 and a detailed analysis is presented in Section 5.3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We include three supervised (Section 5.1.1) and three knowledge-based (Section 5.1.2) all-words WSD systems in our empirical comparison.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison systems", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "To ensure a fair comparison, all supervised systems use the same corpus for training: SemCor and Semcor+OMSTI 7 (see Section 4.2). In the following we describe the three supervised WSD systems used in the evaluation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Supervised", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "\u2022 IMS (Zhong and Ng, 2010) uses a Support Vector Machine (SVM) classifier over a set of conventional WSD features. IMS 8 is built on a flexible framework which allows an easy integration of different features. The default implementation includes surrounding words, PoS tags of surroundings words, and local collocations as features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Supervised", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "\u2022 IMS+embeddings (Taghipour and Ng, 2015b; Rothe and Sch\u00fctze, 2015; Iacobacci et al., 2016) . These approaches have shown the potential of using word embeddings on the WSD task. Iacobacci et al. (2016) carried out a comparison of different strategies for integrating word embeddings as a feature in WSD. In this paper we consider the two best configurations in Iacobacci et al. (2016) 9 : using all IMS default features including and excluding surrounding words (IMS+emb and IMS -s +emb, respectively). In both cases word embeddings are integrated using exponential decay (i.e., word weights drop exponentially as the distance towards the target word increases). Likewise, we use Iacobacci et al.'s suggested learning strategy and hyperparameters to train the word embeddings: Skip-gram model of Word2Vec 10 (Mikolov et al., 2013) with 400 dimensions, ten negative samples and a window size of ten words. As unlabeled corpus to train the word embeddings we use the English ukWaC corpus 11 (Baroni et al., 2009) , which is made up of two billion words from paragraphs extracted from the web.", |
| "cite_spans": [ |
| { |
| "start": 17, |
| "end": 42, |
| "text": "(Taghipour and Ng, 2015b;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 43, |
| "end": 67, |
| "text": "Rothe and Sch\u00fctze, 2015;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 68, |
| "end": 91, |
| "text": "Iacobacci et al., 2016)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 178, |
| "end": 201, |
| "text": "Iacobacci et al. (2016)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 361, |
| "end": 384, |
| "text": "Iacobacci et al. (2016)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 989, |
| "end": 1010, |
| "text": "(Baroni et al., 2009)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Supervised", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "\u2022 Context2Vec (Melamud et al., 2016) . Neural language models have recently shown their potential for the WSD task (K\u00e5geb\u00e4ck and Salomonsson, 2016; Yuan et al., 2016) . In this experiment we replicated the approach of Melamud et al. (2016, Context2Vec) , for which the code 12 is publicly available. This approach is divided in three steps. First, a bidirectional LSTM recurrent neural network is trained on an unlabeled corpus (we considered the same ukWaC corpus used by the previous comparison system). Then, a context vector is learned for each sense annotation in the training corpus. Finally, the sense annotation whose context vector is closer to the target word's context vector is selected as the intended sense.", |
| "cite_spans": [ |
| { |
| "start": 14, |
| "end": 36, |
| "text": "(Melamud et al., 2016)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 115, |
| "end": 147, |
| "text": "(K\u00e5geb\u00e4ck and Salomonsson, 2016;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 148, |
| "end": 166, |
| "text": "Yuan et al., 2016)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 218, |
| "end": 252, |
| "text": "Melamud et al. (2016, Context2Vec)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Supervised", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "Finally, as baseline we included the Most Frequent Sense (MFS) heuristic, which for each target word selects the sense occurring the highest number of times in the training corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Supervised", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "In this section we describe the three knowledgebased WSD models used in our empirical comparison:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-based", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "\u2022 Lesk (Lesk, 1986 ) is a simple knowledgebased WSD algorithm that bases its calculations on the overlap between the definitions of a given sense and the context of the target word. For our experiments we replicated the extended version of the original algorithm in which definitions of related senses are also considered and the conventional term frequency-inverse document frequency (Jones, 1972 , tf-idf ) is used for word weighting (Banerjee and Pedersen, 2003, Lesk ext ). Additionally, we included the enhanced version of Lesk in which word embeddings 13 are leveraged to compute the similarity between definitions and the target context (Basile et al., 2014, Lesk ext +emb) 14 .", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 18, |
| "text": "(Lesk, 1986", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 385, |
| "end": 397, |
| "text": "(Jones, 1972", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 436, |
| "end": 474, |
| "text": "(Banerjee and Pedersen, 2003, Lesk ext", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-based", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "\u2022 UKB (Agirre and Soroa, 2009; Agirre et al., 2014 ) is a graph-based WSD system which makes use of random walks over a semantic network (WordNet graph in this case). UKB 15 applies the Personalized Page Rank algorithm (Haveliwala, 2002) initialized using the context of the target word. Unlike most WSD systems, UKB does not back-off to the WordNet first sense heuristic and it is self-contained (i.e., it does not make use of any external resources/corpora). We used both default configurations from UKB: using the full WordNet graph (UKB) and the full graph including disambiguated glosses as connections as well (UKB gloss).", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 30, |
| "text": "(Agirre and Soroa, 2009;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 31, |
| "end": 50, |
| "text": "Agirre et al., 2014", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 219, |
| "end": 237, |
| "text": "(Haveliwala, 2002)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-based", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "\u2022 Babelfy (Moro et al., 2014 ) is a graph-based disambiguation approach which exploits random walks to determine connections between synsets. Specifically, Babelfy 16 uses random walks with restart (Tong et al., 2006) over BabelNet (Navigli and Ponzetto, 2012) , a large semantic network integrating Word-Net among other resources such as Wikipedia 13 We used the same word embeddings described in Section 5.1.1 for IMS+emb.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 28, |
| "text": "(Moro et al., 2014", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 198, |
| "end": 217, |
| "text": "(Tong et al., 2006)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 232, |
| "end": 260, |
| "text": "(Navigli and Ponzetto, 2012)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 349, |
| "end": 351, |
| "text": "13", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-based", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "14 We used the implementation from https://github. com/pippokill/lesk-wsd-dsm. In this implementation additional definitions from BabelNet are considered. 15 We used the last implementation available at http://ixa2.si.ehu.es/ukb/ 16 We used the Java API from http://babelfy.org or Wiktionary. Its algorithm is based on a densest subgraph heuristic for selecting highcoherence semantic interpretations of the input text. The best configuration of Babelfy takes into account not only the target sentence in which the target word occurs, but also the whole document.", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 157, |
| "text": "15", |
| "ref_id": null |
| }, |
| { |
| "start": 230, |
| "end": 232, |
| "text": "16", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-based", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "As knowledge-based baseline we included the WordNet first sense. This baseline simply selects the candidate which is considered as first sense in WordNet 3.0. Even though the sense order was decided on the basis of semantically-tagged text, we considered it as knowledge-based in this experiment as this information is already available in WordNet. In fact, knowledge-based systems like Babelfy include this information in their pipeline. Despite its simplicity, this baseline has been shown to be hard to beat by automatic WSD systems (Navigli, 2009; Agirre et al., 2014) . Table 2 shows the F-Measure performance of all comparison systems on the five all-words WSD datasets. Since not all test word instances are covered by the corresponding training corpora, supervised systems have a maximum F-Score (ceiling in the Table) they can achieve. Nevertheless, supervised systems consistently outperform knowledge-based systems across datasets, confirming the results of Pilehvar and Navigli (2014) . A simple linear classifier over conventional WSD features (i.e., IMS) proves to be robust across datasets, consistently outperforming the MFS baseline. The recent integration of word embeddings as an additional feature is beneficial, especially as a replacement of the feature based on the surface form of surrounding words (i.e., IMS -s +emb). Moreover, recent advances on neural language models (in the case of Context2Vec a bi-directional LSTM) appear to be highly promising for the WSD task according to the results, as Context2Vec outperforms IMS in most datasets.", |
| "cite_spans": [ |
| { |
| "start": 536, |
| "end": 551, |
| "text": "(Navigli, 2009;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 552, |
| "end": 572, |
| "text": "Agirre et al., 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 969, |
| "end": 996, |
| "text": "Pilehvar and Navigli (2014)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 575, |
| "end": 582, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 820, |
| "end": 826, |
| "text": "Table)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Knowledge-based", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "On the other hand, it is also interesting to note the performance inconsistencies of systems across datasets, as in all cases there is a large performance gap between the best and the worst performing dataset. As explained in Section 4.3, the ambiguity level may give a hint as to how difficult the corresponding dataset may be. In fact, WSD systems obtain relatively low results in SemEval-07, which is the most ambiguous dataset (see Table 1 ). However, this is the dataset in which supervised systems achieve a larger margin with respect to the MFS baseline, which suggests that, in general, the MFS heuristic does not perform accurately on highly ambiguous words.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 436, |
| "end": 443, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "To complement the results from the previous section, we additionally carried out a detailed analysis about the global performance of each system and divided by PoS tag. To this end, we concatenated all five datasets into a single dataset. This resulted in a large evaluation dataset of 7,253 instances to disambiguate (see Table 3 ). Table 4 shows the F-Measure performance of all comparison systems on the concatenation of all five WSD evaluation datasets, divided by PoS tag. IMS -s +emb trained on SemCor+OMSTI achieves the best overall results, slightly above Context2Vec trained on the same corpus. In what follows we describe some of the main findings extracted from our analysis.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 323, |
| "end": 330, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 334, |
| "end": 341, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Training corpus. In general, the results of supervised systems trained on SemCor only (manually-annotated) are lower than training simultaneously on both SemCor and OMSTI (automatically-annotated). This is a promising finding, which confirms the results of previous works (Raganato et al., 2016; Iacobacci et al., 2016; Yuan et al., 2016) and encourages further research on developing reliable automatic or semiautomatic methods to obtain large amounts of sense-annotated corpora in order to overcome the knowledge-acquisition bottleneck. For instance, Context2Vec improves 0.4 points overall when adding the automatically sense-annotated OMSTI as part of the training corpus, suggesting that more data, even if not perfectly clean, may be beneficial for neural language models.", |
| "cite_spans": [ |
| { |
| "start": 272, |
| "end": 295, |
| "text": "(Raganato et al., 2016;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 296, |
| "end": 319, |
| "text": "Iacobacci et al., 2016;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 320, |
| "end": 338, |
| "text": "Yuan et al., 2016)", |
| "ref_id": "BIBREF54" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Knowledge-based vs. Supervised. One of the main conclusions that can be taken from the evaluation is that supervised systems clearly outperform knowledge-based models. This may be due to the fact that in many cases the main disambiguation clue is given by the immediate local context. This is particularly problematic for knowledge-based systems, as they take equally into account all the words within a sentence (or document in the case of Babelfy). For instance, in the following sentence, both UKB and Babelfy fail to predict the correct sense of state:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "In sum, at both the federal and state government levels at least part of the seemingly irrational behavior voters display in the voting booth may have an exceedingly rational explanation. In this sentence, state is annotated with its administrative districts of a nation sense in the gold standard. The main disambiguation clue seems to be given by its previous and immediate subsequent words (federal and government), which tend to co-occur with this particular sense. However, knowledge-based WSD systems like UKB or Babelfy give the same weight to all words in context, underrating the importance of this local disambiguation clue in the example. For instance, UKB disambiguates state with the sense defined as the way something is with respect to its main attributes, probably biased by words which are not immediately next to the target word within the sentence, e.g., irrational, behaviour, rational or explanation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Low overall performance on verbs. As can be seen from Table 4 , the F-Measure performance of all systems on verbs is in all cases below 58%. This can be explained by the high granularity of verbs in WordNet. For instance, the verb keep consists of 22 different meanings in WordNet 3.0, six of them denoting \"possession and transfer of possession\" 17 . In fact, the average ambiguity level of all verbs in this evaluation framework is 10.4 (see Table 3 ), considerably greater than the ambiguity on other PoS tags, e.g., 4.8 in nouns. Nonetheless, supervised systems manage to comfortably outperform the MFS baseline, which does not seem to be reliable for verbs given their high ambiguity.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 54, |
| "end": 61, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 444, |
| "end": 451, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Influence of preprocessing. As mentioned in Section 3, our evaluation framework provides a preprocessing of the corpora with Stanford CoreNLP. This ensures a fair comparison among all systems but may introduce some annotation inaccuracies, such as erroneous PoS tags. However, for English these errors are minimal 18 . For instance, the global error rate of the Stanford PoS tagger in all disambiguation instances is 3.9%, which were fixed as explained in Section 3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Bias towards the Most Frequent Sense. After carrying out an analysis on the influence of MFS in WSD systems 19 , we found that all supervised systems suffer a strong bias towards the MFS, with all IMS-based systems disambiguating over 75% of instances with their MFS. Context2Vec is slightly less affected by this bias, with 71.5% (SemCor) and 74.7% (SemCor+OMSTI) of answers corre-sponding to the MFS. Interestingly, this MFS bias is also present in graph knowledge-based systems. In fact, Calvo and Gelbukh (2015) had already shown how the MFS correlates strongly with the number of connections in WordNet.", |
| "cite_spans": [ |
| { |
| "start": 108, |
| "end": 110, |
| "text": "19", |
| "ref_id": null |
| }, |
| { |
| "start": 491, |
| "end": 515, |
| "text": "Calvo and Gelbukh (2015)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Knowledge-based systems. For knowledgebased systems the WN first sense baseline proves still to be extremely hard to beat. The only knowledge-based system that overall manages to beat this baseline is Babelfy, which, in fact, uses information about the first sense in its pipeline. Babelfy's default pipeline includes a confidence threshold in order to decide whether to disambiguate or back-off to the first sense. In total, Babelfy backs-off to WN first sense in 63% of all instances. Nonetheless, it is interesting to note the high performance of Babelfy and Lesk ext +emb on noun instances (outperforming the first sense baseline by 1.0 and 2.2 points, respectively) in contrast to their relatively lower performance on verbs, adjectives 20 and adverbs. We believe that this is due to the nature of the lexical resource used by these two systems, i.e., BabelNet. BabelNet includes Wikipedia as one of its main sources of information. However, while Wikipedia provides a large amount of semantic connections and definitions for nouns, this it not the case for verbs, adjectives and adverbs, as they are not included in Wikipedia and their source of information mostly comes from WordNet only.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "In this paper we presented a unified evaluation framework for all-words WSD. This framework is based on evaluation datasets taken from Senseval and SemEval competitions, as well as manually and automatically sense-annotated corpora. In this evaluation framework all datasets share a common format, sense inventory (i.e., WordNet 3.0) and preprocessing pipeline, which eases the task of researchers to evaluate their models and, more importantly, ensures a fair comparison among all systems. The whole evaluation framework 21 , including guidelines for researchers to include their own sense-annotated datasets and a script to validate their conformity to the guidelines, is available at http://lcl.uniroma1.it/wsdeval .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We used this framework to perform an empirical comparison among a set of heterogeneous WSD systems, including both knowledge-based and supervised ones. Supervised systems based on neural networks achieve the most promising results. Given our analysis, we foresee two potential research avenues focused on semi-supervised learning: (1) exploiting large amounts of unlabeled corpora for learning word embeddings or training neural language models, and (2) automatically constructing high-quality sense-annotated corpora to be used by supervised WSD systems. As far as knowledge-based systems are concerned, enriching knowledge resources with semantic connections for non-nominal mentions may be an important step towards improving their performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "For future work we plan to further extend our unified framework to languages other than English, including SemEval multilingual WSD datasets, as well as to other sense inventories such as Open Multilingual WordNet, BabelNet and Wikipedia, which are available in different languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": ". The third step consists of preprocessing the given dataset. We used the Stanford 1 http://nlp.lsi.upc.edu/tools/ download-map.php 2 This manual correction involved less than 10% of all instances for the datasets for which this step was performed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In order to have a standard format which may be used by languages other than English, we provide coarse-grained PoS tags as given by the universal PoS tagset(Petrov et al., 2011).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We downloaded the SemCor 3.0 version at web.eecs. umich.edu/\u02dcmihalcea/downloads.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper we refer to the portion of sense-annotated data from the MultiUN corpus as OMSTI. Note that OMSTI was released along with SemCor.6 Statistics included inTable 1: number of documents (#Docs), sentences (#Sents), tokens (#Tokens), sense annotations (#Annotations), sense types covered (#Sense types), annotated lemma types covered (#Word types), and ambiguity level (Ambiguity). There was no document information in the OMSTI data released byTaghipour and Ng (2015a).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "As already noted byTaghipour and Ng (2015a), supervised systems trained on only OMSTI obtain lower results than when trained along with SemCor, mainly due to OM-STI's lack of coverage in target word types.8 We used the original implementation available at http: //www.comp.nus.edu.sg/\u02dcnlp/software.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We used the implementation available at https:// github.com/iiacobac/ims_wsd_emb 10 code.google.com/archive/p/word2vec/ 11 http://wacky.sslmit.unibo.it/doku. php?id=corpora12 https://github.com/orenmel/ context2vec", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://wordnet.princeton.edu/man/ lexnames.5WN.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Even if preprocessing plays a minimal role for English, it may be of higher importance for other languages, e.g., morphologically richer languages(Eger et al., 2016).19 SeePostma et al. (2016) for an interesting discussion on the bias of current WSD systems towards the MFS.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The poor performance of Leskext+emb on adjective instances is particularly noticeable.21 We have additionally set up a CodaLab competition based on this evaluation framework.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors gratefully acknowledge the support of the ERC Starting Grant MultiJEDI No. 259234.Jose Camacho-Collados is supported by a Google PhD Fellowship in Natural Language Processing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Personalizing PageRank for Word Sense Disambiguation", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Aitor", |
| "middle": [], |
| "last": "Soroa", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "33--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre and Aitor Soroa. 2009. Personalizing PageRank for Word Sense Disambiguation. In Pro- ceedings of EACL, pages 33-41.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Semeval-2010 task 17: All-words word sense disambiguation on a specific domain", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Oier", |
| "middle": [], |
| "last": "Lopez De Lacalle", |
| "suffix": "" |
| }, |
| { |
| "first": "Christiane", |
| "middle": [], |
| "last": "Fellbaum", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Marchetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Toral", |
| "suffix": "" |
| }, |
| { |
| "first": "Piek", |
| "middle": [], |
| "last": "Vossen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Workshop on Semantic Evaluations: Recent Achievements and Future Directions", |
| "volume": "", |
| "issue": "", |
| "pages": "123--128", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre, Oier Lopez De Lacalle, Christiane Fell- baum, Andrea Marchetti, Antonio Toral, and Piek Vossen. 2010a. Semeval-2010 task 17: All-words word sense disambiguation on a specific domain. In Proceedings of the Workshop on Semantic Evalua- tions: Recent Achievements and Future Directions, pages 123-128.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Semeval-2010 task 17: All-words word sense disambiguation on a specific domain", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Oier", |
| "middle": [], |
| "last": "Lopez De Lacalle", |
| "suffix": "" |
| }, |
| { |
| "first": "Christiane", |
| "middle": [], |
| "last": "Fellbaum", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Marchetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Toral", |
| "suffix": "" |
| }, |
| { |
| "first": "Piek", |
| "middle": [], |
| "last": "Vossen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Workshop on Semantic Evaluations: Recent Achievements and Future Directions", |
| "volume": "", |
| "issue": "", |
| "pages": "123--128", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre, Oier Lopez De Lacalle, Christiane Fell- baum, Andrea Marchetti, Antonio Toral, and Piek Vossen. 2010b. Semeval-2010 task 17: All-words word sense disambiguation on a specific domain. In Proceedings of the Workshop on Semantic Evalua- tions: Recent Achievements and Future Directions, pages 123-128.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Random walks for knowledge-based word sense disambiguation", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Oier", |
| "middle": [], |
| "last": "Lopez De Lacalle", |
| "suffix": "" |
| }, |
| { |
| "first": "Aitor", |
| "middle": [], |
| "last": "Soroa", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Computational Linguistics", |
| "volume": "40", |
| "issue": "1", |
| "pages": "57--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre, Oier Lopez de Lacalle, and Aitor Soroa. 2014. Random walks for knowledge-based word sense disambiguation. Computational Linguistics, 40(1):57-84.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Extended gloss overlap as a measure of semantic relatedness", |
| "authors": [ |
| { |
| "first": "Satanjeev", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Ted", |
| "middle": [], |
| "last": "Pedersen", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 18th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "805--810", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Satanjeev Banerjee and Ted Pedersen. 2003. Extended gloss overlap as a measure of semantic relatedness. In Proceedings of the 18th International Joint Con- ference on Artificial Intelligence, pages 805-810, Acapulco, Mexico.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The wacky wide web: a collection of very large linguistically processed web-crawled corpora. Language resources and evaluation", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvia", |
| "middle": [], |
| "last": "Bernardini", |
| "suffix": "" |
| }, |
| { |
| "first": "Adriano", |
| "middle": [], |
| "last": "Ferraresi", |
| "suffix": "" |
| }, |
| { |
| "first": "Eros", |
| "middle": [], |
| "last": "Zanchetta", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "43", |
| "issue": "", |
| "pages": "209--226", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Baroni, Silvia Bernardini, Adriano Ferraresi, and Eros Zanchetta. 2009. The wacky wide web: a collection of very large linguistically pro- cessed web-crawled corpora. Language resources and evaluation, 43(3):209-226.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "An Enhanced Lesk Word Sense Disambiguation Algorithm through a Distributional Semantic Model", |
| "authors": [ |
| { |
| "first": "Pierpaolo", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Annalina", |
| "middle": [], |
| "last": "Caputo", |
| "suffix": "" |
| }, |
| { |
| "first": "Giovanni", |
| "middle": [], |
| "last": "Semeraro", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "1591--1600", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pierpaolo Basile, Annalina Caputo, and Giovanni Se- meraro. 2014. An Enhanced Lesk Word Sense Dis- ambiguation Algorithm through a Distributional Se- mantic Model. In Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pages 1591-1600, Dublin, Ireland.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Semisupervised learning with induced word senses for state of the art word sense disambiguation", |
| "authors": [ |
| { |
| "first": "Osman", |
| "middle": [], |
| "last": "Ba\u015fkaya", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Jurgens", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "55", |
| "issue": "", |
| "pages": "1025--1058", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Osman Ba\u015fkaya and David Jurgens. 2016. Semi- supervised learning with induced word senses for state of the art word sense disambiguation. Journal of Artificial Intelligence Research, 55:1025-1058.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Is the most frequent sense of a word better connected in a semantic network", |
| "authors": [ |
| { |
| "first": "Hiram", |
| "middle": [], |
| "last": "Calvo", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Gelbukh", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Intelligent Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "491--499", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiram Calvo and Alexander Gelbukh. 2015. Is the most frequent sense of a word better connected in a semantic network? In International Conference on Intelligent Computing, pages 491-499. Springer.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A Unified Multilingual Semantic Representation of Concepts", |
| "authors": [ |
| { |
| "first": "Jos\u00e9", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "741--751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jos\u00e9 Camacho-Collados, Mohammad Taher Pilehvar, and Roberto Navigli. 2015. A Unified Multilingual Semantic Representation of Concepts. In Proceed- ings of ACL, pages 741-751.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A Large-Scale Multilingual Disambiguation of Glosses", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "1701--1708", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A Large-Scale Multilingual Disambiguation of Glosses. In Proceedings of LREC, pages 1701- 1708, Portoroz, Slovenia.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Nasari: Integrating explicit knowledge and corpus statistics for a multilingual representation of concepts and entities", |
| "authors": [ |
| { |
| "first": "Jos\u00e9", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Artificial Intelligence", |
| "volume": "240", |
| "issue": "", |
| "pages": "36--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jos\u00e9 Camacho-Collados, Mohammad Taher Pilehvar, and Roberto Navigli. 2016b. Nasari: Integrating explicit knowledge and corpus statistics for a multi- lingual representation of concepts and entities. Arti- ficial Intelligence, 240:36-64.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Scaling up word sense disambiguation via parallel texts", |
| "authors": [ |
| { |
| "first": "Yee", |
| "middle": [ |
| "Seng" |
| ], |
| "last": "Chan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "AAAI", |
| "volume": "5", |
| "issue": "", |
| "pages": "1037--1042", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yee Seng Chan and Hwee Tou Ng. 2005. Scaling up word sense disambiguation via parallel texts. In AAAI, volume 5, pages 1037-1042.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A unified model for word sense representation and disambiguation", |
| "authors": [ |
| { |
| "first": "Xinxiong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1025--1035", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xinxiong Chen, Zhiyuan Liu, and Maosong Sun. 2014. A unified model for word sense representation and disambiguation. In Proceedings of EMNLP, pages 1025-1035, Doha, Qatar.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Validation and tuning of wordnet mapping techniques", |
| "authors": [ |
| { |
| "first": "Jordi", |
| "middle": [], |
| "last": "Daude", |
| "suffix": "" |
| }, |
| { |
| "first": "Lluis", |
| "middle": [], |
| "last": "Padro", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Rigau", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of RANLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jordi Daude, Lluis Padro, and German Rigau. 2003. Validation and tuning of wordnet mapping tech- niques. In Proceedings of RANLP.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A methodology for word sense disambiguation at 90% based on large-scale crowdsourcing", |
| "authors": [ |
| { |
| "first": "Oier", |
| "middle": [], |
| "last": "Lopez De Lacalle", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Lexical and Computational Semantics (* SEM 2015)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oier Lopez de Lacalle and Eneko Agirre. 2015. A methodology for word sense disambiguation at 90% based on large-scale crowdsourcing. Lexical and Computational Semantics (* SEM 2015), page 61.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Senseval-2: Overview", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Edmonds", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Cotton", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of The Second International Workshop on Evaluating Word Sense Disambiguation Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Edmonds and Scott Cotton. 2001. Senseval-2: Overview. In Proceedings of The Second Interna- tional Workshop on Evaluating Word Sense Disam- biguation Systems, pages 1-6, Toulouse, France.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Lemmatization and morphological tagging in german and latin: A comparison and a survey of the state-of-the-art", |
| "authors": [ |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Eger", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00fcdiger", |
| "middle": [], |
| "last": "Gleim", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Mehler", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steffen Eger, R\u00fcdiger Gleim, and Alexander Mehler. 2016. Lemmatization and morphological tagging in german and latin: A comparison and a survey of the state-of-the-art. In Proceedings of LREC 2016.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "MultiUN: A Multilingual Corpus from United Nation Documents", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Eisele", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Seventh conference on International Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "2868--2872", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Eisele and Yu Chen. 2010. MultiUN: A Mul- tilingual Corpus from United Nation Documents. In Proceedings of the Seventh conference on Interna- tional Language Resources and Evaluation, pages 2868-2872.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Combining orthogonal monolingual and multilingual sources of evidence for all words WSD", |
| "authors": [ |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [ |
| "T" |
| ], |
| "last": "Diab", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "1542--1551", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Weiwei Guo and Mona T. Diab. 2010. Combining orthogonal monolingual and multilingual sources of evidence for all words WSD. In Proceedings of the 48th Annual Meeting of the Association for Compu- tational Linguistics (ACL), pages 1542-1551, Upp- sala, Sweden.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Topic-sensitive PageRank", |
| "authors": [ |
| { |
| "first": "Taher", |
| "middle": [ |
| "H" |
| ], |
| "last": "Haveliwala", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 11th International Conference on World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "517--526", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taher H. Haveliwala. 2002. Topic-sensitive PageRank. In Proceedings of the 11th International Conference on World Wide Web, pages 517-526, Hawaii, USA.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Embeddings for word sense disambiguation: An evaluation study", |
| "authors": [ |
| { |
| "first": "Ignacio", |
| "middle": [], |
| "last": "Iacobacci", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "897--907", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ignacio Iacobacci, Mohammad Taher Pilehvar, and Roberto Navigli. 2016. Embeddings for word sense disambiguation: An evaluation study. In Proceed- ings of ACL, pages 897-907, Berlin, Germany.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "A statistical interpretation of term specificity and its application in retrieval", |
| "authors": [ |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Sp\u00e4rck Jones", |
| "suffix": "" |
| } |
| ], |
| "year": 1972, |
| "venue": "Journal of Documentation", |
| "volume": "28", |
| "issue": "", |
| "pages": "11--21", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karen Sp\u00e4rck Jones. 1972. A statistical interpretation of term specificity and its application in retrieval. Journal of Documentation, 28:11-21.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Word sense disambiguation using a bidirectional lstm", |
| "authors": [ |
| { |
| "first": "Mikael", |
| "middle": [], |
| "last": "K\u00e5geb\u00e4ck", |
| "suffix": "" |
| }, |
| { |
| "first": "Hans", |
| "middle": [], |
| "last": "Salomonsson", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.03568" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikael K\u00e5geb\u00e4ck and Hans Salomonsson. 2016. Word sense disambiguation using a bidirectional lstm. arXiv preprint arXiv:1606.03568.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "An empirical evaluation of knowledge sources and learning algorithms for word sense disambiguation", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Keok", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "T" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 7 th Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "41--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L. Y. Keok and H. T. Ng. 2002. An empirical evalu- ation of knowledge sources and learning algorithms for word sense disambiguation. In Proceedings of the 7 th Conference on Empirical Methods in Nat- ural Language Processing, pages 41-48, Philadel- phia, USA.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Automatic sense disambiguation using machine readable dictionaries: How to tell a pine cone from an ice cream cone", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Lesk", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Proceedings of the 5th Annual Conference on Systems Documentation", |
| "volume": "", |
| "issue": "", |
| "pages": "24--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Lesk. 1986. Automatic sense disambiguation using machine readable dictionaries: How to tell a pine cone from an ice cream cone. In Proceedings of the 5th Annual Conference on Systems Documen- tation, Toronto, Ontario, Canada, pages 24-26.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "The Stanford CoreNLP natural language processing toolkit", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenny", |
| "middle": [], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [ |
| "J" |
| ], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "McClosky", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Association for Computational Linguistics (ACL) System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "55--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher D. Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven J. Bethard, and David Mc- Closky. 2014. The Stanford CoreNLP natural lan- guage processing toolkit. In Association for Compu- tational Linguistics (ACL) System Demonstrations, pages 55-60.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "context2vec: Learning generic context embedding with bidirectional lstm", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Melamud", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Goldberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of CONLL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oren Melamud, Jacob Goldberger, and Ido Dagan. 2016. context2vec: Learning generic context em- bedding with bidirectional lstm. In Proceedings of CONLL.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "CoRR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word represen- tations in vector space. CoRR, abs/1301.3781.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Using a semantic concordance for sense identification", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "A" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Chodorow", |
| "suffix": "" |
| }, |
| { |
| "first": "Shari", |
| "middle": [], |
| "last": "Landes", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [], |
| "last": "Leacock", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "G" |
| ], |
| "last": "Thomas", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proceedings of the workshop on Human Language Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "240--243", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A Miller, Martin Chodorow, Shari Landes, Claudia Leacock, and Robert G Thomas. 1994. Using a semantic concordance for sense identifica- tion. In Proceedings of the workshop on Human Language Technology, pages 240-243. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Using distributional similarity for lexical expansion in knowledge-based word sense disambiguation", |
| "authors": [ |
| { |
| "first": "Tristan", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Biemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Torsten", |
| "middle": [], |
| "last": "Zesch", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "1781--1796", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tristan Miller, Chris Biemann, Torsten Zesch, and Iryna Gurevych. 2012. Using distributional similar- ity for lexical expansion in knowledge-based word sense disambiguation. In COLING, pages 1781- 1796.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Wordnet: a lexical database for english", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "A" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Communications of the ACM", |
| "volume": "38", |
| "issue": "11", |
| "pages": "39--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A Miller. 1995. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Semeval-2015 task 13: Multilingual all-words sense disambiguation and entity linking", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Moro", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of SemEval-2015", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Moro and Roberto Navigli. 2015. Semeval- 2015 task 13: Multilingual all-words sense dis- ambiguation and entity linking. Proceedings of SemEval-2015.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Entity Linking meets Word Sense Disambiguation: a Unified Approach. Transactions of the Association for Computational Linguistics (TACL)", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Moro", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Raganato", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics (TACL)", |
| "volume": "2", |
| "issue": "", |
| "pages": "231--244", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Moro, Alessandro Raganato, and Roberto Nav- igli. 2014. Entity Linking meets Word Sense Dis- ambiguation: a Unified Approach. Transactions of the Association for Computational Linguistics (TACL), 2:231-244.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "BabelNet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "Simone", |
| "middle": [ |
| "Paolo" |
| ], |
| "last": "Ponzetto", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Artificial Intelligence", |
| "volume": "193", |
| "issue": "", |
| "pages": "217--250", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli and Simone Paolo Ponzetto. 2012. BabelNet: The automatic construction, evaluation and application of a wide-coverage multilingual se- mantic network. Artificial Intelligence, 193:217- 250.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "SemEval-2007 task 07: Coarsegrained English all-words task", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [ |
| "C" |
| ], |
| "last": "Litkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Orin", |
| "middle": [], |
| "last": "Hargraves", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Fourth International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "30--35", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli, Kenneth C. Litkowski, and Orin Har- graves. 2007. SemEval-2007 task 07: Coarse- grained English all-words task. In Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007), Prague, Czech Repub- lic, pages 30-35.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "SemEval-2013 Task 12: Multilingual Word Sense Disambiguation", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Jurgens", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Vannella", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of SemEval 2013", |
| "volume": "", |
| "issue": "", |
| "pages": "222--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli, David Jurgens, and Daniele Vannella. 2013. SemEval-2013 Task 12: Multilingual Word Sense Disambiguation. In Proceedings of SemEval 2013, pages 222-231.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Word Sense Disambiguation: A survey", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ACM Computing Surveys", |
| "volume": "41", |
| "issue": "2", |
| "pages": "1--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli. 2009. Word Sense Disambiguation: A survey. ACM Computing Surveys, 41(2):1-69.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "A quick tour of word sense disambiguation, induction and related approaches", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "SOFSEM 2012: Theory and practice of computer science", |
| "volume": "", |
| "issue": "", |
| "pages": "115--129", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli. 2012. A quick tour of word sense dis- ambiguation, induction and related approaches. In SOFSEM 2012: Theory and practice of computer science, pages 115-129. Springer.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "A universal part-of-speech tagset", |
| "authors": [ |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "McDonald", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1104.2086" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Slav Petrov, Dipanjan Das, and Ryan McDonald. 2011. A universal part-of-speech tagset. arXiv preprint arXiv:1104.2086.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "A large-scale pseudoword-based evaluation framework for state-of-the-art Word Sense Disambiguation", |
| "authors": [ |
| { |
| "first": "Mohammad", |
| "middle": [ |
| "Taher" |
| ], |
| "last": "Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Taher Pilehvar and Roberto Navigli. 2014. A large-scale pseudoword-based evaluation frame- work for state-of-the-art Word Sense Disambigua- tion. Computational Linguistics, 40(4).", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Knowledge-rich Word Sense Disambiguation rivaling supervised system", |
| "authors": [ |
| { |
| "first": "Simone", |
| "middle": [ |
| "Paolo" |
| ], |
| "last": "Ponzetto", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "1522--1531", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simone Paolo Ponzetto and Roberto Navigli. 2010. Knowledge-rich Word Sense Disambiguation rival- ing supervised system. In Proceedings of the 48th Annual Meeting of the Association for Computa- tional Linguistics (ACL), pages 1522-1531, Upp- sala, Sweden.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Addressing the MFS Bias in WSD systems", |
| "authors": [ |
| { |
| "first": "Marten", |
| "middle": [], |
| "last": "Postma", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruben", |
| "middle": [], |
| "last": "Izquierdo", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Rigau", |
| "suffix": "" |
| }, |
| { |
| "first": "Piek", |
| "middle": [], |
| "last": "Vossen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marten Postma, Ruben Izquierdo, Eneko Agirre, Ger- man Rigau, and Piek Vossen. 2016. Addressing the MFS Bias in WSD systems. In Proceedings of LREC, Portoroz, Slovenia.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "SemEval-2007 task-17: English lexical sample, SRL and all words", |
| "authors": [ |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Loper", |
| "suffix": "" |
| }, |
| { |
| "first": "Dmitriy", |
| "middle": [], |
| "last": "Dligach", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of SemEval", |
| "volume": "", |
| "issue": "", |
| "pages": "87--92", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameer Pradhan, Edward Loper, Dmitriy Dligach, and Martha Palmer. 2007. SemEval-2007 task-17: En- glish lexical sample, SRL and all words. In Pro- ceedings of SemEval, pages 87-92.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Automatic Construction and Evaluation of a Large Semantically Enriched Wikipedia", |
| "authors": [ |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Raganato", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudio", |
| "middle": [ |
| "Delli" |
| ], |
| "last": "Bovi", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "2894--2900", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alessandro Raganato, Claudio Delli Bovi, and Roberto Navigli. 2016. Automatic Construction and Evalua- tion of a Large Semantically Enriched Wikipedia. In Proceedings of IJCAI, pages 2894-2900, New York City, NY, USA, July.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Autoextend: Extending word embeddings to embeddings for synsets and lexemes", |
| "authors": [ |
| { |
| "first": "Sascha", |
| "middle": [], |
| "last": "Rothe", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1793--1803", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sascha Rothe and Hinrich Sch\u00fctze. 2015. Autoex- tend: Extending word embeddings to embeddings for synsets and lexemes. In Proceedings of ACL, pages 1793-1803, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Coarse to fine grained sense disambiguation in wikipedia", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Razvan", |
| "middle": [], |
| "last": "Bunescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. of *SEM", |
| "volume": "", |
| "issue": "", |
| "pages": "22--31", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Shen, Razvan Bunescu, and Rada Mihalcea. 2013. Coarse to fine grained sense disambiguation in wikipedia. Proc. of *SEM, pages 22-31.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "The English all-words task", |
| "authors": [ |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Snyder", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 3rd International Workshop on the Evaluation of Systems for the Semantic Analysis of Text (SENSEVAL-3)", |
| "volume": "", |
| "issue": "", |
| "pages": "41--43", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benjamin Snyder and Martha Palmer. 2004. The En- glish all-words task. In Proceedings of the 3rd In- ternational Workshop on the Evaluation of Systems for the Semantic Analysis of Text (SENSEVAL-3), Barcelona, Spain, pages 41-43, Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "One million sense-tagged instances for word sense disambiguation and induction", |
| "authors": [ |
| { |
| "first": "Kaveh", |
| "middle": [], |
| "last": "Taghipour", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaveh Taghipour and Hwee Tou Ng. 2015a. One mil- lion sense-tagged instances for word sense disam- biguation and induction. CoNLL 2015, page 338.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Semisupervised word sense disambiguation using word embeddings in general and specific domains. Proceedings of NAACL HLT", |
| "authors": [ |
| { |
| "first": "Kaveh", |
| "middle": [], |
| "last": "Taghipour", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "314--323", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaveh Taghipour and Hwee Tou Ng. 2015b. Semi- supervised word sense disambiguation using word embeddings in general and specific domains. Pro- ceedings of NAACL HLT 2015, pages 314-323.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Fast random walk with restart and its applications", |
| "authors": [ |
| { |
| "first": "Hanghang", |
| "middle": [], |
| "last": "Tong", |
| "suffix": "" |
| }, |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Faloutsos", |
| "suffix": "" |
| }, |
| { |
| "first": "Jia-Yu", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "ICDM", |
| "volume": "", |
| "issue": "", |
| "pages": "613--622", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hanghang Tong, Christos Faloutsos, and Jia-Yu Pan. 2006. Fast random walk with restart and its applica- tions. In ICDM, pages 613-622.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "A gametheoretic approach to word sense disambiguation", |
| "authors": [ |
| { |
| "first": "Rocco", |
| "middle": [], |
| "last": "Tripodi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Pelillo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.07711" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rocco Tripodi and Marcello Pelillo. 2016. A game- theoretic approach to word sense disambiguation. arXiv preprint arXiv:1606.07711.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Multi-Objective Optimization for the Joint Disambiguation of Nouns and Named Entities", |
| "authors": [ |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Weissenborn", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonhard", |
| "middle": [], |
| "last": "Hennig", |
| "suffix": "" |
| }, |
| { |
| "first": "Feiyu", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hans", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "596--605", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dirk Weissenborn, Leonhard Hennig, Feiyu Xu, and Hans Uszkoreit. 2015. Multi-Objective Optimiza- tion for the Joint Disambiguation of Nouns and Named Entities. In Proceedings of ACL, pages 596- 605, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Semi-supervised word sense disambiguation with neural models", |
| "authors": [ |
| { |
| "first": "Dayu", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Doherty", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Evans", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Altendorf", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "1374--1385", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dayu Yuan, Julian Richardson, Ryan Doherty, Colin Evans, and Eric Altendorf. 2016. Semi-supervised word sense disambiguation with neural models. In Proceedings of COLING, pages 1374-1385.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "It Makes Sense: A wide-coverage Word Sense Disambiguation system for free text", |
| "authors": [ |
| { |
| "first": "Zhi", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the ACL System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "78--83", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhi Zhong and Hwee Tou Ng. 2010. It Makes Sense: A wide-coverage Word Sense Disambiguation sys- tem for free text. In Proceedings of the ACL System Demonstrations, pages 78-83.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Pipeline for standardizing any given WSD dataset.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td/><td colspan=\"4\">Nouns Verbs Adj. Adv.</td><td>All</td></tr><tr><td colspan=\"4\">#Instances 4,300 1,652 955</td><td>346</td><td>7,253</td></tr><tr><td>Ambiguity</td><td>4.8</td><td>10.4</td><td>3.8</td><td>3.1</td><td>5.8</td></tr></table>", |
| "html": null, |
| "num": null, |
| "text": "F-Measure percentage of different models in five all-words WSD datasets.", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "text": "Number of instances and ambiguity level of the concatenation of all five WSD datasets.", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "text": "F-Measure percentage of different models on the concatenation of all five WSD datasets.", |
| "type_str": "table" |
| } |
| } |
| } |
| } |