| { |
| "paper_id": "S10-1003", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:27:39.209845Z" |
| }, |
| "title": "SemEval-2010 Task 3: Cross-Lingual Word Sense Disambiguation", |
| "authors": [ |
| { |
| "first": "Els", |
| "middle": [], |
| "last": "Lefever", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "LT3, Language and Translation Technology Team", |
| "institution": "University College Ghent", |
| "location": { |
| "country": "Belgium" |
| } |
| }, |
| "email": "els.lefever@hogent.be" |
| }, |
| { |
| "first": "Veronique", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "LT3, Language and Translation Technology Team", |
| "institution": "University College Ghent", |
| "location": { |
| "country": "Belgium" |
| } |
| }, |
| "email": "veronique.hoste@hogent.be" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The goal of this task is to evaluate the feasibility of multilingual WSD on a newly developed multilingual lexical sample data set. Participants were asked to automatically determine the contextually appropriate translation of a given English noun in five languages, viz. Dutch, German, Italian, Spanish and French. This paper reports on the sixteen submissions from the five different participating teams.", |
| "pdf_parse": { |
| "paper_id": "S10-1003", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The goal of this task is to evaluate the feasibility of multilingual WSD on a newly developed multilingual lexical sample data set. Participants were asked to automatically determine the contextually appropriate translation of a given English noun in five languages, viz. Dutch, German, Italian, Spanish and French. This paper reports on the sixteen submissions from the five different participating teams.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Word Sense Disambiguation, the task of selecting the correct sense of an ambiguous word in a given context, is a well-researched NLP problem (see for example Agirre and Edmonds (2006) and Navigli (2009) ), largely boosted by the various Senseval and SemEval editions. The SemEval-2010 Cross-lingual Word Sense Disambiguation task focuses on two bottlenecks in current WSD research, namely the scarcity of sense inventories and sense-tagged corpora (especially for languages other than English) and the growing tendency to evaluate the performance of WSD systems in a real application such as machine translation and cross-language information retrieval (see for example Agirre et al. (2007) ).", |
| "cite_spans": [ |
| { |
| "start": 158, |
| "end": 183, |
| "text": "Agirre and Edmonds (2006)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 188, |
| "end": 202, |
| "text": "Navigli (2009)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 670, |
| "end": 690, |
| "text": "Agirre et al. (2007)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The Cross-lingual WSD task aims at the development of a multilingual data set to test the feasibility of multilingual WSD. Many studies have already shown the validity of this crosslingual evidence idea (Gale et al., 1993; Ide et al., 2002; Ng et al., 2003; Apidianaki, 2009) , but until now no benchmark data sets have been available. For the SemEval-2010 competition we developed (i) a sense inventory in which the sense distinctions were extracted from the multilingual corpus Europarl 1 and (ii) a data set in which the ambiguous words were annotated with the senses from the multilingual sense inventory. The Cross-Lingual WSD task is a lexical sample task for English nouns, in which the word senses are made up of the translations in five languages, viz. Dutch, French, Italian, Spanish and German. Both the sense inventory and the annotated data set were constructed for a sample of 25 nouns. The data set was divided into a trial set of 5 ambiguous nouns and a test set of 20 nouns. The participants had to automatically determine the contextually appropriate translation for a given English noun in each or a subset of the five target languages. Only translations present in Europarl were considered as valid translations.", |
| "cite_spans": [ |
| { |
| "start": 203, |
| "end": 222, |
| "text": "(Gale et al., 1993;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 223, |
| "end": 240, |
| "text": "Ide et al., 2002;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 241, |
| "end": 257, |
| "text": "Ng et al., 2003;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 258, |
| "end": 275, |
| "text": "Apidianaki, 2009)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The remainder of this article is organized as follows. Section 2 focuses on the task description and gives a short overview of the construction of the sense inventory and the annotation of the benchmark data set with the senses from the multilingual sense inventory. Section 3 clarifies the scoring metrics and presents two frequency-based baselines. The participating systems are presented in Section 4, while the results of the task are discussed in Section 5. Section 6 concludes this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Two types of data sets were used in the Cross-lingual WSD task: (a) a parallel corpus on the basis of which the gold standard sense inventory was created and (b) a collection of English sentences containing the lexical sample words annotated with their contextually appropriate translations in five languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data sets", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Below, we provide a short summary of the complete data construction process. For a more detailed description, we refer to Lefever and Hoste (2009; 2010) .", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 146, |
| "text": "Lefever and Hoste (2009;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 147, |
| "end": 152, |
| "text": "2010)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data sets", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The gold standard sense inventory was derived from the Europarl parallel corpus 2 , which is extracted from the proceedings of the European Parliament (Koehn, 2005) . We selected 6 languages from the 11 European languages represented in the corpus, viz. English (our target language), Dutch, French, German, Italian and Spanish. All data were already sentence-aligned using a tool based on the Gale and Church (1991) algorithm, which was part of the Europarl corpus. We only considered the 1-1 sentence alignments between English and the five other languages. These sentence alignments were made available to the task participants for the five trial words. The sense inventory extracted from the parallel data set (Section 2.2) was used to annotate the sentences in the trial set and the test set, which were extracted from the JRC-ACQUIS Multilingual Parallel Corpus 3 and BNC 4 .", |
| "cite_spans": [ |
| { |
| "start": 151, |
| "end": 164, |
| "text": "(Koehn, 2005)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 394, |
| "end": 416, |
| "text": "Gale and Church (1991)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data sets", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Two steps were taken to obtain a multilingual sense inventory: (1) word alignment on the sentences to find the set of possible translations for the set of ambiguous nouns and (2) clustering by meaning (per target word) of the resulting translations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Creation of the sense inventory", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "GIZA++ (Och and Ney, 2003) was used to generate the initial word alignments, which were manually verified by certified translators in all six involved languages. The human annotators were asked to assign a \"NULL\" link to words for which no valid translation could be identified. Furthermore, they were also asked to provide extra information on compound translations (e.g. the Dutch word Investeringsbank as a translation of the English multiword Investment Bank ), fuzzy links, or target words with a different PoS (e.g. the verb to bank ).", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 26, |
| "text": "(Och and Ney, 2003)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Creation of the sense inventory", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The manually verified translations were clustered by meaning by one annotator. In order to do so, the translations were linked across languages on the basis of unique sentence IDs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Creation of the sense inventory", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "After the selection of all unique translation combinations, the translations were grouped into clusters. The clusters were organized in two levels, in which the top level reflects the main sense categories (e.g. for the word coach we have (1) (sports) manager, (2) bus, (3) carriage and (4) part of a train), and the subclusters represent the finer sense distinctions. Translations that correspond to English multiword units were identified and in case of non-apparent compounds, i.e. compounds which are not marked with a \"-\", the different compound parts were separated by \u00a7 \u00a7 in the clustering file (e.g. the German Post \u00a7 \u00a7kutsche). All clustered translations were also manually lemmatized.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Creation of the sense inventory", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The resulting sense inventory was used to annotate the sentences in the trial set (20 sentences per ambiguous word) and the test set (50 sentences per ambiguous word). In total, 1100 sentences were annotated. The annotators were asked to (a) pick the contextually appropriate sense cluster and to (b) choose their three preferred translations from this cluster. In case they were not able to find three appropriate translations, they were also allowed to provide fewer. These potentially different translations were used to assign frequency weights (shown in example (2)) to the gold standard translations per sentence. The example (1) below shows the annotation result in both German and Dutch for an English source sentence containing coach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense annotation of the test data", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "(1) SENTENCE 12. STRANGELY , the national coach of the Irish teams down the years has had little direct contact with the four provincial coaches . For each instance, the gold standard that results from the manual annotation contains a set of translations that are enriched with frequency information. The format of both the input file and gold standard is similar to the format that will be used for the Sem-Eval Cross-Lingual Lexical Substitution task (Sinha and Mihalcea, 2009) . The following example illustrates the six-language gold standard format for the trial sentence in (1). The first field contains the target word, PoS-tag and language code, the second field contains the sentence ID and the third field contains the gold standard translations in the target language, enriched with their frequency weight:", |
| "cite_spans": [ |
| { |
| "start": 453, |
| "end": 479, |
| "text": "(Sinha and Mihalcea, 2009)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense annotation of the test data", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "(2) coach.n.nl 12 :: coach 3; speler-trainer 1; 3 Evaluation", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense annotation of the test data", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "To score the participating systems, we use an evaluation scheme which is inspired by the English lexical substitution task in SemEval 2007 (McCarthy and Navigli, 2007) . We perform both a best result evaluation and a more relaxed evaluation for the top five results. The evaluation is performed using precision and recall (P rec and Rec in the equations below), and Mode precision (M P ) and Mode recall (M R ), where we calculate precision and recall against the translation that is preferred by the majority of annotators, provided that one translation is more frequent than the others.", |
| "cite_spans": [ |
| { |
| "start": 139, |
| "end": 167, |
| "text": "(McCarthy and Navigli, 2007)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For the precision and recall formula we use the following variables. Let H be the set of annotators, T the set of test items and h i the set of responses for an item i \u2208 T for annotator h \u2208 H. For each i \u2208 T we calculate the mode (m i ) which corresponds to the translation with the highest frequency weight. For a detailed overview of the M P and M R calculations, we refer to McCarthy and Navigli (2007) . Let A be the set of items from T (and T M ) where the system provides at least one answer and a i : i \u2208 A the set of guesses from the system for item i. For each i, we calculate the multiset union (H i ) for all h i for all h \u2208 H and for each unique type (res) in H i that has an associated frequency (f req res ). In order to assign frequency weights to our gold standard translations, we asked our human annotators to indicate their top 3 translations, which enables us to also obtain meaningful associated frequencies (f req res ) viz. \"1\" in case a translation is picked by 1 annotator, \"2\" if picked by two annotators and \"3\" if chosen by all three annotators.", |
| "cite_spans": [ |
| { |
| "start": 378, |
| "end": 405, |
| "text": "McCarthy and Navigli (2007)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Best result evaluation For the best result evaluation, systems can propose as many guesses as the system believes are correct, but the resulting score is divided by the number of guesses. In this way, systems that output a lot of guesses are not favoured.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "P rec = a i :i\u2208A res\u2208a i f reqres |a i | |H i | |A| (1) Rec = a i :i\u2208T res\u2208a i f reqres |a i | |H i | |T | (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Out-of-five (Oof ) evaluation For the more relaxed evaluation, systems can propose up to five guesses. For this evaluation, the resulting score is not divided by the number of guesses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P rec = a i :i\u2208A res\u2208a i f reqres |H i | |A| (3) Rec = a i :i\u2208T res\u2208a i f reqres |H i | |T |", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Scoring", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We produced two frequency-based baselines:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "1. For the Best result evaluation, we select the most frequent lemmatized translation that results from the automated word alignment process (GIZA++).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "2. For the Out-of-five or more relaxed evaluation, we select the five most frequent (lemmatized) translations that result from the GIZA++ alignment. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We received sixteen submissions from five different participating teams. One group tackled all five target languages, whereas the other groups focused on four (one team), two (one team) or one (two teams) target language(s). For both the best and the Out-of-five evaluation tasks, there were between three and seven participating systems per language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Systems", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The OWNS system identifies the nearest neighbors of the test instances from the training data using a pairwise similarity measure (weighted sum of the word overlap and semantic overlap between two sentences). They use WordNet similarity measures as an additional information source, while the other teams merely rely on parallel corpora to extract all lexical information. The UvT-WSD systems use a k-nearest neighbour classifier in the form of one word expert per lemma-Part-of-Speech pair to be disambiguated. The classifier takes as input a variety of local and global context features. Both the FCC-WSD and T3-COLEUR systems use bilingual translation probability tables that are derived from the Europarl corpus. The FCC-WSD system uses a Naive Bayes classifier, while the T3-COLEUR system uses an unsupervised graph-based method. Finally, the UHD systems build for each target word a multilingual co-occurrence graph based on the target word's aligned contexts found in parallel corpora. The cross-lingual nodes are first linked by translation edges, that are labeled with the translations of the target word in the corresponding contexts. The graph is transformed into a minimum spanning tree which is used to select the most relevant words in context to disambiguate a given test instance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Systems", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For the system evaluation results, we show precision (P rec), recall (Rec), Mode precision (M P ) and Mode recall (M R ). We ranked all system results according to recall, as was done for the Lexical Substitution task. Table 3 shows the system ranking on the best task, while Table 4 shows the results for the Oof task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 219, |
| "end": 226, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 276, |
| "end": 283, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Rec Beating the baseline seems to be quite challenging for this WSD task. While the best systems outperform the baseline for the best task, Table 4 : Out-of-five System Results this is not always the case for the Out-of-five task. This is not surprising though, as the Oof baseline contains the five most frequent Europarl translations. As a consequence, these translations usually contain the most frequent translations from different sense clusters, and in addition they also contain the most generic translation that often covers multiple senses of the target word.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 140, |
| "end": 147, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Prec", |
| "sec_num": null |
| }, |
| { |
| "text": "M P M R Spanish UvT-v", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prec", |
| "sec_num": null |
| }, |
| { |
| "text": "The best results are achieved by the UvT-WSD (Spanish, Dutch) and ColEur (French, Italian and German) systems. An interesting feature that these systems have in common, is that they extract all lexical information from the parallel corpus at hand, and do not need any additional data sources. As a consequence, the systems can easily be applied to other languages as well. This is clearly illustrated by the ColEur system, that participated for all supported languages, and outperformed the other systems for three of the five languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prec", |
| "sec_num": null |
| }, |
| { |
| "text": "In general, we notice that Spanish and French have the highest scores, followed by Italian, whereas Dutch and German seem to be more challenging. The same observation can be made for both the Oof and Best results, except for Italian that performs worse than Dutch for the latter. However, given the low participation rate for Italian, we do not have sufficient information to explain this different behaviour on the two tasks. The discrepancy between the performance figures for Spanish and French on the one hand, and German and Dutch on the other hand, seems more readily explicable. A likely explanation could be the number of classes (or translations) the systems have to choose from. As both Dutch and German are characterized by a rich compounding system, these compound translations also result in a higher number of different translations. Figure 1 illustrates this by listing the number of different translations (or classes in the context of WSD) for all trial and test words. As a result, the broader set of translations makes the WSD task, that consists in choosing the most appropriate translation from all possible translations for a given instance, more complicated for Dutch and German.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 848, |
| "end": 856, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Prec", |
| "sec_num": null |
| }, |
| { |
| "text": "We believe that the Cross-lingual Word Sense Disambiguation task is an interesting contribution to the domain, as it attempts to address two WSD problems which have received a lot of attention lately, namely (1) the scarcity of hand-crafted sense inventories and sense-tagged corpora and (2) the need to make WSD more suited for practical applications.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concluding remarks", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The system results lead to the following observations. Firstly, languages which make extensive use of single word compounds seem harder to tackle, which is also reflected in the baseline scores. A possible explanation for this phenomenon could lie in the number of translations the systems have to choose from. Secondly, it is striking that the systems with the highest performance solely rely on parallel corpora as a source of information. This would seem very promising for future multilingual WSD research; by eliminating the need for external information sources, these systems present a more flexible and language-independent approach to WSD.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concluding remarks", |
| "sec_num": "6" |
| }, |
| { |
| "text": "http://www.statmt.org/europarl/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.statmt.org/europarl/ 3 http://wt.jrc.it/lt/Acquis/ 4 http://www.natcorp.ox.ac.uk/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Word Sense Disambiguation. Text, Speech and Language Technology", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Edmonds", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Agirre and P. Edmonds, editors. 2006. Word Sense Disambiguation. Text, Speech and Lan- guage Technology. Springer, Dordrecht.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Semeval-2007 task01: Evaluating wsd on crosslanguage information retrieval", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Magnini", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Lopez De Lacalle", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Otegi", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Rigau", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Vossen", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of CLEF 2007 Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "908--917", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Agirre, B. Magnini, O. Lopez de Lacalle, A. Otegi, G. Rigau, and P. Vossen. 2007. Semeval-2007 task01: Evaluating wsd on cross- language information retrieval. In Proceedings of CLEF 2007 Workshop, pp. 908 -917. ISSN: 1818-8044. ISBN: 2-912335-31-0.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Data-driven semantic analysis for multilingual wsd and lexical selection in translation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Apidianaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 12th Conference of the European Chapter of the Association for Computational Linguistics (EACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Apidianaki. 2009. Data-driven semantic anal- ysis for multilingual wsd and lexical selection in translation. In Proceedings of the 12th Confer- ence of the European Chapter of the Association for Computational Linguistics (EACL), Athens, Greece.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A program for aligning sentences in bilingual corpora", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [ |
| "A" |
| ], |
| "last": "Gale", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "W" |
| ], |
| "last": "Church", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "177--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "W.A. Gale and K.W. Church. 1991. A program for aligning sentences in bilingual corpora. In Computational Linguistics, pages 177-184.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A method for disambiguating word senses in a large corpus", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [ |
| "A" |
| ], |
| "last": "Gale", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "W" |
| ], |
| "last": "Church", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computers and the Humanities", |
| "volume": "26", |
| "issue": "", |
| "pages": "415--439", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "W.A. Gale, K.W. Church, and D. Yarowsky. 1993. A method for disambiguating word senses in a large corpus. In Computers and the Humanities, volume 26, pages 415-439.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Sense discrimination with parallel corpora", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Ide", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Erjavec", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Tufis", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of ACL Workshop on Word Sense Disambiguation: Recent Successes and Future Directions", |
| "volume": "", |
| "issue": "", |
| "pages": "54--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "N. Ide, T. Erjavec, and D. Tufis. 2002. Sense dis- crimination with parallel corpora. In Proceed- ings of ACL Workshop on Word Sense Disam- biguation: Recent Successes and Future Direc- tions, pages 54-60.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Europarl: A parallel corpus for statistical machine translation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the MT Summit", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. Koehn. 2005. Europarl: A parallel corpus for statistical machine translation. In Proceedings of the MT Summit.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Semeval-2010 task 3: Cross-lingual word sense disambiguation", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Lefever", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the NAACL-HLT 2009 Workshop: SEW-2009 -Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "82--87", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Lefever and V. Hoste. 2009. Semeval-2010 task 3: Cross-lingual word sense disambigua- tion. In Proceedings of the NAACL-HLT 2009 Workshop: SEW-2009 -Semantic Evaluations, pages 82-87, Boulder, Colorado.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Construction of a benchmark data set for cross-lingual word sense disambiguation", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Lefever", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the seventh international conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Lefever and V. Hoste. 2010. Construction of a benchmark data set for cross-lingual word sense disambiguation. In Proceedings of the seventh international conference on Language Resources and Evaluation., Malta.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Semeval-2007 task 10: English lexical substitution task", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 4th International Workshop on Semantic Evaluations (SemEval-2007)", |
| "volume": "", |
| "issue": "", |
| "pages": "48--53", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. McCarthy and R. Navigli. 2007. Semeval-2007 task 10: English lexical substitution task. In Proceedings of the 4th International Workshop on Semantic Evaluations (SemEval-2007), pages 48-53, Prague, Czech Republic.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Word sense disambiguation: a survey", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ACM Computing Surveys", |
| "volume": "41", |
| "issue": "", |
| "pages": "1--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Navigli. 2009. Word sense disambiguation: a survey. In ACM Computing Surveys, volume 41, pages 1-69.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Exploiting parallel texts for word sense disambiguation: An empirical study", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "T" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [ |
| "S" |
| ], |
| "last": "Chan", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "455--462", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H.T. Ng, B. Wang, and Y.S. Chan. 2003. Exploit- ing parallel texts for word sense disambiguation: An empirical study. In Proceedings of the 41st Annual Meeting of the Association for Compu- tational Linguistics, pages 455-462, Santa Cruz.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A systematic comparison of various statistical alignment models", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [ |
| "J" |
| ], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "29", |
| "issue": "1", |
| "pages": "19--51", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F.J. Och and H. Ney. 2003. A systematic com- parison of various statistical alignment models. Computational Linguistics, 29(1):19-51.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Semeval-2010 task 2: Cross-lingual lexical substitution", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "D" |
| ], |
| "last": "Sinha", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the NAACL-HLT 2009 Workshop: SEW-2009 -Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. McCarthy, R. Sinha, and R. Mihalcea. 2009. Semeval-2010 task 2: Cross-lingual lexical sub- stitution. In Proceedings of the NAACL-HLT 2009 Workshop: SEW-2009 -Semantic Evalua- tions, Boulder, Colorado.",
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "trainer 3; voetbaltrainer 1; coach.n.fr 12 :: capitaine 1; entra\u00eeneur 3; coach.n.de 12 :: Coach 1; Fu\u00dfbaltrainer 1; Nationaltrainer 2; Trainer 3; coach.n.it 12 :: allenatore 3; coach.n.es 12 :: entrenador 3;", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Number of different translations per word for Dutch, French, Spanish, Italian and German.", |
| "num": null |
| }, |
| "TABREF1": { |
| "num": null, |
| "type_str": "table", |
| "text": "shows the baselines for the Best evaluation, whileTable 2gives an overview per language of the baselines for the Out-offive evaluation.Spanish 18.36 18.36 23.38 23.38 French 20.71 20.71 15.21 15.21 Italian 14.03 14.03 11.23 11.23", |
| "content": "<table><tr><td/><td>Prec</td><td>Rec</td><td>M P</td><td>M R</td></tr><tr><td>Dutch</td><td colspan=\"3\">15.69 15.69 8.71</td><td>8.71</td></tr><tr><td colspan=\"4\">German 13.16 13.16 6.95</td><td>6.95</td></tr><tr><td/><td colspan=\"3\">Table 1: Best Baselines</td></tr><tr><td/><td>Prec</td><td>Rec</td><td>M P</td><td>M R</td></tr><tr><td colspan=\"5\">Spanish 48.41 48.41 42.62 42.62</td></tr><tr><td colspan=\"5\">French 45.99 45.99 36.45 36.45</td></tr><tr><td colspan=\"5\">Italian 34.51 34.51 29.70 29.70</td></tr><tr><td>Dutch</td><td colspan=\"4\">37.43 37.43 24.58 24.58</td></tr><tr><td colspan=\"5\">German 32.89 32.89 29.80 29.80</td></tr></table>", |
| "html": null |
| }, |
| "TABREF2": { |
| "num": null, |
| "type_str": "table", |
| "text": "Out-of-five Baselines", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF4": { |
| "num": null, |
| "type_str": "table", |
| "text": "Best System Results", |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |