| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:21:23.384731Z" |
| }, |
| "title": "ParaNames: A Massively Multilingual Entity Name Corpus", |
| "authors": [ |
| { |
| "first": "Jonne", |
| "middle": [], |
| "last": "S\u00e4lev\u00e4", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Michtom School of Computer Science Brandeis University", |
| "location": {} |
| }, |
| "email": "jonnesaleva@brandeis.edu" |
| }, |
| { |
| "first": "Constantine", |
| "middle": [], |
| "last": "Lignos", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Michtom School of Computer Science Brandeis University", |
| "location": {} |
| }, |
| "email": "lignos@brandeis.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present ParaNames, a Wikidata-derived multilingual parallel name resource consisting of over 118 million names for 13.7 million entities, spanning over 400 languages. ParaNames is useful for multilingual language processing, both for defining name translation tasks and as supplementary data for other tasks. We demonstrate an application of ParaNames by training a multilingual model for canonical name translation to and from English.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present ParaNames, a Wikidata-derived multilingual parallel name resource consisting of over 118 million names for 13.7 million entities, spanning over 400 languages. ParaNames is useful for multilingual language processing, both for defining name translation tasks and as supplementary data for other tasks. We demonstrate an application of ParaNames by training a multilingual model for canonical name translation to and from English.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Our goal for ParaNames is to introduce a massively multilingual entity name resource that provides names for diverse entities in the largest possible set of languages and can be kept up to date through a mostly-automated preprocessing procedure. In this extended abstract, we summarize our approach to transforming the Wikidata knowledge graph into a set of parallel entity names identified with the highlevel types of person, location, and organization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Related Work", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We do not claim to be the first to harvest the parallel entity names available from Wikidata or Wikipedia. There is scattered prior work in this area, with one of the earliest explorations at scale being performed by Irvine et al. (2010) . Recently, Benites et al. (2020) used Wikipedia as a data source and automatically extracted potential transliteration pairs, combining their outputs with several previously published corpora into an aggregate corpus of 1.6 million names.", |
| "cite_spans": [ |
| { |
| "start": 217, |
| "end": 237, |
| "text": "Irvine et al. (2010)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 250, |
| "end": 271, |
| "text": "Benites et al. (2020)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Related Work", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To construct our dataset, we began by extracting all entity records from Wikidata and ingesting them into a MongoDB instance. Each entity in Wikidata is associated with several types of metadata, including names for it across languages. Given that we are working with such a large-scale dataset, there are important challenges that arise.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constructing the resource", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Script usage While language codes can identify a specific script for a language, many Wikidata labels do not conform to the scripts used by each language. In many cases, this is simply a data quality issue, such as with Greek where approximately 8.9% of ORG entities are written in Latin script.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constructing the resource", |
| "sec_num": "2" |
| }, |
| { |
| "text": "However, in other cases, the presence of several scripts can also reflect real variation in the citation forms used in the language, as many languages (e.g. Kazakh) commonly use several scripts. While we explored automated methods of identifying names in incorrect scripts, we decided that manually constructing a list of allowed scripts for each language would yield the best results. We used Wikipedia as an authoritative source to look up which scripts are used to write each language, and filtered out all names whose most common Unicode script property is not among the allowed ones.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constructing the resource", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Providing entity types Downstream tasks and analysis of performance across different entity types often require that entities have a single highlevel type. Wikidata has a complex type hierarchy, but we infer simpler entity types for as many entities as possible. We identified suitable high-level Wikidata types-Q5 (human) for PER, Q82794 (geographic region) for LOC, and Q43229 (organization) for ORG-and classified each Wikidata entity that is an instance of these types as the corresponding named entity type. In total, our resource includes 8,726,033 PER entities, 3,078,428 LOC entities and 2,196,035 ORG entities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constructing the resource", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To demonstrate an application of ParaNames, we train multilingual Transformer-based models that map entity name from English to one of Arabic, Armenian, Georgian, Greek, Hebrew, Japanese, Kazakh, Korean, Latvian, Lithuanian, Persian (Farsi), Russian, Swedish, Tajik, Thai, Vietnamese, and Urdu and vice versa. We chose these languages Table 1 : Canonical name translation performance for the X \u2192 En task, computed on the test set using our baseline configuration with language special tokens on the source side.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 335, |
| "end": 342, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "as they cover a wide geographic distribution, as well as several different orthographic systems, language families and typological features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To create the parallel data, we extracted all entities that had names in English and at least one of the selected languages and split them into train, dev, and test sets using an 80/10/10 split. We also added \"special tokens\" to the beginning of each input to provide the model with additional information, e.g. entity type (<PER>), language of non-English label (<kk>) and/or its script (<Cyrillic>).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We use a single NVIDIA RTX 3090 GPU for training and decoding, and train our model for up to 90k updates using Adam. 1 We evaluate using three metrics: accuracy, mean F1-score (Chen et al., 2018) , and character error rate (CER).", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 118, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 176, |
| "end": 195, |
| "text": "(Chen et al., 2018)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As our first experiment, we trained our models with only a language special token on the source side. The results in both translation directions can be seen in Tables 1 and 2 . When translating to English, our model performs best on Swedish, Vietnamese and Latvian, which is unsurprising as all use the Latin script. However, Latvian names tend to be more inflected and generally match English less often, which explains its lower ranking. Kazakh and Tajik follow next, which also makes sense as Cyrillic can be transliterated to Latin script relatively unambiguously. Model performance is", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 160, |
| "end": 174, |
| "text": "Tables 1 and 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Other hyperparameter values are nearly identical to the best configuration in Moran and Lignos (2020).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Conclusion ParaNames supports the modeling of parallel names for millions of entities in over 400 languages. It can enable multifaceted research in names, including name translation/transliteration and further research in named entity recognition and linking, especially in lower-resourced languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "consistently worst on Hebrew-most likely caused by the lack of vowels in the Hebrew names, which the model must infer when translating to English. When translating from English, the model performs best on languages similar to when translating to English. Swedish and Latvian have the highest accuracy, followed by Kazakh, Tajik, and Georgian. For Hebrew, the model performs much better; a potential explanation for this is the lack of vowel diacritics. Interestingly, the reverse is true for Thai, where the model performs less than half as accurately as when translating into English.We also hypothesized that incorporating other information could be helpful, and repeated the experiment using a mixture of language, type token, and script special tokens. Overall, the results within each language tended to be quite similar regardless of tokens. The best settings were to use all three special tokens when translating from English, and language and type tokens when translating to English. While small, the differences from baseline were statistically significant for almost all settings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "TRANSLIT: A large-scale name transliteration resource", |
| "authors": [ |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Benites", |
| "suffix": "" |
| }, |
| { |
| "first": "Gilbert", |
| "middle": [], |
| "last": "Fran\u00e7ois Duivesteijn", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "3265--3271", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fernando Benites, Gilbert Fran\u00e7ois Duivesteijn, Pius von D\u00e4niken, and Mark Cieliebak. 2020. TRANSLIT: A large-scale name transliteration re- source. In Proceedings of the 12th Language Re- sources and Evaluation Conference, pages 3265- 3271, Marseille, France. European Language Re- sources Association.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "NEWS 2018 whitepaper", |
| "authors": [ |
| { |
| "first": "Nancy", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rafael", |
| "middle": [ |
| "E" |
| ], |
| "last": "Banchs", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Seventh Named Entities Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "47--54", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-2408" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nancy Chen, Xiangyu Duan, Min Zhang, Rafael E. Banchs, and Haizhou Li. 2018. NEWS 2018 whitepa- per. In Proceedings of the Seventh Named Entities Workshop, pages 47-54, Melbourne, Australia. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Transliterating from all languages", |
| "authors": [ |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Irvine", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Klementiev", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 9th Conference of the Association for Machine Translation in the Americas: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ann Irvine, Chris Callison-Burch, and Alexandre Kle- mentiev. 2010. Transliterating from all languages. In Proceedings of the 9th Conference of the Association for Machine Translation in the Americas: Research Papers, Denver, Colorado, USA. Association for Ma- chine Translation in the Americas.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Effective architectures for low resource multilingual named entity transliteration", |
| "authors": [ |
| { |
| "first": "Molly", |
| "middle": [], |
| "last": "Moran", |
| "suffix": "" |
| }, |
| { |
| "first": "Constantine", |
| "middle": [], |
| "last": "Lignos", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 3rd Workshop on Technologies for MT of Low Resource Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Molly Moran and Constantine Lignos. 2020. Effective architectures for low resource multilingual named entity transliteration. In Proceedings of the 3rd Work- shop on Technologies for MT of Low Resource Lan- guages, pages 79-86, Suzhou, China. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "text": "\u00b1 .02 0.08 \u00b1 .00 97.15 \u00b1 .01 Vietnamese 80.75 \u00b1 .02 0.17 \u00b1 .00 94.08 \u00b1 .01 Latvian 67.86 \u00b1 .02 0.14 \u00b1 .00 95.19 \u00b1 .01 Kazakh 55.38 \u00b1 .04 0.16 \u00b1 .00 93.93 \u00b1 .01 Tajik 49.62 \u00b1 .05 0.20 \u00b1 .00 92.77 \u00b1 .01 Lithuanian 47.39 \u00b1 .03 0.28 \u00b1 .00 89.53 \u00b1 .01 Thai 43.94 \u00b1 .05 0.29 \u00b1 .00 89.91 \u00b1 .01 Armenian 39.92 \u00b1 .05 0.28 \u00b1 .00 90.04 \u00b1 .01 Georgian 34.44 \u00b1 .02 0.29 \u00b1 .00 89.29 \u00b1 .01 Korean 33.27 \u00b1 .05 0.32 \u00b1 .00 88.46 \u00b1 .01 Russian 32.81 \u00b1 .06 0.38 \u00b1 .00 84.80 \u00b1 .02 Urdu 31.92 \u00b1 .03 0.23 \u00b1 .00 91.48 \u00b1 .01 Japanese 29.00 \u00b1 .04 0.33 \u00b1 .00 87.79 \u00b1 .", |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Language</td><td>Accuracy</td><td>CER</td><td>F1</td></tr><tr><td>Swedish Persian Arabic Greek Hebrew Overall</td><td colspan=\"3\">88.25 01 28.68 \u00b1 .05 0.28 \u00b1 .00 89.84 \u00b1 .02 25.74 \u00b1 .03 0.32 \u00b1 .00 89.23 \u00b1 .01 24.70 \u00b1 .03 0.35 \u00b1 .00 86.60 \u00b1 .01 15.24 \u00b1 .07 0.44 \u00b1 .00 84.58 \u00b1 .02 42.88 \u00b1 .02 0.27 \u00b1 .00 90.27 \u00b1 .01</td></tr></table>", |
| "num": null |
| } |
| } |
| } |
| } |