| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T01:08:08.074458Z" |
| }, |
| "title": "On Pronunciations in Wiktionary: Extraction and Experiments on Multilingual Syllabification and Stress Prediction", |
| "authors": [ |
| { |
| "first": "Winston", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": {} |
| }, |
| "email": "wswu@jhu.edu" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": {} |
| }, |
| "email": "yarowsky@jhu.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We constructed parsers for five non-English editions of Wiktionary, which combined with pronunciations from the English edition, comprises over 5.3 million IPA pronunciations, the largest pronunciation lexicon of its kind. This dataset is a unique comparable corpus of IPA pronunciations annotated from multiple sources. We analyze the dataset, noting the presence of machine-generated pronunciations. We develop a novel visualization method to quantify syllabification. We experiment on the new combined task of multilingual IPA syllabification and stress prediction, finding that training a massively multilingual neural sequence-to-sequence model with copy attention can improve performance on both high- and low-resource languages, and multitask training on stress prediction helps with syllabification.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We constructed parsers for five non-English editions of Wiktionary, which combined with pronunciations from the English edition, comprises over 5.3 million IPA pronunciations, the largest pronunciation lexicon of its kind. This dataset is a unique comparable corpus of IPA pronunciations annotated from multiple sources. We analyze the dataset, noting the presence of machine-generated pronunciations. We develop a novel visualization method to quantify syllabification. We experiment on the new combined task of multilingual IPA syllabification and stress prediction, finding that training a massively multilingual neural sequence-to-sequence model with copy attention can improve performance on both high- and low-resource languages, and multitask training on stress prediction helps with syllabification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Wiktionary 1 is a free online multilingual dictionary containing a plethora of interesting information. In this paper, we focus on the pronunciation annotations in Wiktionary, which are relatively understudied. For any given word, Wiktionary may include data for IPA (both phonetic and phonemic), hyphenation, dialectical variation, and even audio files of speakers pronouncing the words. These types of data have been shown to be useful for tasks such as grapheme-to-phoneme transduction, e.g. in recent SIGMORPHON shared tasks . There are many existing parsing efforts that have extracted pronunciation information from Wiktionary. Recent extractions of data from Wiktionary focus on obtaining high-quality pronunciations from a single edition of Wiktionary, usually the English edition (e.g. Wu and Yarowsky, 1 www.wiktionary.org 2020a; Sajous et al., 2020; Lee et al., 2020) . However, substantial increases in data can be obtained by parsing other editions of Wiktionary, which have been shown to be helpful for downstream tasks. For example, Schlippe et al. (2010) extract pronunciations from the English, French, German, and Spanish editions, and ? extract pronunciations from the English, German, Greek, Japanese, Korean, and Russian editions.", |
| "cite_spans": [ |
| { |
| "start": 795, |
| "end": 811, |
| "text": "Wu and Yarowsky,", |
| "ref_id": null |
| }, |
| { |
| "start": 812, |
| "end": 813, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 840, |
| "end": 860, |
| "text": "Sajous et al., 2020;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 861, |
| "end": 878, |
| "text": "Lee et al., 2020)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1048, |
| "end": 1070, |
| "text": "Schlippe et al. (2010)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we build upon Yawipa (Wu and Yarowsky, 2020a,b) , a recent Wiktionary parsing framework. Targeting the larger Wiktionaries for increased coverage and those not dealt with in previous work, we construct new pronunciation parsers for the French, Spanish, Malagasy, Italian, and Greek editions of Wiktionary. Combined with pronunciations from the English Wiktionary, this totals to over 5.3 million words, which to our knowledge is the largest pronunciation lexicon to date and also a unique comparable corpus of pronunciations. In Section 2, we show that our extracted pronunciations are a substantial increase in data, covering numerous pronunciations not in the English Wiktionary. This is especially beneficial for low-resource languages. In Section 3, we analyze this data and find that a small portion of these pronunciations may be low-quality and computer-generated. In Section 4, we present a novel visualization technique for analyzing the use of stress in IPA pronunciations. In Section 5, we experiment on the combined task of massively multilingual syllabification and stress detection. Our neural sequence-to-sequence model with copy attention outperforms a sequence labeling baseline, especially in very low-resource scenarios, underscoring the contributions of additional languages to the task. In addition we find that a multi-task approach of predicting both stress and syllabification can improve the performance on syllabification alone. ", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 62, |
| "text": "(Wu and Yarowsky, 2020a,b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As a multilingual resource, Wiktionary exists as a set of numerous editions. That is, the English Wiktionary is written in English by and for English speakers, while the French Wiktionary is written in French by and for French speakers. Any edition can contain entries for words in any language. For example, Figure 1 shows a screenshot of the English Wiktionary's pronunciation information for the French word chien. We use the terms <lang> edition and <lang> Wiktionary interchangeably.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 309, |
| "end": 317, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Wiktionary Pronunciation Extraction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Speakers of different languages have different priorities when annotating data. One can assume that an editor of the Spanish Wiktionary is more likely to provide pronunciations for Spanish words before working on English words. Our effort at extracting a new dataset of pronunciations from 6 different editions of Wiktionary resulted in a total of over 5.3 million unique IPA pronunciations across 2,177 languages. Note that because the data comes from multiple editions, a word may have multiple annotated pronunciations, making our dataset an interesting comparable corpora. Figure 2 shows the 16 languages with the most data in this dataset, along with the contribution of each edition of Wiktionary from which we parsed and extracted IPA pronunciations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 577, |
| "end": 585, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why parse other editions of Wiktionary?", |
| "sec_num": null |
| }, |
| { |
| "text": "We draw several insights from Figure 2 . First, the inclusion of pronunciations from non-English Wiktionaries represents substantial gains. Though the English edition is the largest Wiktionary by number of entries, 2 the French edition contains a huge number of pronunciations for French words, dwarfing other editions that we parsed. The French Wiktionary also supplies the entirety of the pronunciations for Northern Sami words (se, spoken in Norway, Sweden, and Finland), most of the available pronunciations for Esperanto (eo) and Italian (it) words, and also words in 1,198 other lowresource languages not shown in the long tail of Figure 2 . In contrast, the English edition (the second largest supplier) is the sole supplier of pronunciations in 416 languages.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 30, |
| "end": 38, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 637, |
| "end": 645, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why parse other editions of Wiktionary?", |
| "sec_num": null |
| }, |
| { |
| "text": "Parsing Implementation The Yawipa framework (Wu and Yarowsky, 2020a) extracts data from the XML dump of Wiktionary. 3 Every entry is encoded in MediaWiki markup, which is similar to Markdown but includes special templates (enclosed in double braces) which programmatically generates HTML that we see when we visit the Wiktionary website. For example, in the English wiktionary, the entry for the French word chien contains the following markup (rendered in Figure 1 ):", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 68, |
| "text": "(Wu and Yarowsky, 2020a)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 457, |
| "end": 465, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why parse other editions of Wiktionary?", |
| "sec_num": null |
| }, |
| { |
| "text": "===Pronunciation=== {{fr-IPA}} {{audio|fr|Fr-chien.ogg|audio}} {{rhymes|fr|j\u1ebc}}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why parse other editions of Wiktionary?", |
| "sec_num": null |
| }, |
| { |
| "text": "These three templates generate the three bullet points in Figure 1 . Note that the {{fr-IPA}} template generates the IPA pronunciation, so the IPA itself does not exist in the English Wiktionary dump. Thus, we can only extract the IPA from the French edition, below, highlighting the need to parse multiple Wiktionary editions for multiple sources of pronunciations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 58, |
| "end": 66, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why parse other editions of Wiktionary?", |
| "sec_num": null |
| }, |
| { |
| "text": "Above is the French Wiktionary's pronunciation for the word chien. A template (fr-r\u00e9g) is also used, but the IPA is extractable from the markup. Each edition of Wiktionary has its own conventions on formatting and templates, thus requiring a separate parser specifically for that edition. For implementation details, please see the repository https://github.com/wswu/yawipa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "=== {{S|nom|fr}} === {{fr-r\u00e9g|Sj\u1ebc}}", |
| "sec_num": null |
| }, |
| { |
| "text": "For high-resource languages, the home language edition (e.g. English edition for the English language) usually supplies the most pronunciations, but this is not always the case (e.g. the French Wiktionary provides more Italian pronunciations than the Italian edition). In terms of amount of data, two languages are outliers: Malagasy (mg, an Austronesian language spoken in Madagascar) and Volap\u00fck (vo, a constructed language). As relatively less spoken languages, these languages have a disproportionately large amount of data. Why is this so?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of the Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The data for these two languages come from the Malagasy edition, which we parsed because of its high ranking in the List of Wiktionaries. 4 Both Malagasy and Volap\u00fck are inflected languages 5 whose IPA pronunciations seem to be entirely computer-generated using a regular transduction process from orthography to IPA, which was exploited to create a large set of pronunciations for these two languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of the Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We also find that some Latin pronunciations may be machine-generated. For example, the Malagasy edition supplies /kontabulawit/ as the pronunciation for the Latin contabulavit and [d \"\u1ebd:onstRat] for demonstrat. These pronunciations lack stress and syllable markings, and in the case of demonstrat, do not agree with established pronunciations of Latin, thus leading us to believe that these were machine-generated pronunciations. In contrast, the English edition contains both well-formed classical and ecclesiastical Latin pronunciations with stress and syllable markers, but only for the dictionary forms contabul\u014d /kon\"ta.bu.lo:/ and d\u0113monstr\u014d /de:\"mon.stro:/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of the Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We must emphasize that we are not condemning the use of machine-generated pronunciations. For many languages, e.g. Spanish and Latin, the spelling of a word reflects its pronunciation, so generated pronunciations are likely to be accurate. Indeed, the existence of pronunciation templates such as {{fr-IPA}}, mentioned above, are well-researched additions to Wiktionary that alleviate the need for humans to manually input IPA pronunciations, thus reducing the potential for human error. We fully support the use of these templates (though they make our parsing job harder), and we would love to see them standardized across all Wiktionary editions, so that editions such as the Malagasy edition can benefit from contributions to the English edition (or any other edition, for that matter).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of the Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We do caution researchers that the data contained in crowd-sourced resources such as Wiktionary may not be thoroughly vetted for accuracy, as we have discovered. Fortunately, the openness of these crowdsourced data allows for community members to quickly intervene when problematic data is found. One especially poignant example in recent news is the Scots Wikipedia, a large portion of which was recently revealed to be written by an American teenager who is not a Scots speaker. 6 Essentially, this teenager translated English articles into \"Scots\" by systematically rewriting English words to sound as if they were spoken with a Scottish accent, in the same vein as some of the Latin \"IPA\" pronunciations in the Malagasy Wiktionary.", |
| "cite_spans": [ |
| { |
| "start": 481, |
| "end": 482, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of the Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "IPA has the ability to mark syllable boundaries (.) as well as primary (\") and secondary () stress. Words in some languages, e.g. Malay, do not have stress, and sometimes stress can be double marked (\"\") for extra stress. We first quantify IPA stress and syllabification in our extracted dataset then present multilingual experiments on predicting syllabification and stress.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Visualizing Syllabification", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We develop a visualization technique to understand the distribution of words in each language that contain syllable boundaries (Figure 3 ). These bubble charts plot the number of characters in a word (x-axis), the percentage of words containing syllable markers (y-axis), and the number of words in these categories (size of the dot). These charts can help researchers to quickly quantify the presence of syllable markers, one component of high-quality IPA pronunciations. We consider a word to be syllabified if it contains any of the following symbols:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 127, |
| "end": 136, |
| "text": "(Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Visualizing Syllabification", |
| "sec_num": "4" |
| }, |
| { |
| "text": ". \"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Visualizing Syllabification", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Ideally, one should see that the longer the word, the higher the percentage of words that have syllables marked. French is a perfect example of this: once words reach 9-10 characters in length, they all contain syllable markers. By examining these plots, we can easily identify examples of problematic IPA syllabification in Malagasy (mg) and Latin (la) words. For Malagasy words, syllable boundaries simply do not exist. For Latin words, we see an unusual negative-slope curve, where words around 4-6 characters in length are more likely to have syllables marked, but longer words are less likely to have syllable boundaries marked. This analysis actually is consistent with our earlier finding in Section 2: because Latin is a highly inflected language, the dictionary forms contain high-quality IPA, but the overwhelming number of pronunciations are actually machine-generated for inflected forms, which may not have the syllables marked. English is a middle ground in terms of quality. While we see the expected upward slope as the length of the word increases, the percentage of words with syllable markers never approaches 100%. A manual review of several English pronunciations indicates that annotators simply did not include syllable boundaries for many English words. Further analyses could shed light on the reasons for the negligence of the annotators, or other phenomena that might explain the lack of syllable markers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Visualizing Syllabification", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this section, we present experiments on multilingual syllable and stress prediction. In the linguistics literature, many studies have shown that awareness of syllable boundaries can improve word recognition performance in children (e.g. McBride-Chang et al., 2004; Plaza and Cohen, 2007; G\u00fcldenoglu, 2017) . Speech syllabification is also a common step in a speech recognition pipeline. Syllabification of text is not a new task, and has been explored via a variety of methods, including rulebased and grammar-based approaches (e.g. Weerasinghe et al., 2005; M\u00fcller, 2006) and data-driven approaches (e.g. Bartlett et al., 2008; Nicolai et al., 2016; Gyanendro Singh et al., 2016) . However, previous work has focused primarily on a handful of languages, and some focus on orthographic syllabification rather than phonemic segmentation. Some use CELEX (Baayen et al., 1996) , a popular dataset containing syllabified text, but it only contains syllabified words in English, German, and Dutch. In contrast, our extracted pronunciation lexicon is a unique multilingual resource that allows for developing and evaluating models and approaches on the new combined task of massively multilingual IPA syllabification and stress prediction across hundreds of languages. In this task, given unmarked IPA, a model must insert syllable markers or stress markers at the appropriate locations.", |
| "cite_spans": [ |
| { |
| "start": 240, |
| "end": 267, |
| "text": "McBride-Chang et al., 2004;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 268, |
| "end": 290, |
| "text": "Plaza and Cohen, 2007;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 291, |
| "end": 308, |
| "text": "G\u00fcldenoglu, 2017)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 536, |
| "end": 561, |
| "text": "Weerasinghe et al., 2005;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 562, |
| "end": 575, |
| "text": "M\u00fcller, 2006)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 609, |
| "end": 631, |
| "text": "Bartlett et al., 2008;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 632, |
| "end": 653, |
| "text": "Nicolai et al., 2016;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 654, |
| "end": 683, |
| "text": "Gyanendro Singh et al., 2016)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 855, |
| "end": 876, |
| "text": "(Baayen et al., 1996)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Data For our task, we filter our pronunciation dataset to keep only IPA containing syllable boundaries or stress markers, 7 so that we have ground truth for our model. This resulted in 93,206 IPA pronunciations across 174 languages, which we split into a 80-10-10 train-dev-test stratified split (same proportion of languages in each set).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Models We first build a baseline: a multilingual character BiLSTM sequence tagger with 256 hidden size (B) that predicts both stress and syllabification (Str & Syl) or syllabification alone (Syl). The data is preprocessed such that each IPA character is labelled with 0 for no stress or syllable, 1 for primary stress (\"), 2 for secondary stress (), and 3 for syllable boundary (.). We include a language token so the model will incorporate knowledge of the language. For example:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "IPA: /In.flu\"En.z@/ Input: eng I n f l u E n z @ Output: 0 2 0 3 0 0 1 0 3 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For comparison, we experiment with two modern seq2seq models: the default encoder-decoder model (S) in OpenNMT-py (Klein et al., 2017) , and the same model with copy attention (SC) (See et al., 2017) . In this scenario, we formulate syllabification and stress prediction as a sequence generation task, where the input is an unstressed, unsyllabified IPA, and the output is the original IPA sequence containing both stress and syllable markers.", |
| "cite_spans": [ |
| { |
| "start": 114, |
| "end": 134, |
| "text": "(Klein et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 181, |
| "end": 199, |
| "text": "(See et al., 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We then treat syllabification and stress prediction in a pipelined approach (Syl \u2192 Str), where the first model (B or SC) will predict syllable boundaries, and then a second model will predict the stress. Stress classification is a 3-class classification problem: given a syllable, predict primary stress, secondary stress, or no stress. The structure of this stress classifier is also a BiLSTM, where the hidden state of the syllable in question is passed to a dense feed-forward layer, then a softmax.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "A summary of experimental results is in Table 1 . The baseline BiLSTM model performs consistently worse than the seq2seq models. This is somewhat surprising, since the seq2seq task is a more challenging task: the model must generate the IPA characters along with stress and syllable markers. However, the seq2seq model is able to generate the 7 A stress marker can serve as a syllable boundary, e.g. for the English word consume /k@n\"sum/.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 40, |
| "end": 47, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Acc CED 5Acc 5CED Table 1 : Results on the syllabification and stress prediction tasks. See Section 5 for abbreviations. Acc is 1-best accuracy, 5Best is 5-best accuracy (is the gold in the top 5 hypotheses?), CED is mean character edit distance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 18, |
| "end": 25, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "correct sequence of IPA characters, minus stress and syllable markers, in 95% (for regular attention) and 99% (for copy attention) of test examples, alleviating our concerns and proving the effectiveness of copy attention for this task. The pipeline approach performs substantially worse than the multi-task approach. In the pipeline, the syllabification model first predicts the syllable boundaries, then the stress classifier produces a classification for each syllable. We find that with the pipeline approach, it is impossible to improve upon the first step in the pipeline. Thus, if the syllabification step does not correctly identify syllable boundaries, the final pronunciation will never be correct, even if the stress is correctly predicted for each syllable. Finally, multi-task training on both syllabification and stress marking improves performance over syllabification alone. We believe this is because stress and syllable prediction are two somewhat overlapping tasks. If a model can label stress, then it should have some notion of where syllables are. The (-Str) rows in Table 1 show performance on syllabification by evaluating the output of the multitask model preprocessed to replace all stress marks with syllable boundaries.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1089, |
| "end": 1096, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "The large majority of languages in our dataset can be considered low-resource, a specific interest of our experiments. 154 of the 174 languages have much fewer than 466 training examples (0.5% of the entire dataset), yet the average accuracy on these languages is an impressive 67% for syllabifi-cation (B Str & Syl -Str) and 51% for both syllabification and stress prediction (B Str & Syl) . This highlights the contribution of other languages in a single massively multilingual model trained to do both tasks. Other researchers have found that good performance on syllabification requires much more data than this (Nicolai et al., 2016) . We highlight the fact that many of the languages have less than 10 test examples and can be considered truly low-resource; the contribution of many other languages allows our multilingual models to predict the correct pronunciation with minimal training data in a specific language. Though we find that multilingual training helps for low-resource languages, it can also help with high-resource languages: in the SC Str & Syl scenario, a model trained only on French obtained 92.1% on the French test words, compared to the multilingual model at 98.1% accuracy. Full tables of results, along with code to reproduce our experiments, is available at https: //github.com/wswu/syllabification.", |
| "cite_spans": [ |
| { |
| "start": 377, |
| "end": 390, |
| "text": "(B Str & Syl)", |
| "ref_id": null |
| }, |
| { |
| "start": 616, |
| "end": 638, |
| "text": "(Nicolai et al., 2016)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "We extracted the largest dataset of IPA pronunciations to date, by combining IPA from the French, Spanish, Malagasy, Italian, and Greek editions of Wiktionary along with existing pronunciations from the English edition, totaling to 5.3 million pronunciations. We developed a visualization method for examining syllabification in large datasets, which can give indications about the quality of IPA pronunciations. We experiment on the new combined task of massively multilingual prediction of syllabification and stress using a variety of models and approaches, showing success with a multitask multilingual sequence-to-sequence model. We hope our dataset and analysis methods will be useful for researchers in a variety of disciplines.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We envision our newly extracted pronunciation dataset to be especially useful for researchers interested in lexicography and spoken language technologies. In terms of lexicography, this dataset is a unique comparable corpus containing annotations from several editions of Wiktionary, each representing a distinct population of speakers. In several cases, the same pronunciation is supplied by multiple editions, and some editions use phonetic rather than phonemic IPA. Future work can address questions such as: When and why might different editions disagree on a pronunciation? Why do some words have pronunciations and others don't?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In addition, we would like to investigate the use of our pronunciation dataset in language learning of core vocabulary of low-resource languages and modeling etymology relationships between words (Wu et al., 2021) .", |
| "cite_spans": [ |
| { |
| "start": 196, |
| "end": 213, |
| "text": "(Wu et al., 2021)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://meta.wikimedia.org/wiki/ Wiktionary", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://dumps.wikimedia. org/enwiktionary/latest/ XXwiktionary-latest-pages-articles.xml. bz2, where XX is replaced with a two-letter ISO 639-1 code.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://en.wikipedia.org/wiki/List_ of_Wiktionaries 5 Inflected words have their own Wiktionary entry, which can exponentially increase the number of pronunciations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.reddit.com/r/Scotland/ comments/ig9jia/ive_discovered_that_ almost_every_single_article", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "The celex lexical database (cd-rom)", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "H" |
| ], |
| "last": "Baayen", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Piepenbrock", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Gulikers", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. H. Baayen, R. Piepenbrock, and L. Gulikers. 1996. The celex lexical database (cd-rom).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Automatic syllabification with structured SVMs for letter-to-phoneme conversion", |
| "authors": [ |
| { |
| "first": "Susan", |
| "middle": [], |
| "last": "Bartlett", |
| "suffix": "" |
| }, |
| { |
| "first": "Grzegorz", |
| "middle": [], |
| "last": "Kondrak", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Cherry", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08: HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "568--576", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Susan Bartlett, Grzegorz Kondrak, and Colin Cherry. 2008. Automatic syllabification with structured SVMs for letter-to-phoneme conversion. In Pro- ceedings of ACL-08: HLT, pages 568-576, Colum- bus, Ohio. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The SIGMORPHON 2020 shared task on multilingual grapheme-to-phoneme conversion", |
| "authors": [ |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Gorman", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "E" |
| ], |
| "last": "Lucas", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Ashby", |
| "suffix": "" |
| }, |
| { |
| "first": "Arya", |
| "middle": [], |
| "last": "Goyzueta", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "You", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
| "volume": "", |
| "issue": "", |
| "pages": "40--50", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.sigmorphon-1.2" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyle Gorman, Lucas F.E. Ashby, Aaron Goyzueta, Arya McCarthy, Shijie Wu, and Daniel You. 2020. The SIGMORPHON 2020 shared task on multilin- gual grapheme-to-phoneme conversion. In Proceed- ings of the 17th SIGMORPHON Workshop on Com- putational Research in Phonetics, Phonology, and Morphology, pages 40-50, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The effects of syllableawareness skills on the word-reading performances of students reading in a transparent orthography", |
| "authors": [ |
| { |
| "first": "Birkan", |
| "middle": [], |
| "last": "G\u00fcldenoglu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Electronic Journal of Elementary Education", |
| "volume": "8", |
| "issue": "3", |
| "pages": "425--442", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Birkan G\u00fcldenoglu. 2017. The effects of syllable- awareness skills on the word-reading performances of students reading in a transparent orthography. In- ternational Electronic Journal of Elementary Educa- tion, 8(3):425-442.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Automatic syllabification for Manipuri language", |
| "authors": [ |
| { |
| "first": "Lenin", |
| "middle": [], |
| "last": "Loitongbam Gyanendro Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanasam Ranbir", |
| "middle": [], |
| "last": "Laitonjam", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COL-ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "349--357", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Loitongbam Gyanendro Singh, Lenin Laitonjam, and Sanasam Ranbir Singh. 2016. Automatic syllabifica- tion for Manipuri language. In Proceedings of COL- ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 349-357, Osaka, Japan. The COLING 2016 Orga- nizing Committee.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Opennmt: Opensource toolkit for neural machine translation", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Senellart", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G. Klein, Yoon Kim, Y. Deng, Jean Senellart, and Alexander M. Rush. 2017. Opennmt: Open- source toolkit for neural machine translation. ArXiv, abs/1701.02810.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Massively multilingual pronunciation modeling with WikiPron", |
| "authors": [ |
| { |
| "first": "Jackson", |
| "middle": [ |
| "L" |
| ], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "E" |
| ], |
| "last": "Lucas", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "Elizabeth" |
| ], |
| "last": "Ashby", |
| "suffix": "" |
| }, |
| { |
| "first": "Yeonju", |
| "middle": [], |
| "last": "Garza", |
| "suffix": "" |
| }, |
| { |
| "first": "Sean", |
| "middle": [], |
| "last": "Lee-Sikka", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Arya", |
| "middle": [ |
| "D" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gorman", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "4223--4228", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jackson L. Lee, Lucas F.E. Ashby, M. Elizabeth Garza, Yeonju Lee-Sikka, Sean Miller, Alan Wong, Arya D. McCarthy, and Kyle Gorman. 2020. Massively multilingual pronunciation modeling with WikiPron. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 4223-4228, Mar- seille, France. European Language Resources Asso- ciation.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Levels of phonological awareness in three cultures", |
| "authors": [ |
| { |
| "first": "Catherine", |
| "middle": [], |
| "last": "Mcbride-Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ellen", |
| "middle": [], |
| "last": "Bialystok", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Karen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanping", |
| "middle": [], |
| "last": "Chong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Journal of Experimental Child Psychology", |
| "volume": "89", |
| "issue": "2", |
| "pages": "93--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Catherine McBride-Chang, Ellen Bialystok, Karen KY Chong, and Yanping Li. 2004. Levels of phonolog- ical awareness in three cultures. Journal of Experi- mental Child Psychology, 89(2):93-111.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Improving syllabification models with phonotactic knowledge", |
| "authors": [ |
| { |
| "first": "Karin", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Eighth Meeting of the ACL Special Interest Group on Computational Phonology and Morphology at HLT-NAACL 2006", |
| "volume": "", |
| "issue": "", |
| "pages": "11--20", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karin M\u00fcller. 2006. Improving syllabification models with phonotactic knowledge. In Proceedings of the Eighth Meeting of the ACL Special Interest Group on Computational Phonology and Morphology at HLT- NAACL 2006, pages 11-20, New York City, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Morphological segmentation can improve syllabification", |
| "authors": [ |
| { |
| "first": "Garrett", |
| "middle": [], |
| "last": "Nicolai", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Grzegorz", |
| "middle": [], |
| "last": "Kondrak", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 14th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
| "volume": "", |
| "issue": "", |
| "pages": "99--103", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-2016" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Garrett Nicolai, Lei Yao, and Grzegorz Kondrak. 2016. Morphological segmentation can improve syllabifi- cation. In Proceedings of the 14th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology, pages 99-103, Berlin, Germany. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The contribution of phonological awareness and visual attention in early reading and spelling", |
| "authors": [ |
| { |
| "first": "Monique", |
| "middle": [], |
| "last": "Plaza", |
| "suffix": "" |
| }, |
| { |
| "first": "Henri", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Dyslexia", |
| "volume": "13", |
| "issue": "1", |
| "pages": "67--76", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Monique Plaza and Henri Cohen. 2007. The contribu- tion of phonological awareness and visual attention in early reading and spelling. Dyslexia, 13(1):67- 76.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "ENGLAWI: From human-to machinereadable Wiktionary", |
| "authors": [ |
| { |
| "first": "Franck", |
| "middle": [], |
| "last": "Sajous", |
| "suffix": "" |
| }, |
| { |
| "first": "Basilio", |
| "middle": [], |
| "last": "Calderone", |
| "suffix": "" |
| }, |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Hathout", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "3016--3026", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franck Sajous, Basilio Calderone, and Nabil Hathout. 2020. ENGLAWI: From human-to machine- readable Wiktionary. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 3016-3026, Marseille, France. European Lan- guage Resources Association.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Wiktionary as a source for automatic pronunciation extraction", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Schlippe", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ochs", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Schultz", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Schlippe, Sebastian Ochs, and Tanja Schultz. 2010. Wiktionary as a source for automatic pronunciation extraction. In INTERSPEECH.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Get to the point: Summarization with pointergenerator networks", |
| "authors": [ |
| { |
| "first": "Abigail", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1073--1083", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1099" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abigail See, Peter J. Liu, and Christopher D. Manning. 2017. Get to the point: Summarization with pointer- generator networks. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1073- 1083, Vancouver, Canada. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A rule based syllabification algorithm for Sinhala", |
| "authors": [ |
| { |
| "first": "Ruvan", |
| "middle": [], |
| "last": "Weerasinghe", |
| "suffix": "" |
| }, |
| { |
| "first": "Asanka", |
| "middle": [], |
| "last": "Wasala", |
| "suffix": "" |
| }, |
| { |
| "first": "Kumudu", |
| "middle": [], |
| "last": "Gamage", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Second International Joint Conference on Natural Language Processing: Full Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/11562214_39" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruvan Weerasinghe, Asanka Wasala, and Kumudu Gamage. 2005. A rule based syllabification algo- rithm for Sinhala. In Second International Joint Conference on Natural Language Processing: Full Papers.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Sequence models for computational etymology of borrowings", |
| "authors": [ |
| { |
| "first": "Winston", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", |
| "volume": "", |
| "issue": "", |
| "pages": "4032--4037", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.findings-acl.353" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Winston Wu, Kevin Duh, and David Yarowsky. 2021. Sequence models for computational etymology of borrowings. In Findings of the Association for Com- putational Linguistics: ACL-IJCNLP 2021, pages 4032-4037, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Multilingual dictionary based construction of core vocabulary", |
| "authors": [ |
| { |
| "first": "Winston", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Garrett", |
| "middle": [], |
| "last": "Nicolai", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "4211--4217", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Winston Wu, Garrett Nicolai, and David Yarowsky. 2020. Multilingual dictionary based construction of core vocabulary. In Proceedings of the 12th Lan- guage Resources and Evaluation Conference, pages 4211-4217, Marseille, France. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Computational etymology and word emergence", |
| "authors": [ |
| { |
| "first": "Winston", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "3252--3259", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Winston Wu and David Yarowsky. 2020a. Computa- tional etymology and word emergence. In Proceed- ings of the 12th Language Resources and Evaluation Conference, pages 3252-3259, Marseille, France. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Wiktionary normalization of translations and morphological information", |
| "authors": [ |
| { |
| "first": "Winston", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4683--4692", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.coling-main.413" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Winston Wu and David Yarowsky. 2020b. Wik- tionary normalization of translations and morpholog- ical information. In Proceedings of the 28th Inter- national Conference on Computational Linguistics, pages 4683-4692, Barcelona, Spain (Online). Inter- national Committee on Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Screenshot of the English Wiktionary's pronunciation information for the French word chien." |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "The top 16 languages in terms of number of pronunciations, with contributions from multiple editions of Wiktionary." |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Percentage of French, English, Malagasy, and Latin words containing syllable markers, by length of word. The size of the points indicates the number of words and cannot be compared among graphs." |
| } |
| } |
| } |
| } |