| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:31:08.086984Z" |
| }, |
| "title": "Data Augmentation for Transformer-based G2P", |
| "authors": [ |
| { |
| "first": "Zach", |
| "middle": [], |
| "last": "Ryan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Colorado", |
| "location": {} |
| }, |
| "email": "zachary.j.ryan@colorado.edu" |
| }, |
| { |
| "first": "Mans", |
| "middle": [], |
| "last": "Hulden", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Colorado", |
| "location": {} |
| }, |
| "email": "mans.hulden@colorado.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The Transformer model has been shown to outperform other neural seq2seq models in several character-level tasks. It is unclear, however, if the Transformer would benefit as much as other seq2seq models from data augmentation strategies in the low-resource setting. In this paper we explore methods for data augmentation in the g2p task together with the Transformer model. Our results show that a relatively simple alignment-based approach of identifying consistent input-output subsequences in grapheme-phoneme data combined with a subsequent splicing together of such pieces to generate hallucinated data works well in the low-resource setting, often delivering substantial performance improvement over a standard Transformer model.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The Transformer model has been shown to outperform other neural seq2seq models in several character-level tasks. It is unclear, however, if the Transformer would benefit as much as other seq2seq models from data augmentation strategies in the low-resource setting. In this paper we explore methods for data augmentation in the g2p task together with the Transformer model. Our results show that a relatively simple alignment-based approach of identifying consistent input-output subsequences in grapheme-phoneme data combined with a subsequent splicing together of such pieces to generate hallucinated data works well in the low-resource setting, often delivering substantial performance improvement over a standard Transformer model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The Transformer model (Vaswani et al., 2017) has recently been shown to be robust for character-level translation tasks, outperforming other recurrent sequence-to-sequence (seq2seq) models in a wide range of tasks, including morphological inflection, grapheme-to-phoneme (g2p), and text normalization . A transformer-based model also served as the baseline system for both the SIGMORPHON 2020 shared tasks on graphemeto-phoneme conversion (Gorman et al., 2020) and low-resource morphological inflection (Vylomova et al., 2020) , delivering substantially better performance than other models. 1 A common thread in research with characterlevel seq2seq has been that, for situations where few training examples are available, alternative strategies to produce more robust performance must be taken. For morphology tasks, this has included strategies such as instructing the model 1 Our code is available at https://github.com/ LonelyRider-cs/sig_shared_tasks.", |
| "cite_spans": [ |
| { |
| "start": 22, |
| "end": 44, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 439, |
| "end": 460, |
| "text": "(Gorman et al., 2020)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 503, |
| "end": 526, |
| "text": "(Vylomova et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 592, |
| "end": 593, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "#camerounais# #kam_\u0281u_n\u025b__# #diff\u00e9rence# #di_fe\u0281\u0251_s_# nais# n\u025b__# #diff\u00e9 #di_fe ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Figure 1: Augmentation strategy: after aligning all grapheme-phoneme pairs, we use input subsequences that are reliably mapped to the same output in the whole data set in creating an augmented data set from the pieces. We also enforce-using an unsupervised algorithm for detecting consonants and vowels-that only CV or VC is allowed at the boundary of the pieces spliced together.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "VC", |
| "sec_num": null |
| }, |
| { |
| "text": "to copy input symbols to the output (Makarov et al., 2017; Makarov and Clematide, 2018; Anastasopoulos and Neubig, 2019) , which may require alignment of the input and output in the training. Another strategy is data augmentation (Bergmanis et al., 2017; Silfverberg et al., 2017) , whereby some mechanism is employed to generate additional training examples from the few available ones. Pointer-generator networks (Vinyals et al., 2015) , which facilitate copying of the input, have also been employed (Sharma et al., 2018) . Perhaps since no low-resource g2p task has previously been organized, the performance of standard models of seq2seq in settings with limited training data have not been explored as much as in morphological inflection, where data augmentation has proven to be a successful strategy.", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 58, |
| "text": "(Makarov et al., 2017;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 59, |
| "end": 87, |
| "text": "Makarov and Clematide, 2018;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 88, |
| "end": 120, |
| "text": "Anastasopoulos and Neubig, 2019)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 230, |
| "end": 254, |
| "text": "(Bergmanis et al., 2017;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 255, |
| "end": 280, |
| "text": "Silfverberg et al., 2017)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 415, |
| "end": 437, |
| "text": "(Vinyals et al., 2015)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 503, |
| "end": 524, |
| "text": "(Sharma et al., 2018)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "VC", |
| "sec_num": null |
| }, |
| { |
| "text": "The Transformer model, as opposed to other seq2seq models such as bidirectional LSTM encoder-decoders, however, seems to be more robust by itself in the low-resource setting, at least for morphology tasks. The multiple Transformer-based baselines in the SIGMORPHON 2020 morphological task did not provide any consistent improvement by data augmentation. Also, the best-performing systems did not seem to use this strategy, even in low-resource cases. For the grapheme-to-phoneme task, it is therefore unclear if the Transformer would also benefit from one of these strategies in low-resource scenarios. The SIGMORPHON g2p task (task 1) featured uniform amounts of training data of 3600 g/p word pairs, and so can not be considered a low-resource task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "VC", |
| "sec_num": null |
| }, |
| { |
| "text": "The different strategies to fortify seq2seq models in the low-resource setting in other character-level tasks are not all applicable to the g2p task, however. The array of mechanisms for learning to copy the input-special copy symbols, pointer-generator networks-favored by many low-resource morphology systems do not naturally transfer to the g2p task since the input and output pairs use different alphabets. Data augmentation, however, remains a potentially viable strategy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "VC", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper we discuss experiments on the SIGMORPHON 2020 task 1 data sets where we explored data augmentation strategies for the g2p setting. Our actual submission (team CU-Z) to the task was a bidirectional LSTM encoder-decoder which later turned out to perform much worse than the Transformer model described in this paper. We did not finish training the Transformer models before the submission deadline, and only submitted the BiLSTM. In this paper we only discuss data augmentation and the Transformer model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "VC", |
| "sec_num": null |
| }, |
| { |
| "text": "We experimented with two strategies of data augmentation: our first strategy was to identify in the training data grapheme sequences in the beginning of words and at the ends of words that (almost) always map to the same phoneme sequence, such as a word-initial c consistently mapping to k. Subsequently we generated new training data by swapping such sequences across words, generating new words. This initial strategy failed to provide improvements on the development set, and we moved to a more refined version of this idea, discussed in more detail below. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Augmentation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In our main strategy, we first perform a 1-to-1 alignment of the input-output data, yielding alignments such as are shown in Figure 1 . For the alignment, we use an MCMC-algorithm originally developed by the second author for the SIGMORPHON 2016 shared task baseline for morphological inflection (Cotterell et al., 2016) , largely similar to Expectation-Maximization based models (Ristad and Yianilos, 1998; Novak et al., 2012) , but using an MCMC sampler instead. After the alignment, we investigate how consistently some part of the word-initial substring graphemes #i 1 , . . . , i m maps to the same phonemes #o 1 , . . . , o n , and likewise for the word-final parts i 1 , . . . , i m # and o 1 , . . . , o n #. We use # here as a symbol to denote either beginning-of-word or end-of-word. Whichever is intended should be clear from the context.", |
| "cite_spans": [ |
| { |
| "start": 296, |
| "end": 320, |
| "text": "(Cotterell et al., 2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 380, |
| "end": 407, |
| "text": "(Ristad and Yianilos, 1998;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 408, |
| "end": 427, |
| "text": "Novak et al., 2012)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 125, |
| "end": 133, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Slice-and-shuffle", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "For example, in French, the initial grapheme sequence #poin, whenever found in the data, is always aligned with #pw\u1ebc, and the final grapheme sequence parer# is consistently aligned with the phoneme sequence paKe#. Such pieces can then be used to create new grapheme/phoneme pairs in an augmented training data set, such as poinparer \u2192 pw\u1ebcpaKe. See Figure 1 for another example.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 348, |
| "end": 356, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Slice-and-shuffle", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In particular, for an input subsequence i, we estimate its reliability as being associated with an output subsequence o as the conditional probability of the output, given the input in the usual way as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Slice-and-shuffle", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "p(o|i) = count(i : o) + \u03b1 ANY count(i : ANY) + \u03b1|ANY| (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Slice-and-shuffle", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Here \u03b1 is a smoothing parameter and ANY-through a slight notational abuse-represents all the witnessed different output alignments for a particular input subsequence i. For example, if we are calculating the conditional probability of some output sequence, conditioned on an initial #pho sequence, and #pho has been aligned in the training data with #p, #f, and #fo, then ANY represents the set {#p, #f, #fo}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Slice-and-shuffle", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "To select which beginning and ending pieces can be reliably used for creation of augmented data, we declare a cutoff probability c = 0.98 and only use those pieces i : o where p(o|i) > c. This yields a large number of usable pieces for each language, even in the lowest-resource setting of 100 training examples. 2 Note that the number of actual potential augmented input-output mappings corresponds to roughly the square of the number of discovered reliable beginning and ending pairings. We generate augmented words completely at random from all the pieces available to us, except we limit the output sequence length to 15 by excluding longer sequences, and put an additional restriction on the juncture where the splices come together regarding consonants and vowels, discussed below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Slice-and-shuffle", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "After estimating p(o|i) for each seen subsequence in the training data the resulting \"reliable\" pieces can be spliced together to augment the data set, by combining word-initial and word-final pieces. Since phonological assimilations and coarticulations are very common in vowel-vowel and consonant-consonant sequences, and since we wish to avoid generating unnatural syllables, we do not splice together pieces where a slice ending in a phoneme-side consonant would be paired up with another one that begins with a vowel and vice versa. This is also shown in the example Figure 1 . To determine which symbols on the phoneme side are consonants and vowels, we use the unsupervised 2 ady: 2933 (100), 11358 (500); arm: 2789 (100), 11840 (100), bul: 2862 (100), 10657 (500), dut: 4422 (100), 19058 (500); fre: 3005 (100), 11996 (500); 3089 (100), 13698 (500); gre: 3667 (100), 16341 (500); hin: 2073 (100), 8141 (500); hun: 3282 (100), 13748 (500); 2438 (100), 9556 (500); jpn: 725 (100), 3252 (500); kor: 331 (100), 1280 (500); lit: 4328 (100), 15414 (500); rum: 3330 (100) 12360 (500); vie: 2567 (100), 12309 (500). algorithm in Hulden (2017) to divide the set of phonemes seen in the training data for a language into consonants and vowels. Table 1 shows a selection of French \"words\" generated by this complete process of aligning, determining useful pieces, and splicing them together while avoiding CC or VV sequences at the juncture of splicing.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 572, |
| "end": 580, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1242, |
| "end": 1249, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Consonants and Vowels", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "For each language and each original-size data set (100, 500, 3600) we generate 50,000 additional training examples from the original training data. To create the low-resource data training sets from the shared task training sets, we randomly select 100 (min), or 500 (med) examples from the original training data consisting of 3,600 examples. To determine the cutoff where the data-augmentation strategy stops paying dividends, we also create an augmented data set of 50,000 examples from the original data (we call the original task data the full data set).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Consonants and Vowels", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Following Wu et al. 2020, we use a relatively small transformer model (the Fairseq implementation; Ott et al. 2019) with 4 encoder-decoder layers, and 4 attention heads. The embedding size is 256 and hidden layer size 1024. We use dropout (0.3) during training, a batch size of 400, and a learning rate of 0.001. We train the models until no improvement is seen on the dev-set for 5 epochs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training details", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "The main results are shown in Table 2 and Figure 2 . As can be seen, there is a consistent pattern of diminishing returns as more training data becomes available, with word error rates being significantly lower for almost all the augmented cases where 100 or 500 examples were used.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 30, |
| "end": 37, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 42, |
| "end": 50, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Recurrent neural networks in a variety of models have been applied to the g2p problem, including LSTMs and bidirectional LSTMs (Rao et al., 2015) , as well as convolutional networks (Yolchuyeva et al., 2019) . The Transformer for g2p is investigated in and Yolchuyeva et al. (2020) , showing improvements over previous models, at least in high-resource settings. Low-resource settings for g2p in general are examined in Jyothi and Hasegawa-Johnson (2017) , and a number of papers have experimented with high-resource to lowresource transfer learning (Schlippe et al., 2014; ady arm bul dut fre geo gre hin hun ice jpn kor lit rum vie Table 2 : Word error rate (WER) results on the test set when trained with 100 examples, 500 examples, and the full data set, compared to augmentation ( aug ) for (100,500,3600) \u2192 50,000 synthetic examples. Deri and Knight, 2016) , an avenue we did not explore in this work.", |
| "cite_spans": [ |
| { |
| "start": 127, |
| "end": 145, |
| "text": "(Rao et al., 2015)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 182, |
| "end": 207, |
| "text": "(Yolchuyeva et al., 2019)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 257, |
| "end": 281, |
| "text": "Yolchuyeva et al. (2020)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 420, |
| "end": 454, |
| "text": "Jyothi and Hasegawa-Johnson (2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 550, |
| "end": 573, |
| "text": "(Schlippe et al., 2014;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 574, |
| "end": 574, |
| "text": "", |
| "ref_id": null |
| }, |
| { |
| "start": 841, |
| "end": 863, |
| "text": "Deri and Knight, 2016)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 635, |
| "end": 642, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We have developed a method for data augmentation for the g2p task based on a 1-to-1 alignment of input/output strings together with a confidence calculation of what parts of the aligned strings can be used to splice together an augmented dataset. Used together with the popular Transformer seq2seq model, we see significant and consistent improvements on very small datasets of 100 examples, moderate improvements on medium-size datasets (500 examples), with the advantage tapering off and mostly disappearing completely with the shared tasks' datasets of 3,600 examples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Pushing the limits of low-resource morphological inflection", |
| "authors": [ |
| { |
| "first": "Antonios", |
| "middle": [], |
| "last": "Anastasopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "984--996", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1091" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antonios Anastasopoulos and Graham Neubig. 2019. Pushing the limits of low-resource morphological in- flection. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 984-996, Hong Kong, China. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Training data augmentation for low-resource morphological inflection", |
| "authors": [ |
| { |
| "first": "Toms", |
| "middle": [], |
| "last": "Bergmanis", |
| "suffix": "" |
| }, |
| { |
| "first": "Katharina", |
| "middle": [], |
| "last": "Kann", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the CoNLL SIGMORPHON 2017 Shared Task: Universal Morphological Reinflection", |
| "volume": "", |
| "issue": "", |
| "pages": "31--39", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K17-2002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toms Bergmanis, Katharina Kann, Hinrich Sch\u00fctze, and Sharon Goldwater. 2017. Training data aug- mentation for low-resource morphological inflection. In Proceedings of the CoNLL SIGMORPHON 2017 Shared Task: Universal Morphological Reinflection, pages 31-39, Vancouver. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The SIGMORPHON 2016 shared Task-Morphological reinflection", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Christo", |
| "middle": [], |
| "last": "Kirov", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Sylak-Glassman", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 14th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
| "volume": "", |
| "issue": "", |
| "pages": "10--22", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-2002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Cotterell, Christo Kirov, John Sylak-Glassman, David Yarowsky, Jason Eisner, and Mans Hulden. 2016. The SIGMORPHON 2016 shared Task- Morphological reinflection. In Proceedings of the 14th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphol- ogy, pages 10-22, Berlin, Germany. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Grapheme-tophoneme models for (almost) any language", |
| "authors": [ |
| { |
| "first": "Aliya", |
| "middle": [], |
| "last": "Deri", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "399--408", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1038" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aliya Deri and Kevin Knight. 2016. Grapheme-to- phoneme models for (almost) any language. In Pro- ceedings of the 54th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 399-408, Berlin, Germany. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The SIGMORPHON 2020 shared task on multilingual grapheme-to-phoneme conversion", |
| "authors": [ |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Gorman", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "E" |
| ], |
| "last": "Lucas", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Ashby", |
| "suffix": "" |
| }, |
| { |
| "first": "Arya", |
| "middle": [ |
| "D" |
| ], |
| "last": "Goyzueta", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "You", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyle Gorman, Lucas F. E. Ashby, Aaron Goyzueta, Arya D. McCarthy, Shijie Wu, and Daniel You. 2020. The SIGMORPHON 2020 shared task on multilin- gual grapheme-to-phoneme conversion. In Proceed- ings of the 17th SIGMORPHON Workshop on Com- putational Research in Phonetics, Phonology, and Morphology.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A phoneme clustering algorithm based on the obligatory contour principle", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mans Hulden", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 21st Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "290--300", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K17-1030" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mans Hulden. 2017. A phoneme clustering algorithm based on the obligatory contour principle. In Pro- ceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017), pages 290-300, Vancouver, Canada. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Low-resource grapheme-to-phoneme conversion using recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Preethi", |
| "middle": [], |
| "last": "Jyothi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Hasegawa-Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5030--5034", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Preethi Jyothi and Mark Hasegawa-Johnson. 2017. Low-resource grapheme-to-phoneme conversion us- ing recurrent neural networks. In 2017 IEEE Inter- national Conference on Acoustics, Speech and Sig- nal Processing (ICASSP), pages 5030-5034. IEEE.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Neural transition-based string transduction for limitedresource setting in morphology", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Makarov", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Clematide", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "83--93", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Makarov and Simon Clematide. 2018. Neu- ral transition-based string transduction for limited- resource setting in morphology. In Proceedings of the 27th International Conference on Computational Linguistics, pages 83-93, Santa Fe, New Mexico, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Align and copy: UZH at SIGMORPHON 2017 shared task for morphological reinflection", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Makarov", |
| "suffix": "" |
| }, |
| { |
| "first": "Tatiana", |
| "middle": [], |
| "last": "Ruzsics", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Clematide", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the CoNLL SIGMORPHON 2017", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K17-2004" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Makarov, Tatiana Ruzsics, and Simon Clematide. 2017. Align and copy: UZH at SIGMORPHON 2017 shared task for morphological reinflection. In Proceedings of the CoNLL SIGMORPHON 2017", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Vancouver. Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "49--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shared Task: Universal Morphological Reinflection, pages 49-57, Vancouver. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Improving WFST-based G2P conversion with alignment constraints and RNNLM N-best rescoring", |
| "authors": [ |
| { |
| "first": "Josef", |
| "middle": [ |
| "R" |
| ], |
| "last": "Novak", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [ |
| "R" |
| ], |
| "last": "Dixon", |
| "suffix": "" |
| }, |
| { |
| "first": "Nobuaki", |
| "middle": [], |
| "last": "Minematsu", |
| "suffix": "" |
| }, |
| { |
| "first": "Keikichi", |
| "middle": [], |
| "last": "Hirose", |
| "suffix": "" |
| }, |
| { |
| "first": "Chiori", |
| "middle": [], |
| "last": "Hori", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Kashioka", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Thirteenth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Josef R. Novak, Paul R. Dixon, Nobuaki Minematsu, Keikichi Hirose, Chiori Hori, and Hideki Kashioka. 2012. Improving WFST-based G2P conversion with alignment constraints and RNNLM N-best rescor- ing. In Thirteenth Annual Conference of the Inter- national Speech Communication Association.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "fairseq: A fast, extensible toolkit for sequence modeling", |
| "authors": [ |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [], |
| "last": "Baevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Angela", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
| "volume": "", |
| "issue": "", |
| "pages": "48--53", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-4009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics (Demonstrations), pages 48-53, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Grapheme-to-phoneme conversion using long short-term memory recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Kanishka", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| }, |
| { |
| "first": "Fuchun", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Ha\u015fim", |
| "middle": [], |
| "last": "Sak", |
| "suffix": "" |
| }, |
| { |
| "first": "Fran\u00e7oise", |
| "middle": [], |
| "last": "Beaufays", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4225--4229", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kanishka Rao, Fuchun Peng, Ha\u015fim Sak, and Fran\u00e7oise Beaufays. 2015. Grapheme-to-phoneme conversion using long short-term memory recurrent neural networks. In 2015 IEEE International Con- ference on Acoustics, Speech and Signal Processing (ICASSP), pages 4225-4229. IEEE.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Learning string-edit distance", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [ |
| "Sven" |
| ], |
| "last": "Ristad", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "N" |
| ], |
| "last": "Yianilos", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
| "volume": "20", |
| "issue": "5", |
| "pages": "522--532", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Sven Ristad and Peter N. Yianilos. 1998. Learning string-edit distance. IEEE Transactions on Pattern Analysis and Machine Intelligence, 20(5):522-532.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Combining grapheme-to-phoneme converter outputs for enhanced pronunciation generation in low-resource scenarios", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Schlippe", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolf", |
| "middle": [], |
| "last": "Quaschningk", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Schultz", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Spoken Language Technologies for Under-Resourced Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Schlippe, Wolf Quaschningk, and Tanja Schultz. 2014. Combining grapheme-to-phoneme converter outputs for enhanced pronunciation generation in low-resource scenarios. In Spoken Language Tech- nologies for Under-Resourced Languages.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "IIT(BHU)-IIITH at CoNLL-SIGMORPHON 2018 shared task on universal morphological reinflection", |
| "authors": [ |
| { |
| "first": "Abhishek", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Ganesh", |
| "middle": [], |
| "last": "Katrapati", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipti Misra", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the CoNLL-SIGMORPHON 2018 Shared Task: Universal Morphological Reinflection", |
| "volume": "", |
| "issue": "", |
| "pages": "105--111", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K18-3013" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abhishek Sharma, Ganesh Katrapati, and Dipti Misra Sharma. 2018. IIT(BHU)-IIITH at CoNLL- SIGMORPHON 2018 shared task on universal mor- phological reinflection. In Proceedings of the CoNLL-SIGMORPHON 2018 Shared Task: Uni- versal Morphological Reinflection, pages 105-111, Brussels. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Data augmentation for morphological reinflection", |
| "authors": [ |
| { |
| "first": "Miikka", |
| "middle": [], |
| "last": "Silfverberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Wiemerslage", |
| "suffix": "" |
| }, |
| { |
| "first": "Ling", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lingshuang Jack", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the CoNLL SIGMORPHON 2017 Shared Task: Universal Morphological Reinflection", |
| "volume": "", |
| "issue": "", |
| "pages": "90--99", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K17-2010" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miikka Silfverberg, Adam Wiemerslage, Ling Liu, and Lingshuang Jack Mao. 2017. Data augmentation for morphological reinflection. In Proceedings of the CoNLL SIGMORPHON 2017 Shared Task: Univer- sal Morphological Reinflection, pages 90-99, Van- couver. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Pointer networks", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Meire", |
| "middle": [], |
| "last": "Fortunato", |
| "suffix": "" |
| }, |
| { |
| "first": "Navdeep", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2692--2700", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Meire Fortunato, and Navdeep Jaitly. 2015. Pointer networks. In Advances in neural in- formation processing systems, pages 2692-2700.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "The SIGMORPHON 2020 Shared Task 0: Typologically diverse morphological inflection", |
| "authors": [ |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Vylomova", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [], |
| "last": "White", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Salesky", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabrina", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mielke", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Edoardo", |
| "middle": [], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Rowan", |
| "middle": [], |
| "last": "Hall Maudslay", |
| "suffix": "" |
| }, |
| { |
| "first": "Ran", |
| "middle": [], |
| "last": "Zmigrod", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Valvoda", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Toldova", |
| "suffix": "" |
| }, |
| { |
| "first": "Francis", |
| "middle": [], |
| "last": "Tyers", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Klyachko", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Yegorov", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Krizhanovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Paula", |
| "middle": [], |
| "last": "Czarnowska", |
| "suffix": "" |
| }, |
| { |
| "first": "Irene", |
| "middle": [], |
| "last": "Nikkarinen", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrej", |
| "middle": [], |
| "last": "Krizhanovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiago", |
| "middle": [], |
| "last": "Pimentel", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucas", |
| "middle": [], |
| "last": "Torroba Hennigen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christo", |
| "middle": [], |
| "last": "Kirov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ekaterina Vylomova, Jennifer White, Elizabeth Salesky, Sabrina J. Mielke, Shijie Wu, Edoardo Ponti, Rowan Hall Maudslay, Ran Zmigrod, Joseph Valvoda, Svetlana Toldova, Francis Tyers, Elena Klyachko, Ilya Yegorov, Natalia Krizhanovsky, Paula Czarnowska, Irene Nikkarinen, Andrej Krizhanovsky, Tiago Pimentel, Lucas Torroba Hennigen, Christo Kirov, Garrett Nicolai, Ad- ina Williams, Antonios Anastasopoulos, Hilaria Cruz, Eleanor Chodroff, Ryan Cotterell, Miikka Silfverberg, and Mans Hulden. 2020. The SIG- MORPHON 2020 Shared Task 0: Typologically diverse morphological inflection. In Proceedings of the 17th SIGMORPHON Workshop on Compu- tational Research in Phonetics, Phonology, and Morphology.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Applying the transformer to character-level transduction", |
| "authors": [ |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mans", |
| "middle": [], |
| "last": "Hulden", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.10213" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shijie Wu, Ryan Cotterell, and Mans Hulden. 2020. Applying the transformer to character-level transduc- tion. arXiv:2005.10213 [cs.CL].", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Grapheme-to-phoneme conversion with convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Sevinj", |
| "middle": [], |
| "last": "Yolchuyeva", |
| "suffix": "" |
| }, |
| { |
| "first": "G\u00e9za", |
| "middle": [], |
| "last": "N\u00e9meth", |
| "suffix": "" |
| }, |
| { |
| "first": "B\u00e1lint", |
| "middle": [], |
| "last": "Gyires-T\u00f3th", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Applied Sciences", |
| "volume": "9", |
| "issue": "6", |
| "pages": "1143", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sevinj Yolchuyeva, G\u00e9za N\u00e9meth, and B\u00e1lint Gyires- T\u00f3th. 2019. Grapheme-to-phoneme conversion with convolutional neural networks. Applied Sciences, 9(6):1143.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Transformer based grapheme-to-phoneme conversion", |
| "authors": [ |
| { |
| "first": "Sevinj", |
| "middle": [], |
| "last": "Yolchuyeva", |
| "suffix": "" |
| }, |
| { |
| "first": "G\u00e9za", |
| "middle": [], |
| "last": "N\u00e9meth", |
| "suffix": "" |
| }, |
| { |
| "first": "B\u00e1lint", |
| "middle": [], |
| "last": "Gyires-T\u00f3th", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2004.06338" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sevinj Yolchuyeva, G\u00e9za N\u00e9meth, and B\u00e1lint Gyires- T\u00f3th. 2020. Transformer based grapheme- to-phoneme conversion. arXiv preprint arXiv:2004.06338.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Main WER results on the SIGMORPHON test sets with augmented and unaugmented data." |
| }, |
| "TABREF1": { |
| "num": null, |
| "text": "Example augmented French data from the original min data set that contains 100 examples. In total, 50,000 examples such as the ones shown here are created from each data set.", |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |