| { |
| "paper_id": "U17-1006", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:11:25.541662Z" |
| }, |
| "title": "Phonemic Transcription of Low-Resource Tonal Languages", |
| "authors": [ |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Adams", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne", |
| "location": { |
| "country": "Australia" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne", |
| "location": { |
| "country": "Australia" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Carnegie Mellon University", |
| "location": { |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [ |
| "2017" |
| ], |
| "last": "Michaud", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Center for Scientific Research", |
| "location": { |
| "country": "France" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Transcription of speech is an important part of language documentation, and yet speech recognition technology has not been widely harnessed to aid linguists. We explore the use of a neural network architecture with the connectionist temporal classification loss function for phonemic and tonal transcription in a language documentation setting. In this framework, we explore jointly modelling phonemes and tones versus modelling them separately, and assess the importance of pitch information versus phonemic context for tonal prediction. Experiments on two tonal languages, Yongning Na and Eastern Chatino, show the changes in recognition performance as training data is scaled from 10 minutes to 150 minutes. We discuss the findings from incorporating this technology into the linguistic workflow for documenting Yongning Na, which show the method's promise in improving efficiency, minimizing typographical errors, and maintaining the transcription's faithfulness to the acoustic signal, while highlighting phonetic and phonemic facts for linguistic consideration.", |
| "pdf_parse": { |
| "paper_id": "U17-1006", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Transcription of speech is an important part of language documentation, and yet speech recognition technology has not been widely harnessed to aid linguists. We explore the use of a neural network architecture with the connectionist temporal classification loss function for phonemic and tonal transcription in a language documentation setting. In this framework, we explore jointly modelling phonemes and tones versus modelling them separately, and assess the importance of pitch information versus phonemic context for tonal prediction. Experiments on two tonal languages, Yongning Na and Eastern Chatino, show the changes in recognition performance as training data is scaled from 10 minutes to 150 minutes. We discuss the findings from incorporating this technology into the linguistic workflow for documenting Yongning Na, which show the method's promise in improving efficiency, minimizing typographical errors, and maintaining the transcription's faithfulness to the acoustic signal, while highlighting phonetic and phonemic facts for linguistic consideration.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Language documentation involves eliciting speech from native speakers, and transcription of these rich cultural and linguistic resources is an integral part of the language documentation process. However, transcription is very slow: it often takes a linguist between 30 minutes to 2 hours to transcribe and translate 1 minute of speech, depending on the transcriber's familiarity with the language and the difficulty of the content. This is a bottleneck in the standard documentary linguistics workflow: linguists accumulate considerable amounts of speech, but do not transcribe and translate it all, and there is a risk that untranscribed recordings could end up as \"data graveyards\" (Himmelmann, 2006, 4,12-13) . There is clearly a need for \"devising better ways for linguists to do their work\" (Thieberger, 2016, 92) .", |
| "cite_spans": [ |
| { |
| "start": 685, |
| "end": 712, |
| "text": "(Himmelmann, 2006, 4,12-13)", |
| "ref_id": null |
| }, |
| { |
| "start": 797, |
| "end": 819, |
| "text": "(Thieberger, 2016, 92)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There has been work on low-resource speech recognition (Besacier et al., 2014) , with approaches using cross-lingual information for better acoustic modelling (Burget et al., 2010; Vu et al., 2014; Xu et al., 2016; M\u00fcller et al., 2017) and language modelling (Xu and Fung, 2013) . However, speech recognition technology has largely been ineffective for endangered languages since architectures based on hidden Markov models (HMMs), which generate orthographic transcriptions, require a large pronunciation lexicon and a language model trained on text. These speech recognition systems are usually trained on a variety of speakers and hundreds of hours of data (Hinton et al., 2012, 92) , with the goal of generalisation to new speakers. Since large amounts of text are used for language model training, such systems often do not incorporate pitch information for speech recognition of tonal languages (Metze et al., 2013) , as they can instead rely on contextual information for tonal disambiguation via the language model (Le and Besacier, 2009; Feng et al., 2012) .", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 78, |
| "text": "(Besacier et al., 2014)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 159, |
| "end": 180, |
| "text": "(Burget et al., 2010;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 181, |
| "end": 197, |
| "text": "Vu et al., 2014;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 198, |
| "end": 214, |
| "text": "Xu et al., 2016;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 215, |
| "end": 235, |
| "text": "M\u00fcller et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 259, |
| "end": 278, |
| "text": "(Xu and Fung, 2013)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 660, |
| "end": 685, |
| "text": "(Hinton et al., 2012, 92)", |
| "ref_id": null |
| }, |
| { |
| "start": 901, |
| "end": 921, |
| "text": "(Metze et al., 2013)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1031, |
| "end": 1046, |
| "text": "Besacier, 2009;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1047, |
| "end": 1065, |
| "text": "Feng et al., 2012)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In contrast, language documentation contexts often have just a few speakers for model training, and little text for language model training. However, there may be benefit even in a system that overfits to these speakers. If a phonemic recognition tool can provide a canvas transcription for manual correction and linguistic analysis, it may be possible to improve the leverage of linguists. The data collected in this semi-automated workflow can then be used as training data for further re-finement of the acoustic model, leading to a snowball effect of better and faster transcription.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper we investigate the application of neural speech recognition models to the task of phonemic and tonal transcription in a resourcescarce language documentation setting. We use the connectionist temporal classification (CTC) formulation (Graves et al., 2006) for the purposes of direct prediction of phonemes and tones given an acoustic signal, thus bypassing the need for a pronunciation lexicon, language model, and time alignments of phonemes in the training data. By drastically reducing the data requirements in this way, we make the use of automatic transcription technology more feasible in a language documentation setting.", |
| "cite_spans": [ |
| { |
| "start": 248, |
| "end": 269, |
| "text": "(Graves et al., 2006)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We evaluate this approach on two tonal languages, Yongning Na and Eastern Chatino (Cruz and Woodbury, 2006; Michaud, 2017) . Na is a Sino-Tibetan language spoken in Southwest China with three tonal levels, High (H), Mid (M) and Low (L) and a total of seven tone labels. Eastern Chatino, spoken in Oaxaca, Mexico, has a richer tone set but both languages have extensive morphotonology. Overall estimates of numbers of speakers for Chatino and Na are similar, standing at about 40,000 for both (Simons and Fennig, 2017) , but there is a high degree of dialect differentiation within the languages. The data used in the present study are from the Alawa dialect of Yongning Na, and the San Juan Quiahije dialect of Eastern Chatino; as a rule-of-thumb estimate, it is likely that these materials would be intelligible to a population of less than 10,000 (for details on the situation for Eastern Chatino, see Cruz (2011, 18-23) ).", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 107, |
| "text": "(Cruz and Woodbury, 2006;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 108, |
| "end": 122, |
| "text": "Michaud, 2017)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 492, |
| "end": 517, |
| "text": "(Simons and Fennig, 2017)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 904, |
| "end": 922, |
| "text": "Cruz (2011, 18-23)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Though a significant amount of Chatino speech has been transcribed (Chatino Language Documentation Project, 2017), its rich tone system and opposing location on the globe make it a useful point of comparison for our explorations of Na, the language for which automatic transcription is our primary practical concern. Though Na has previously had speech recognition applied in a pilot study (Do et al., 2014) , phoneme error rates were not quantified and tone recognition was left as future work.", |
| "cite_spans": [ |
| { |
| "start": 390, |
| "end": 407, |
| "text": "(Do et al., 2014)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We perform experiments scaling the training data, comparing joint prediction of phonemes and tones with separate prediction, and assessing the influence of pitch information versus phonemic context on phonemic and tonal prediction in the CTC-based framework. Importantly, we qualitatively evaluate use of this automation in the transcription of Na. The effectiveness of the approach has resulted in its incorporation into the linguist's workflow. Our open-source implementation is available online. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The underlying model used is a long shortterm memory (LSTM) recurrent neural network (Hochreiter and Schmidhuber, 1997) in a bidirectional configuration (Schuster and Paliwal, 1997) . The network is trained with the connectionist temporal classification (CTC) loss function (Graves et al., 2006) . Critically, this alleviates the need for alignments between speech features and labels in the transcription which we do not have. This is achieved through the use of a dynamic programming algorithm that efficiently sums over the probability of neural network output label that correspond to the gold transcription sequence when repeated labels are collapsed.", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 119, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 153, |
| "end": 181, |
| "text": "(Schuster and Paliwal, 1997)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 274, |
| "end": 295, |
| "text": "(Graves et al., 2006)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The use of an underlying recurrent neural network allows the model to implicitly model context via the parameters of the LSTM, despite the independent frame-wise label predictions of the CTC network. It is this feature of the architecture that makes it a promising tool for tonal prediction, since tonal information is suprasegmental, spanning many frames (Mortensen et al., 2016) . Context beyond the immediate local signal is indispensable for tonal prediction, and long-ranging context is especially important in the case of morphotonologically rich languages such as Na and Chatino. Past work distinguishes between embedded tonal modelling, where phoneme and tone labels are jointly predicted, and explicit tonal modelling, where they are predicted separately (Lee et al., 2002) . We compare several training objectives for the purposes of phoneme and tone prediction. This includes separate prediction of 1) phonemes and 2) tones, as well as 3) jointly predict phonemes and tones using one label set. Figure 1 presents an example sentence from the Na corpus described in \u00a73.1, along with an example of these three objectives. As for the sister, she stayed at home. \u800c\u59b9\u59b9\u5462\uff0c\u7559\u5728\u5bb6\u91cc\u3002 Target label sequence: ", |
| "cite_spans": [ |
| { |
| "start": 356, |
| "end": 380, |
| "text": "(Mortensen et al., 2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 571, |
| "end": 586, |
| "text": "Na and Chatino.", |
| "ref_id": null |
| }, |
| { |
| "start": 764, |
| "end": 782, |
| "text": "(Lee et al., 2002)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1006, |
| "end": 1014, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2" |
| }, |
| { |
| "text": "1. t\u02b0 i g o m i d\u0291 o t\u02b0 i \u0251 \u0281 o d\u0291 o t s \u026f m v\u0329 2. \u02e9\u02e5 \u02e7 \u02e7 \u02e5 \u02e9\u02e5 \u02e9 \u02e7 \u02e9 \u02e9 \u02e9 3. t\u02b0 i \u02e9\u02e5 g o \u02e7 m i \u02e7 d \u0291 o \u02e5 t\u02b0 i \u02e9\u02e5 \u0251 \u02e9 \u0281 o \u02e7 d\u0291 o \u02e9 t s \u026f \u02e9 m v\u0329 \u02e9", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We designed the experiments to answer these primary questions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1. How do the error rates scale with respect to training data?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "2. How effective is tonal modelling in a CTC framework?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "3. To what extent does phoneme context play a role in tone prediction?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "4. Does joint prediction of phonemes and tones help minimize error rates?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We assess the performance of the systems as training data scales from 10 minutes to 150 minutes of a single Na speaker, and between 12 and 50 minutes for a single speaker of Chatino. Experimenting with this extremely limited training data gives us a sense of how much a linguist needs to transcribe before this technology can be profitably incorporated into their workflow.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We evaluate both the phoneme error rate (PER) and tone error rate (TER) of models based on the same neural architecture, but with varying input features and output objectives. ). These input features vary in the amount of acoustic information relevant to tonal modelling that they include. The output objectives correspond to those discussed in \u00a72: tones only ( ), phonemes only ( ), or jointly modelling both ( ). We denote combinations of input features and target labellings as \u27e8 \u27e9\u21d2\u27e8 \u27e9. In case of tonal prediction we explore similar configurations to that of phoneme prediction, but with two additional points of comparison. The first is predicting tones given one-hot phoneme vectors ( ) of the gold phoneme transcription ( \u21d2 ). The second predicts tones directly from pitch features ( \u21d2 ). These important points of comparison serve to give us some understanding as to how much tonal information is being extracted directly from the acoustic signal versus the phoneme context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We explore application of the model to the Na corpus that is part of the Pangloss collection (Michailovsky et al., 2014) . This corpus consists of around 100 narratives, constituting 11 hours of speech from one speaker in the form of traditional stories, and spontaneous narratives about life, family and customs (Michaud, 2017, 33) . Several hours of the recordings have been phonemically transcribed, and we used up to 149 minutes of this for training, 24 minutes for validation and 23 minutes for testing. The total number of phoneme and tone labels used for automatic transcription was 78 and 7 respectively. For Chatino, we used data of \u0106avar et al. (2016) from the GORILLA language archive for Eastern Chatino of San Juan Quiahije, Oaxaca, Mexico for the purposes of comparing phoneme and tone prediction with Na when data restriction is in place. We used up to 50 minutes of data for training, 8 minutes for validation and 7 minutes for testing. The phoneme inventory we used consists of 31 labels along with 14 tone labels. For both languages, preprocessing involved removing punctuation and any other symbols that are not phonemes or tones such as tone group delimiters and hyphens connecting syllables within words. Figure 2 shows the phoneme and tone error rates for Na and Chatino. Error rate scaling Error rates decrease logarithmically with training data. The best methods reliably have a lower than 30% PER with 30 minutes of training data. We believe it is reasonable to expect similar trends in other languages, with these results suggesting how much linguists might need to transcribe before semi-automation can become part of their workflow.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 120, |
| "text": "(Michailovsky et al., 2014)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 313, |
| "end": 332, |
| "text": "(Michaud, 2017, 33)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1226, |
| "end": 1234, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In the case of phoneme-only prediction, use of pitch information does help reduce the PER, which is consistent with previous work (Metze et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 150, |
| "text": "(Metze et al., 2013)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Tonal modelling TER is always higher than PER for the same amount of training data, despite there being only 7 tone labels versus 78 phoneme labels in our Na experiment. This is true even when pitch features are present. However, it is unsurprising since the tones have overlapping pitch ranges, and can be realized with vastly different pitch over the course of a single sentence. This suggests that context is more important for predicting tones than phonemes, which are more contextindependent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u21d2 and \u21d2 are vastly in-ferior to other methods, all of which are privy to phonemic information via training labels or input. However, combining the fbank and pitch input features ( \u21d2 ) makes for the equal best performing approach for tonal prediction in Na at maximum training data. This indicates both that these features are complementary and that the model has learnt a representation useful for tonal prediction that is on par with explicit phonemic information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Though tonal prediction is more challenging than phoneme prediction, these results suggest automatic tone transcription is feasible using this architecture, even without inclusion of explicit linguistic information such as constraints on valid tone sequences which is a promising line of future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Phoneme context To assess the importance of context in tone prediction, \u21d2 gives us a point of comparison where no acoustic information is available at all. It performs reasonably well for Na, and competitively for Chatino. One likely reason for its solid performance is that long-range context is modelled more effectively by using phoneme input features, since there are vastly fewer phonemes per sentence than speech frames. The rich morphotonology of Na and Chatino means context is important in the realisation of tones, explaining why \u21d2 can perform almost as well as methods using acoustic features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Joint prediction Interestingly, joint prediction of phonemes and tones does not outperform the best methods for separate phoneme and tone prediction, except in the case of Chatino tone prediction, if we discount \u21d2 . In light of the celebrated successes of multitask learning in various domains (Collobert et al., 2011; Deng et al., 2013; Girshick, 2015; Ramsundar et al., 2015; Ruder, 2017) , one might expect training with joint prediction of phonemes and tones to help, since it gives more relevant contextual information to the model.", |
| "cite_spans": [ |
| { |
| "start": 294, |
| "end": 318, |
| "text": "(Collobert et al., 2011;", |
| "ref_id": null |
| }, |
| { |
| "start": 319, |
| "end": 337, |
| "text": "Deng et al., 2013;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 338, |
| "end": 353, |
| "text": "Girshick, 2015;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 354, |
| "end": 377, |
| "text": "Ramsundar et al., 2015;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 378, |
| "end": 390, |
| "text": "Ruder, 2017)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The trends observed in the experimentation on Chatino were largely consistent with those of Na, but with higher error rates owing to less training data and a larger tone label set. There are two differences with the Na results worth noting. One is that \u21d2 is more competitive in the case of Chatino, suggest- ing that phoneme context plays a more important role in tonal prediction in Chatino. The second is that \u21d2 outperforms \u21d2 , and that adding pitch features to Filterbank features offers less benefit than in Na. Figure 3 shows the most common tone substitution mistakes for \u21d2 in the test set. Proportions were very similar for other methods. The most common tonal substitution errors were those between between M and L. Acoustically, M and L are neighbours; as mentioned above, in Na the same tone can be realised with a different pitch at different points in a sentence, leading to overlapping pitch ranges between these tones. Moreover, M and L tones were by far the most common tonal labels.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 516, |
| "end": 524, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Na versus Chatino", |
| "sec_num": null |
| }, |
| { |
| "text": "The phoneme error rates in the above quantitative analysis are promising, but is this system actually of practical use in a linguistic workflow? We discuss here the experience of a linguist in applying this model to Na data to aid in transcription of 9 minutes and 30 seconds of speech.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Qualitative Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The phonemic errors typically make linguistic sense: they are not random added noise and often bring the linguist's attention to phonetic facts that are easily overlooked because they are not phonemically contrastive.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recognition Errors", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "One set of such errors is due to differences in articulation between different morphosyntactic classes. For example, the noun 'person' /h\u0129\u02e5/ and the relativizer suffix /-h\u0129\u02e5/ are segmentally identical, but the latter is articulated much more weakly than the former and it is often recognized as /\u0129/ in automatic transcription, without an initial /h/. Likewise, in the demonstrative /\u0288\u0282\u02b0\u026f\u02e5/ the initial consonant /\u0288\u0282\u02b0/ is often strongly hypo-articulated, resulting in its recognition as a fricative /\u0282/, /\u0290/, or /\u0291/ instead of an aspirated affricate. As a further example, the negation that is transcribed as /m\u00f5\u02e7/ in Housebuilding2.290 instead of /m\u0264\u02e7/. This highlights that the vowel in that syllable is probably nazalised, and acoustically unlike the average /\u0264/ vowel for lexical words. The extent to which a word's morphosyntactic category influences the way it is pronounced is known to be languagespecific (Brunelle et al., 2015) ; the phonemic transcription tool indirectly reveals that this influence is considerable in Na.", |
| "cite_spans": [ |
| { |
| "start": 912, |
| "end": 935, |
| "text": "(Brunelle et al., 2015)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recognition Errors", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "A second set is due to loanwords containing combinations of phonemes that are unattested in the training set. For example /\u0291\u026f\u02e9pe\u02e7/, from Mandarin r\u00ecb\u011bn (\u65e5\u672c , 'Japan'). /pe/ is otherwise unattested in Na, which only has /pi/; accordingly, the syllable was identified as /pi/. In documenting Na, Mandarin loanwords were initially transcribed with Chinese characters, and thus cast aside from analyses, instead of confronting the issue of how different phonological systems coexist and interact in language use.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recognition Errors", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "A third set of errors made by the system result in an output that is not phonologically well formed, such as syllables without tones and sequences with consonant clusters such as /kgv\u0329 /. These cases are easy for the linguist to identify and amend.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recognition Errors", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The recognition system currently makes tonal mistakes that are easy to correct on the basis of elementary phonological knowledge: it produces some impossible tone sequences such as M+L+M inside the same tone group. Very long-ranging tonal dependencies are not harnessed so well by the current tone identification tool. This is consistent with quantitative indications in \u00a74 and is a case for including a tonal language model or refining the neural architecture to better harness longrange contextual information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recognition Errors", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Using this automatic transcription as a starting point for manual correction was found to confer several benefits to the linguist.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Benefits for the Linguist", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Faithfulness to acoustic signal The model produces output that is faithful to the acoustic signal. In casual oral speech there are repetitions and hesitations that are sometimes overlooked by the transcribing linguist, who is engrossed in a holistic process involving interpretation, translation, anno-tation, and communication with the language consultant. When using an automatically generated transcription as a canvas, there can be full confidence in the linearity of transcription, and more attention can be placed on linguistically meaningful dialogue with the language consultant.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Benefits for the Linguist", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Typographical errors and the transcriber's mindset Transcriptions are made during fieldwork with a language consultant and are difficult to correct down the line based only on auditory impression when the consultant is not available. However, such typographic errors are common, with a large number of phoneme labels and significant use of combinations of keys (Shift, Alternative Graph, etc). By providing a high-accuracy first-pass automatic transcription, much of this manual data entry is entirely avoided. Enlisting the linguist solely for correction of errors also allows them to embrace a critical mindset, putting them in \"proofreading mode\", where focus can be entirely centred on assessing the correctness of the system output without the additional distracting burden of data entry.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Benefits for the Linguist", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Speed Assessing automatic transcription's influence on the speed of the overall language documentation process is beyond the scope of this paper and is left to future work. Language documentation is a holistic process. Beyond phonemic transcription, documentation of Na involves other work that happens in parallel: translating, discussing with a native speaker, copying out new words into the Na dictionary, and being constantly on the lookout for new and unexpected linguistic phenomena. Further complicating this, the linguist's proficiency of the language and speed of transcription is dynamic, improving over time. This makes comparisons difficult.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Benefits for the Linguist", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "From this preliminary experiment, the efficiency of the linguist was perceived to be improved, but the benefits lie primarily in the advantages of providing a transcript faithful to the recording, and allowing the linguist to minimize manual entry, focusing on correction and enrichment of the transcribed document.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Benefits for the Linguist", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The snowball effect More data collection means more training data for better ASR performance. The process of improving the acoustic model by training on such semi-automatic transcriptions has begun, with the freshly transcribed Housebuilding2 used in this investigation now available for subsequent Na acoustic modelling training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Benefits for the Linguist", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "As a first example of output by incorporating automatic transcription into the Yongning Na documentation workflow, transcription of the recording Housebuilding was completed using automatic transcription as a canvas; this document is now available online. 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Benefits for the Linguist", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We have presented the results of applying a CTC-based LSTM model to the task of phoneme and tone transcription in a resource-scarce context: that of a newly documented language. Beyond comparing the effects of various training inputs and objectives on the phoneme and tone error rates, we reported on the application of this method to linguistic documentation of Yongning Na. Its applicability as a first-pass transcription is very encouraging, and it has now been incorporated into the workflow. Our results give an idea of the amount of speech other linguists might aspire to transcribe in order to bootstrap this process: as little as 30 minutes in order to obtain a sub-30% phoneme error rate as a starting point, with further improvements to come as more data is transcribed in the semi-automated workflow. There is still much room for modelling improvement, including incorporation of linguistic constraints into the architecture for more accurate transcriptions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://github.com/oadams/mam", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://lacito.vjf.cnrs.fr/pangloss/corpus/show_text_en.php?id=crdo-NRU_F4_HOUSEBUILDING2_SOUND&idref=crdo-NRU_F4_HOUSEBUILDING2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Automatic speech recognition for under-resourced languages: A survey", |
| "authors": [ |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Besacier", |
| "suffix": "" |
| }, |
| { |
| "first": "Etienne", |
| "middle": [], |
| "last": "Barnard", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Karpov", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Schultz", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Speech Communication", |
| "volume": "56", |
| "issue": "", |
| "pages": "85--100", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laurent Besacier, Etienne Barnard, Alexey Karpov, and Tanja Schultz. 2014. Automatic speech recog- nition for under-resourced languages: A survey. Speech Communication 56:85-100.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Effects of lexical frequency and lexical category on the duration of Vietnamese syllables", |
| "authors": [ |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Brunelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Daryl", |
| "middle": [], |
| "last": "Chow", |
| "suffix": "" |
| }, |
| { |
| "first": "Th\u1ee5y", |
| "middle": [], |
| "last": "Nh\u00e3 Uy\u00ean Nguy\u1ec5n", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of 18th International Congress of Phonetic Sciences. University of Glasgow, Glasgow", |
| "volume": "", |
| "issue": "", |
| "pages": "1--5", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc Brunelle, Daryl Chow, and Th\u1ee5y Nh\u00e3 Uy\u00ean Nguy\u1ec5n. 2015. Effects of lexical frequency and lex- ical category on the duration of Vietnamese sylla- bles. In The Scottish Consortium for ICPhS 2015, editor, Proceedings of 18th International Congress of Phonetic Sciences. University of Glasgow, Glas- gow, pages 1-5.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Multilingual acoustic modeling for speech recognition based on subspace Gaussian mixture models", |
| "authors": [ |
| { |
| "first": "Luk\u00e1\u0161", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "Petr", |
| "middle": [], |
| "last": "Schwarz", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Pinar", |
| "middle": [], |
| "last": "Akyazi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Arnab", |
| "middle": [], |
| "last": "Ghoshal", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Glembek", |
| "suffix": "" |
| }, |
| { |
| "first": "Nagendra", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Karafi\u00e1t", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "Others", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Acoustics Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4334--4337", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luk\u00e1\u0161 Burget, Petr Schwarz, Mohit Agarwal, Pinar Akyazi, Kai Feng, Arnab Ghoshal, Ond\u0159ej Glembek, Nagendra Goel, Martin Karafi\u00e1t, Daniel Povey, and Others. 2010. Multilingual acoustic modeling for speech recognition based on subspace Gaussian mix- ture models. In Acoustics Speech and Signal Pro- cessing (ICASSP), 2010 IEEE International Confer- ence on. IEEE, pages 4334-4337.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Endangered Language Documentation: Bootstrapping a Chatino Speech Corpus, Forced Aligner, ASR", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Ma\u0142gorzata", |
| "suffix": "" |
| }, |
| { |
| "first": "Damir", |
| "middle": [], |
| "last": "\u0106avar", |
| "suffix": "" |
| }, |
| { |
| "first": "Hilaria", |
| "middle": [], |
| "last": "Cavar", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Cruz", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "4004--4011", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ma\u0142gorzata E. \u0106avar, Damir Cavar, and Hilaria Cruz. 2016. Endangered Language Documentation: Boot- strapping a Chatino Speech Corpus, Forced Aligner, ASR. In LREC. pages 4004-4011.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Chatino Language Documentation Project Collection", |
| "authors": [], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chatino Language Documentation Project. 2017. Chatino Language Documentation Project Collec- tion.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Phonology, tone and the functions of tone in San Juan Quiahije Chatino", |
| "authors": [ |
| { |
| "first": "Emiliana", |
| "middle": [], |
| "last": "Cruz", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Ph.D", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emiliana Cruz. 2011. Phonology, tone and the functions of tone in San Juan Quiahije Chatino. Ph.D., University of Texas at Austin, Austin. http://hdl.handle.net/2152/ETD-UT-2011-08-4280.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "El sandhi de los tonos en el Chatino de Quiahije", |
| "authors": [ |
| { |
| "first": "Emiliana", |
| "middle": [], |
| "last": "Cruz", |
| "suffix": "" |
| }, |
| { |
| "first": "Tony", |
| "middle": [], |
| "last": "Woodbury", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Las memorias del Congreso de Idiomas Ind\u00edgenas de Latinoam\u00e9rica-II, Archive of the Indigenous Languages of Latin America", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emiliana Cruz and Tony Woodbury. 2006. El sandhi de los tonos en el Chatino de Quiahije. In Las memorias del Congreso de Idiomas Ind\u00edgenas de Latinoam\u00e9rica-II, Archive of the Indigenous Lan- guages of Latin America.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "New types of deep neural network learning for speech recognition and related applications: an overview", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Kingsbury", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "2013 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "8599--8603", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2013.6639344" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Deng, Geoffrey Hinton, and Brian Kingsbury. 2013. New types of deep neural network learning for speech recognition and related applications: an overview. In 2013 IEEE In- ternational Conference on Acoustics, Speech and Signal Processing. IEEE, pages 8599-8603. https://doi.org/10.1109/ICASSP.2013.6639344.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Towards the automatic processing of", |
| "authors": [ |
| { |
| "first": "Thi-Ngoc-Diep", |
| "middle": [], |
| "last": "Do", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Michaud", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Castelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thi-Ngoc-Diep Do, Alexis Michaud, and Eric Castelli. 2014. Towards the automatic processing of", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Sino-Tibetan): developing a 'light' acoustic model of the target language and testing 'heavyweight' models from five national languages", |
| "authors": [ |
| { |
| "first": "Yongning", |
| "middle": [], |
| "last": "Na", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "4th International Workshop on Spoken Language Technologies for Under-resourced Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "153--160", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yongning Na (Sino-Tibetan): developing a 'light' acoustic model of the target language and test- ing 'heavyweight' models from five national lan- guages. In 4th International Workshop on Spoken Language Technologies for Under-resourced Lan- guages (SLTU 2014). St Petersburg, Russia, pages 153-160. https://halshs.archives-ouvertes.fr/halshs- 00980431.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Sine-wave speech recognition in a tonal language", |
| "authors": [ |
| { |
| "first": "Yan-Mei", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ning", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Guang", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shan-Kai", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "The Journal of the Acoustical Society of America", |
| "volume": "131", |
| "issue": "2", |
| "pages": "133--138", |
| "other_ids": { |
| "DOI": [ |
| "10.1121/1.3670594" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yan-Mei Feng, Li Xu, Ning Zhou, Guang Yang, and Shan-Kai Yin. 2012. Sine-wave speech recog- nition in a tonal language. The Journal of the Acoustical Society of America 131(2):EL133- EL138. https://doi.org/10.1121/1.3670594.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A pitch extraction algorithm tuned for automatic speech recognition", |
| "authors": [ |
| { |
| "first": "Pegah", |
| "middle": [], |
| "last": "Ghahremani", |
| "suffix": "" |
| }, |
| { |
| "first": "Bagher", |
| "middle": [], |
| "last": "Babaali", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "Korbinian", |
| "middle": [], |
| "last": "Riedhammer", |
| "suffix": "" |
| }, |
| { |
| "first": ";", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pegah Ghahremani, Bagher BabaAli, Daniel Povey, Korbinian Riedhammer, Jan Trmal, and Sanjeev Khudanpur. 2014. A pitch extraction algorithm tuned for automatic speech recognition. In Acous- tics, Speech and Signal Processing (ICASSP), 2014", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "IEEE International Conference on. IEEE", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "2494--2498", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "IEEE International Conference on. IEEE, pages 2494-2498.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Fast R-CNN", |
| "authors": [ |
| { |
| "first": "Ross", |
| "middle": [], |
| "last": "Girshick", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "2015 IEEE International Conference on Computer Vision (ICCV). IEEE", |
| "volume": "", |
| "issue": "", |
| "pages": "1440--1448", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICCV.2015.169" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ross Girshick. 2015. Fast R-CNN. In 2015 IEEE International Conference on Com- puter Vision (ICCV). IEEE, pages 1440-1448. https://doi.org/10.1109/ICCV.2015.169.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Connectionist Temporal Classification : Labelling Unsegmented Sequence Data with Recurrent Neural Networks", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| }, |
| { |
| "first": "Santiago", |
| "middle": [], |
| "last": "Fernandez", |
| "suffix": "" |
| }, |
| { |
| "first": "Faustino", |
| "middle": [], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Jurgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 23rd international conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "369--376", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/1143844.1143891" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Graves, Santiago Fernandez, Faustino Gomez, and Jurgen Schmidhuber. 2006. Connection- ist Temporal Classification : Labelling Unseg- mented Sequence Data with Recurrent Neural Net- works. Proceedings of the 23rd international conference on Machine Learning pages 369-376. https://doi.org/10.1145/1143844.1143891.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Language documentation: what is it and what is it good for", |
| "authors": [ |
| { |
| "first": "Nikolaus", |
| "middle": [], |
| "last": "Himmelmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Essentials of language documentation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikolaus Himmelmann. 2006. Language documenta- tion: what is it and what is it good for? In J. Gippert, Nikolaus Himmelmann, and Ulrike Mosel, editors, Essentials of language documentation, de Gruyter, Berlin/New York, pages 1-30.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups", |
| "authors": [ |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Dong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [ |
| "E" |
| ], |
| "last": "Dahl", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdel-Rahman", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Navdeep", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Senior", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Vanhoucke", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Tara", |
| "middle": [ |
| "N" |
| ], |
| "last": "Sainath", |
| "suffix": "" |
| }, |
| { |
| "first": "Others", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Signal Processing Magazine", |
| "volume": "29", |
| "issue": "6", |
| "pages": "82--97", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geoffrey Hinton, Li Deng, Dong Yu, George E Dahl, Abdel-rahman Mohamed, Navdeep Jaitly, Andrew Senior, Vincent Vanhoucke, Patrick Nguyen, Tara N Sainath, and Others. 2012. Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups. Signal Processing Magazine, IEEE 29(6):82-97.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Automatic speech recognition for under-resourced languages: application to Vietnamese language. Audio, Speech, and Language Processing", |
| "authors": [ |
| { |
| "first": "-Bac", |
| "middle": [], |
| "last": "Viet", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Besacier", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "IEEE Transactions on", |
| "volume": "17", |
| "issue": "8", |
| "pages": "1471--1482", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Viet-Bac Le and Laurent Besacier. 2009. Automatic speech recognition for under-resourced languages: application to Vietnamese language. Audio, Speech, and Language Processing, IEEE Transactions on 17(8):1471-1482.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Using tone information in Cantonese continuous speech recognition", |
| "authors": [ |
| { |
| "first": "Tan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Wai", |
| "middle": [], |
| "last": "Lau", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiu", |
| "middle": [], |
| "last": "Wing Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "P C", |
| "middle": [], |
| "last": "Ching", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "ACM Transactions on Asian Language Information Processing (TALIP)", |
| "volume": "1", |
| "issue": "1", |
| "pages": "83--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tan Lee, Wai Lau, Yiu Wing Wong, and P C Ching. 2002. Using tone information in Cantonese continu- ous speech recognition. ACM Transactions on Asian Language Information Processing (TALIP) 1(1):83- 102.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Models of tone for tonal and non-tonal languages", |
| "authors": [ |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Metze", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "W" |
| ], |
| "last": "Zaid", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Sheikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Waibel", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gehring", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Kilgour", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bao Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Van Huy Nguyen", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "IEEE Workshop on Automatic Speech Recognition and Understanding", |
| "volume": "", |
| "issue": "", |
| "pages": "261--266", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ASRU.2013.6707740" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Florian Metze, Zaid A.W. W Sheikh, Alex Waibel, Jonas Gehring, Kevin Kilgour, Quoc Bao Nguyen, and Van Huy Nguyen. 2013. Models of tone for tonal and non-tonal languages. 2013 IEEE Work- shop on Automatic Speech Recognition and Under- standing, ASRU 2013 -Proceedings pages 261-266. https://doi.org/10.1109/ASRU.2013.6707740.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Documenting and researching endangered languages: the Pangloss Collection", |
| "authors": [ |
| { |
| "first": "Boyd", |
| "middle": [], |
| "last": "Michailovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Martine", |
| "middle": [], |
| "last": "Mazaudon", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Michaud", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00e9verine", |
| "middle": [], |
| "last": "Guillaume", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Language Documentation and Conservation", |
| "volume": "8", |
| "issue": "", |
| "pages": "119--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Boyd Michailovsky, Martine Mazaudon, Alexis Michaud, S\u00e9verine Guillaume, Alexandre Fran\u00e7ois, and Evangelia Adamou. 2014. Documenting and researching endangered languages: the Pan- gloss Collection. Language Documentation and Conservation 8:119-135.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Tone in Yongning Na: lexical tones and morphotonology", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Michaud", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Number 13 in Studies in Diversity Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Michaud. 2017. Tone in Yongning Na: lexi- cal tones and morphotonology. Number 13 in Stud- ies in Diversity Linguistics. Language Science Press, Berlin. http://langsci-press.org/catalog/book/109.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "PanPhon: A Resource for Mapping IPA Segments to Articulatory Feature Vectors", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "David R Mortensen", |
| "suffix": "" |
| }, |
| { |
| "first": "Akash", |
| "middle": [], |
| "last": "Littell", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartik", |
| "middle": [], |
| "last": "Bharadwaj", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Lori", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Levin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COL-ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "3475--3484", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David R Mortensen, Patrick Littell, Akash Bharadwaj, Kartik Goyal, Chris Dyer, and Lori Levin. 2016. PanPhon: A Resource for Mapping IPA Segments to Articulatory Feature Vectors. Proceedings of COL- ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers pages 3475-3484. http://aclweb.org/anthology/C16-1328.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Language Adaptive Multilingual CTC Speech Recognition", |
| "authors": [ |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "St\u00fcker", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Waibel", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Speech and Computer: 19th International Conference, SPECOM 2017", |
| "volume": "", |
| "issue": "", |
| "pages": "473--482", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-319-66429-3_47" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Markus M\u00fcller, Sebastian St\u00fcker, and Alex Waibel. 2017. Language Adaptive Multilingual CTC Speech Recognition. In Alexey Karpov, Rod- monga Potapova, and Iosif Mporas, editors, Speech and Computer: 19th International Conference, SPECOM 2017, Hatfield, UK, September 12-16, 2017, Proceedings, Springer International Publishing, Cham, pages 473-482. https://doi.org/10.1007/978-3-319-66429-3_47.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Massively Multitask Networks for Drug Discovery", |
| "authors": [ |
| { |
| "first": "Bharath", |
| "middle": [], |
| "last": "Ramsundar", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Kearnes", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Riley", |
| "suffix": "" |
| }, |
| { |
| "first": "Dale", |
| "middle": [], |
| "last": "Webster", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Konerding", |
| "suffix": "" |
| }, |
| { |
| "first": "Vijay", |
| "middle": [], |
| "last": "Pande", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharath Ramsundar, Steven Kearnes, Patrick Riley, Dale Webster, David Konerding, and Vijay Pande. 2015. Massively Multitask Networks for Drug Dis- covery http://arxiv.org/abs/1502.02072.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "An Overview of Multi-Task Learning in Deep Neural Networks", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Ruder. 2017. An Overview of Multi- Task Learning in Deep Neural Networks http://arxiv.org/abs/1706.05098.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Bidirectional recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Kuldip", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Paliwal", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "IEEE Transactions on Signal Processing", |
| "volume": "45", |
| "issue": "11", |
| "pages": "2673--2681", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Schuster and Kuldip K Paliwal. 1997. Bidirec- tional recurrent neural networks. IEEE Transactions on Signal Processing 45(11):2673-2681.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Ethnologue: languages of the world", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Gary", |
| "suffix": "" |
| }, |
| { |
| "first": "Charles", |
| "middle": [ |
| "D" |
| ], |
| "last": "Simons", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fennig", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gary F. Simons and Charles D. Fennig, editors. 2017. Ethnologue: languages of the world.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Documentary linguistics: methodological challenges and innovatory responses", |
| "authors": [ |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Thieberger", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Applied Linguistics", |
| "volume": "37", |
| "issue": "1", |
| "pages": "88--99", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nick Thieberger. 2016. Documentary linguis- tics: methodological challenges and innovatory responses. Applied Linguistics 37(1):88-99.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Multilingual deep neural network based acoustic modeling for rapid language adaptation", |
| "authors": [ |
| { |
| "first": "Ngoc", |
| "middle": [ |
| "Thang" |
| ], |
| "last": "Vu", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Imseng", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "Petr", |
| "middle": [], |
| "last": "Motlicek", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Schultz", |
| "suffix": "" |
| }, |
| { |
| "first": "Herv\u00e9", |
| "middle": [], |
| "last": "Bourlard", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 39th IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ngoc Thang Vu, David Imseng, Daniel Povey, Petr Motlicek, Tanja Schultz, and Herv\u00e9 Bourlard. 2014. Multilingual deep neural network based acoustic modeling for rapid language adaptation. In Proceed- ings of the 39th IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP).", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Semi-supervised and Cross-lingual Knowledge Transfer Learnings for DNN Hybrid Acoustic Models under Low-resource Conditions", |
| "authors": [ |
| { |
| "first": "Haihua", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Chongjia", |
| "middle": [], |
| "last": "Ni", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiong", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Eng-Siong", |
| "middle": [], |
| "last": "Chng", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "1315--1319", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haihua Xu, Hang Su, Chongjia Ni, Xiong Xiao, Hao Huang, Eng-Siong Chng, and Haizhou Li. 2016. Semi-supervised and Cross-lingual Knowl- edge Transfer Learnings for DNN Hybrid Acous- tic Models under Low-resource Conditions. In Pro- ceedings of the Annual Conference of the Interna- tional Speech Communication Association, (INTER- SPEECH). San Francisco, USA, pages 1315-1319.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Cross-lingual language modeling for low-resource speech recognition", |
| "authors": [ |
| { |
| "first": "Ping", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "IEEE Transactions on Audio, Speech and Language Processing", |
| "volume": "21", |
| "issue": "6", |
| "pages": "1134--1144", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TASL.2013.2244088" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ping Xu and Pascale Fung. 2013. Cross-lingual language modeling for low-resource speech recog- nition. IEEE Transactions on Audio, Speech and Language Processing 21(6):1134-1144. https://doi.org/10.1109/TASL.2013.2244088.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Input features include log Filterbank features (fbank), pitch features of Ghahremani et al. (2014) (pitch), and a combination of both (fbank+pitch). The fbank features comprise 41 log Filterbank features along with their first and second derivatives.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "Phoneme error rate (PER) and tone error rate (TER) on test sets as training data is scaled for Na (left) and Chatino (right). The legend entries are formatted as \u27e8input features\u27e9 \u21d2 \u27e8output labels\u27e9 to indicate input features to the model and output target labels.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "text": "Confusion matrix showing the rates of substitution errors between tones (as a percentage, normalized per row).", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| } |
| } |
| } |
| } |