| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:12:12.086029Z" |
| }, |
| "title": "Enhancing Documentation of Hupa with Automatic Speech Recognition", |
| "authors": [ |
| { |
| "first": "Zoey", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Boston College", |
| "location": {} |
| }, |
| "email": "zoey.liu@bc.edu" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Spence", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "settlement": "Davis" |
| } |
| }, |
| "email": "jspence@ucdavis.edu" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Prud'hommeaux", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Boston College", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This study investigates applications of automatic speech recognition (ASR) techniques to Hupa, a critically endangered Native American language from the Dene (Athabaskan) language family. Using around 9h12m of spoken data produced by one elder who is a first-language Hupa speaker, we experimented with different evaluation schemes and training settings. On average a fully connected deep neural network reached a word error rate of 35.26%. Our overall results illustrate the utility of ASR for making Hupa language documentation more accessible and usable. In addition, we found that when training acoustic models, using recordings with transcripts that were not carefully verified did not necessarily have a negative effect on model performance. This shows promise for speech corpora of indigenous languages that commonly include transcriptions produced by second-language speakers or linguists who have advanced knowledge in the language of interest.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This study investigates applications of automatic speech recognition (ASR) techniques to Hupa, a critically endangered Native American language from the Dene (Athabaskan) language family. Using around 9h12m of spoken data produced by one elder who is a first-language Hupa speaker, we experimented with different evaluation schemes and training settings. On average a fully connected deep neural network reached a word error rate of 35.26%. Our overall results illustrate the utility of ASR for making Hupa language documentation more accessible and usable. In addition, we found that when training acoustic models, using recordings with transcripts that were not carefully verified did not necessarily have a negative effect on model performance. This shows promise for speech corpora of indigenous languages that commonly include transcriptions produced by second-language speakers or linguists who have advanced knowledge in the language of interest.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The documentation of endangered and other lessstudied languages typically involves the creation of high-quality audio and video recordings representing a variety of speech genres, with the long-term goal of generating general-purpose linguistic data that can be used by diverse audiences for different research and applied purposes (Himmelmann, 1998; Riesberg, 2018) . With the advent of cheap, highly portable digital recording and storage technologies since the early 2000s, it is not uncommon for fieldwork projects to generate hundreds of hours of multimedia recordings.", |
| "cite_spans": [ |
| { |
| "start": 332, |
| "end": 350, |
| "text": "(Himmelmann, 1998;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 351, |
| "end": 366, |
| "text": "Riesberg, 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While these collections of recordings are becoming increasingly accessible via web-based portals, in the sense that they can be downloaded, locating information of interest within them correctly and efficiently is another matter entirely. Coarsegrained catalog metadata describing the content of the recordings can provide users with some shallow guidance, but the identification of more specific information requires enormous investments of time and effort. Accordingly, it becomes essential to have adequate transcriptions of recordings for users to find the information they are interested in.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Transcribing recordings, however, is also an extremely time-consuming endeavor, leading to what is sometimes called the \"transcription bottleneck\" (Gupta and Boulianne, 2020; Zahrer et al., 2020; \u0106avar et al., 2016; Shi et al., 2021) , which refers to the situation where the language data is mostly in the form of (archival) recordings, and transcriptions of the data are not yet available.", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 174, |
| "text": "(Gupta and Boulianne, 2020;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 175, |
| "end": 195, |
| "text": "Zahrer et al., 2020;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 196, |
| "end": 215, |
| "text": "\u0106avar et al., 2016;", |
| "ref_id": null |
| }, |
| { |
| "start": 216, |
| "end": 233, |
| "text": "Shi et al., 2021)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Hupa (ISO 639-3 code: hup; Glottolog code: hupa1240), a critically endangered Native American language of northwestern California, provides a case in point. Since the early 2000s, Mrs. Verdena Parker, an elder from the Hoopa Valley Tribe, has generously shared her knowledge of the language with other community members and academic researchers. Recordings produced by and with Mrs. Parker include several hours of monolingual Hupa narratives and other texts, as well as over 800 hours of linguistic interviews that are a mixture of Hupa and English as the elicitation metalanguage. 1 The sheer quantity of these Hupa recordings makes their transcription challenging, a situation that is exacerbated by other factors. First, the people who are considered first-language speakers of Hupa are older and tend not to be literate in the language. Therefore the pool of potential transcribers is limited to second-language speakers and linguists with advanced research knowledge. Second, while literacy is used as a tool for some pedagogical purposes in the contemporary Hupa community and there is a reasonably well-established practical orthography, many of the classes for learning Hupa focus more on developing oral proficiency rather than on literacy skills per se. This means many of the younger people who have become secondlanguage speakers of the language may not feel confident in their ability to produce accurate transcriptions of connected discourse.", |
| "cite_spans": [ |
| { |
| "start": 583, |
| "end": 584, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we apply automatic speech recognition (ASR) technology to help address the transcription bottleneck for Hupa. In particular, we hope to develop effective techniques that would lend themselves to transcribing spoken Hupa. At this stage of the research, we are focusing primarily on monolingual narratives and other texts since these have the highest density of linguistic data and thus more value for research and language documentation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2 Meet the Language Data", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Hupa is the ancestral language of the Hoopa Valley Tribe in present-day Humboldt County, California. Since the mid-19th century, Hupa people have endured many hardships in the wake of the violent colonization of the region, including decades of educational policies that were designed to eradicate indigenous languages and other manifestations of traditional culture. As a result of this difficult history, by the mid-20th century most Hupa children grew up primarily speaking English as their first language, and today there are only a handful of elderly people (probably fewer than a dozen) who are considered first-language speakers of Hupa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Hupa Language", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Nevertheless, at least since the 1970s, tribal members have been engaged in various kinds of language reclamation efforts (in the sense of Leonard (2011)), and today a number of people have developed a high degree of L2 proficiency in the language. Students at Hoopa Valley High School can take four years of Hupa language as part of their regular curriculum, and a practical orthography for the language developed in the 1980s and 1990s (Golla, 1996) is used in a number of pedagogically-oriented resources. Good descriptions of the linguistic features of Hupa are also obtainable from Golla (1970) and Sapir and Golla (2001) (see also Gordon (1996) ), although there remains something of a disconnect between the highly technical descriptive materials produced by professional academics and the needs on the ground of language teachers and learners.", |
| "cite_spans": [ |
| { |
| "start": 438, |
| "end": 451, |
| "text": "(Golla, 1996)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 587, |
| "end": 599, |
| "text": "Golla (1970)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 604, |
| "end": 626, |
| "text": "Sapir and Golla (2001)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 637, |
| "end": 650, |
| "text": "Gordon (1996)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Hupa Language", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The Hupa audio data in our experiments consists of a subset of audio recordings collected from fieldwork with Mrs. Verdena Parker (Table 1) that started in 2005 and is ongoing today. The majority of the recordings we use feature Mrs. Parker telling stories from different genres, including personal anecdotes from her life, oral-historical accounts of significant events in Hoopa Valley, and traditional stories that explain how the world came to be. Each recording has time-aligned transcriptions in the practical orthography of Golla (1996) ; the transcripts were produced by a human transcriber using annotation tools such as ELAN (Brugman and Russel, 2004) .", |
| "cite_spans": [ |
| { |
| "start": 530, |
| "end": 542, |
| "text": "Golla (1996)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 634, |
| "end": 660, |
| "text": "(Brugman and Russel, 2004)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 130, |
| "end": 139, |
| "text": "(Table 1)", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Audio data and transcriptions", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Since the audio files had been transcribed gradually over a number of years by several researchers, each transcript was lightly edited and corrected by a linguist (an author of this paper), who has advanced research knowledge of the language. As of now, after removing utterances that are fully in English, the amount of spoken Hupa available for conducting ASR experiments totals 9h12m.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Audio data and transcriptions", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Although all transcriptions were checked in consultation with Mrs. Parker, each one typically goes through several stages of manual checking before being considered complete. As a result, some transcriptions have been subsequently examined more thoroughly than others. Based solely on transcription quality differences, we divided the audio data into two sets: the \"verified\" data (\u223c1h35m) vs. the \"coarse\" data (\u223c7h37m).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Audio data and transcriptions", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Overall, the transcriptions of the verified data are more accurate than those of the coarse data. That said, the verified transcriptions typically have undergone more orthographic normalization, which includes removing elements (e.g., word-final epenthetic vowels) that are audible in the recordings but are not part of the practical orthography (Golla, 1996) . In a small number of instances, the verified transcriptions might have slight deviations from what was actually produced in the corresponding recording if Mrs. Parker felt strongly that she had misspoken. Therefore while the verified transcriptions tend to be more accurate, in some ways they are idealizations that are less faithful to the acoustic substance of their original recordings.", |
| "cite_spans": [ |
| { |
| "start": 346, |
| "end": 359, |
| "text": "(Golla, 1996)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Audio data and transcriptions", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In addition to the audio recordings and their transcriptions, we also included digitized texts for our experiments (Section 4); these texts were originally transcribed from dictation from Sapir and Golla (2001) and Goddard (1904) ( Table 1) .", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 210, |
| "text": "Sapir and Golla (2001)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 232, |
| "end": 240, |
| "text": "Table 1)", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Digitized texts", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "While research on ASR for endangered language documentation is still relatively rare, recently there has been growing efforts trying to mitigate this gap . Shi et al. (2021) adopted end-to-end systems for Yolox\u00f3chitl Mixtec, an endangered Mixtecan language. Using encoder-decoder architectures, they achieved the best word error rate (WER) (\u223c16%) for over 55h of conversational speech from more than twenty speakers. Gupta and Boulianne (2020) applied neural ASR models for Cree, an indigenous language in Canada. Their data consists of 4h30m story retelling or reading from six speakers. Utilizing data from high-resource languages, Zahrer et al. (2020) performed cross-linguistic learning of phoneme recognition for the Muyu language. In a study of ASR for two tonal languages, Yongning Na and Eastern Chatino, proposed a neural architecture to jointly predict phonemes and tones without needing timealigned transcripts and pronunciation dictionary. ASR technologies have also been developed for some Dene languages (Littell et al., 2018) , though in a limited way. For instance, speech recognition tools were incorporated into the Rosetta Stone language learning software for Din\u00e9 Bizaad (Navajo). 2 The Persephone ASR software was combined in ELAN (Brugman and Russel, 2004) for Tsuut'ina.", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 173, |
| "text": "Shi et al. (2021)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1018, |
| "end": 1040, |
| "text": "(Littell et al., 2018)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1201, |
| "end": 1202, |
| "text": "2", |
| "ref_id": null |
| }, |
| { |
| "start": 1252, |
| "end": 1278, |
| "text": "(Brugman and Russel, 2004)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In (low-resource) ASR experiments 3 , acoustic models are commonly evaluated with data from held-out speaker(s). This evaluation standard, however, is not applicable in our study here since all of the Hupa audio came from one speaker. Thus as alternatives, we designed two separate evaluation schemes for both the verified and the coarse data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation scheme", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The first one utilized random splits, for which we randomly divided all the recordings into training and test sets at a 4:1 ratio for ten times. For the second scheme, taking into account the fact that the audio recordings were collected from distinct fieldwork dates (17 dates for the verified data and 34 dates for the coarse data), we used recordings from each held-out date as the test set and the rest of the data was employed as the training set. WER and character error rate (CER) were taken as evaluation metrics for model performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation scheme", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Note that the results obtained from these two evaluation methods are not directly comparable, given that the amount of training data and that of the test data for the two methods are different. On the other hand, the goal of employing separate evaluation schemes is to acquire more realistic estimates regarding the potential of the ASR systems in the case of Hupa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation scheme", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "With the two evaluation schemes outlined above, we investigated different training settings with the goal of exploring: (1) the differences between the verified and coarse data; (2) the utility of including all acoustic data, regardless of transcription quality.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acoustic training data configuration", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In our first four experiments, we focused on the verified data, evaluating ASR performance with random splits then with held-out dates. We then included the coarse data for model training, keeping the test data the same in order to determine whether WER decreases with more training data, even when there is a mismatch in transcription quality between the test data and the training data. In our second set of experiments, we carried out the same model training procedures using the coarse data. Finally, we combined the coarse data and verified data to train and test acoustic models on random splits of this combined data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acoustic training data configuration", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For each training/test set split of the audio data, we built one trigram language model with Witten-Bell discounting using the SRILM toolkit (Stolcke, 2002) ; the data used to train the language model also included the transcripts of the audio training data along with the digitized texts.", |
| "cite_spans": [ |
| { |
| "start": 141, |
| "end": 156, |
| "text": "(Stolcke, 2002)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language and acoustic models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "For acoustic modeling, we drew on the opensource Kaldi toolkit (Povey et al., 2011) . The au-Original utterance: haya:\u0142 keh do'ng haya: ch'in' *** teh\u0142 Model prediction:", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 83, |
| "text": "(Povey et al., 2011)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language and acoustic models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "haya:\u0142 *** do'ng haya: ch'in' te: niwhsing Evaluation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language and acoustic models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "D I S", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language and acoustic models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The original utterance has six words; compared to the original utterance, the utterance predicted by the ASR model contains one deletion (D), one insertion (I), and one substitution (S); therefore:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language and acoustic models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "WER = 100 * 1+1+1 6 = 50%", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language and acoustic models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "An example of WER calculation; I for insertion, D for deletion, and S for substitution. dio recordings were transformed to the standard 13 dimensional mel-frequency cepstral coefficients (MFCCs), as well as their delta-and delta-delta features. The delta-and delta-delta features are, respectively, numerical approximations of the first and second order derivatives of the MFCCs, both computed on a 25ms window with 10ms interval apart which enables modeling the trajectories of the audio signals. Linear Discriminant Analysis and Maximum Likelihood Linear Transform were then employed to reduce the dimensionality of the feature vectors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language and acoustic models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The acoustic model architecture that we used is a fully connected deep neural network (DNN) (Miao et al., 2015) , which has been demonstrated to have competitive performance when facing data limitation (Morris et al., 2021) . The DNN had six hidden layers, each with 1024 hidden units. Sequence training was carried out with the default parameters in Kaldi using state-level minimum Bayes risk criterion and a per-utterance Stochastic Gradient Descent weight update. Decoding was performed with the finite state transducer-based decoder im-plemented in Kaldi.", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 111, |
| "text": "(Miao et al., 2015)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 202, |
| "end": 223, |
| "text": "(Morris et al., 2021)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language and acoustic models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The average WER results for the verified data given each training setting and evaluation scheme are presented in Table 2 . When only using the verified data for ASR training and evaluation, we obtained a WER of 53.23%; on the other hand, we see that combining coarse data with the training data of the verified set resulted in much lower WER values (and lower CER values as well), and accordingly better model performance; this pattern is consistent regardless of whether evaluating acoustic models with random splits or held-out dates. Similar observations hold when developing models for the coarse data with additional help of verified data (Table 3) , which also led to lower WER values. These results indicate that including more training data, even when the transcription quality of the training data does not necessarily match that of the test data, is helpful to build better ASR models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 113, |
| "end": 120, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 644, |
| "end": 653, |
| "text": "(Table 3)", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "When combining all data from the verified set and the coarse set together, we reached a WER of 35.26% evaluated with random splits, which is comparable to the results of random splits for each data set separately.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Leveraging ASR technologies, we investigate the possibility and effectiveness of automatically transcribing fieldwork recordings for Hupa. Through experimentation with different evaluation schemes and training settings, the acoustic models demonstrate reasonable WER results, showing promise for applying spoken language technology to document Hupa. Interestingly, training ASR models using recordings with transcripts that were not carefully verified did not negatively impact the performance, which bodes well for speech corpora of indigenous languages that include transcriptions produced by second-language speakers or linguists.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion & Ongoing Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In ongoing work, we are extending our efforts in several directions. First, the transcripts of the coarse data are being manually checked periodically to improve transcription and gloss alignment quality. Second, as we are still in the preliminary stage of performing ASR for Hupa, the current study only used the DNN architecture from Kaldi. We plan to explore other more recent neural approaches (Watanabe et al., 2018) that have been found to be effective with limited amount of audio data (Shi et al., 2021) ; then apply the trained models to recordings that have not yet been transcribed in an iterative fashion to better combine ASR with documentation of Hupa. Even a WER as high as \u223c 35.26% is expected to yield significant savings in the time required to make transcribed texts available.", |
| "cite_spans": [ |
| { |
| "start": 398, |
| "end": 421, |
| "text": "(Watanabe et al., 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 493, |
| "end": 511, |
| "text": "(Shi et al., 2021)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion & Ongoing Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Third, thus far our acoustic models are decoded with language models at the word level. However, given the complex morphological features of Hupa (Sapir and Golla, 2001) , to reduce outof-vocabulary rate in future experiments, we are working towards combining morphological segmentation or subword unit models Liu et al. (2019) into building ASR systems. Lastly, with better performing acoustic models and more transcriptions, we aim to develop a workflow to adapt these transcribed materials into pedagogically-oriented resources for use by members of the community.", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 169, |
| "text": "(Sapir and Golla, 2001)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 310, |
| "end": 327, |
| "text": "Liu et al. (2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion & Ongoing Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Many of these recordings are now available through the California Language Archive web portal: https://cla. berkeley.edu/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://navajorenaissance.org/ 3 Code in quarantine at https://github.com/ zoeyliu18/Hupa", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We are grateful for the continuous support from the Hupa indigenous community. We would like to especially thank Mrs. Verdena Parker for her generous and valuable input for the documentation work of Hupa throughout the years. In addition, we thank the anonymous reviewers for their helpful feedback. This material is based upon work supported by the National Science Foundation under Grant #2127309 to the Computing Research Association for the CIFellows Project, and Grant #1761562. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation nor the Computing Research Association.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Evaluating Phonemic Transcription of Low-Resource Tonal Languages for Language Documentation", |
| "authors": [ |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Adams", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Hilaria", |
| "middle": [], |
| "last": "Cruz", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Michaud", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oliver Adams, Trevor Cohn, Graham Neubig, Hilaria Cruz, Steven Bird, and Alexis Michaud. 2018. Eval- uation Phonemic Transcription of Low-Resource Tonal Languages for Language Documentation. In Proceedings of the Eleventh International Confer- ence on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Re- sources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Annotating multi-media/multi-modal resources with ELAN", |
| "authors": [ |
| { |
| "first": "Hennie", |
| "middle": [], |
| "last": "Brugman", |
| "suffix": "" |
| }, |
| { |
| "first": "Albert", |
| "middle": [], |
| "last": "Russel", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Fourth International Conference on Language Resources and Evaluation (LREC'04)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hennie Brugman and Albert Russel. 2004. Annotating multi-media/multi-modal resources with ELAN. In Proceedings of the Fourth International Conference on Language Resources and Evaluation (LREC'04), Lisbon, Portugal. European Language Resources As- sociation (ELRA).", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Endangered Language Documentation: Bootstrapping a Chatino Speech Corpus, Forced Aligner, ASR", |
| "authors": [ |
| { |
| "first": "Damir\u0107avar", |
| "middle": [], |
| "last": "Malgorzata\u0107avar", |
| "suffix": "" |
| }, |
| { |
| "first": "Hilaria", |
| "middle": [], |
| "last": "Cruz", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
| "volume": "", |
| "issue": "", |
| "pages": "4004--4011", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Malgorzata\u0106avar, Damir\u0106avar, and Hilaria Cruz. 2016. Endangered Language Documentation: Boot- strapping a Chatino Speech Corpus, Forced Aligner, ASR. In Proceedings of the Tenth International Conference on Language Resources and Evalua- tion (LREC'16), pages 4004-4011, Portoro\u017e, Slove- nia. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Hupa texts", |
| "authors": [ |
| { |
| "first": "Pliny",
| "middle": [
| "Earle"
| ],
| "last": "Goddard",
| "suffix": "" |
| } |
| ], |
| "year": 1904, |
| "venue": "", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pliny Earle Goddard. 1904. Hupa texts, volume 1. The University Press.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Hupa Language Dictionary Second Edition", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Golla", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Golla. 1996. Hupa Language Dictionary Second Edition.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Hupa grammar", |
| "authors": [ |
| {
| "first": "Victor",
| "middle": [
| "Karl"
| ],
| "last": "Golla",
| "suffix": ""
| }
| ], |
| "year": 1970, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Karl Golla. 1970. Hupa grammar. Ph.D. thesis, University of California, Berkeley.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "The phonetic structures of Hupa. UCLA Working Papers in Phonetics", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Gordon", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "164--187", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Gordon. 1996. The phonetic structures of Hupa. UCLA Working Papers in Phonetics, pages 164-187.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Speech Transcription Challenges for Resource Constrained Indigenous Language Cree", |
| "authors": [ |
| { |
| "first": "Vishwa", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Gilles", |
| "middle": [], |
| "last": "Boulianne", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
| "volume": "", |
| "issue": "", |
| "pages": "362--367", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vishwa Gupta and Gilles Boulianne. 2020. Speech Transcription Challenges for Resource Constrained Indigenous Language Cree. In Proceedings of the 1st Joint Workshop on Spoken Language Technolo- gies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 362-367, Marseille, France. European Language Resources association.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Documentary and descriptive linguistics", |
| "authors": [ |
| {
| "first": "Nikolaus",
| "middle": [
| "P"
| ],
| "last": "Himmelmann",
| "suffix": ""
| }
| ], |
| "year": 1998, |
| "venue": "Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "161--195", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikolaus P Himmelmann. 1998. Documentary and de- scriptive linguistics. Linguistics, pages 161-195.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Challenging \"extinction\" through modern Miami language practices", |
| "authors": [ |
| { |
| "first": "Wesley", |
| "middle": [], |
| "last": "Leonard", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wesley Leonard. 2011. Challenging \"extinction\" through modern Miami language practices.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Indigenous language technologies in Canada: Assessment, challenges, and successes", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Littell", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Kazantseva", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [], |
| "last": "Pine", |
| "suffix": "" |
| }, |
| { |
| "first": "Antti", |
| "middle": [], |
| "last": "Arppe", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Cox", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Odile", |
| "middle": [], |
| "last": "Junker", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2620--2632", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Littell, Anna Kazantseva, Roland Kuhn, Aidan Pine, Antti Arppe, Christopher Cox, and Marie- Odile Junker. 2018. Indigenous language technolo- gies in Canada: Assessment, challenges, and suc- cesses. In Proceedings of the 27th International Conference on Computational Linguistics, pages 2620-2632, Santa Fe, New Mexico, USA. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR", |
| "authors": [ |
| { |
| "first": "Chang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengyuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghong", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "The Annual Conference of the International Speech Communication Association (Interspeech)", |
| "volume": "", |
| "issue": "", |
| "pages": "3495--3499", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang Liu, Zhen Zhang, Pengyuan Zhang, and Yonghong Yan. 2019. Character-Aware Sub-Word Level Language Modeling for Uyghur and Turk- ish ASR. In The Annual Conference of the Inter- national Speech Communication Association (Inter- speech), pages 3495-3499.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Speaker adaptive training of deep neural network acoustic models using i-vectors", |
| "authors": [ |
| { |
| "first": "Yajie", |
| "middle": [], |
| "last": "Miao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Metze", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "IEEE/ACM Transactions on Audio, Speech, and Language Processing", |
| "volume": "23", |
| "issue": "11", |
| "pages": "1938--1949", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yajie Miao, Hao Zhang, and Florian Metze. 2015. Speaker adaptive training of deep neural network acoustic models using i-vectors. IEEE/ACM Trans- actions on Audio, Speech, and Language Processing, 23(11):1938-1949.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Integrating automatic transcription into the language documentation workflow: Experiments with Na data and the Persephone toolkit", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Michaud", |
| "suffix": "" |
| }, |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Adams", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00e9verine", |
| "middle": [], |
| "last": "Guillaume", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Language Documentation & Conservation", |
| "volume": "12", |
| "issue": "", |
| "pages": "393--429", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Michaud, Oliver Adams, Trevor Cohn, Graham Neubig, and S\u00e9verine Guillaume. 2018. Integrat- ing automatic transcription into the language docu- mentation workflow: Experiments with Na data and the Persephone toolkit. Language Documentation & Conservation, 12:393-429.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "One size does not fit all in resource-constrained ASR", |
| "authors": [ |
| { |
| "first": "Ethan", |
| "middle": [], |
| "last": "Morris", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Jimerson", |
| "suffix": "" |
| }, |
| {
| "first": "Emily",
| "middle": [],
| "last": "Prud'hommeaux",
| "suffix": ""
| }
| ], |
| "year": 2021, |
| "venue": "The Annual Conference of the International Speech Communication Association (Interspeech)", |
| "volume": "", |
| "issue": "", |
| "pages": "4354--4358", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ethan Morris, Robert Jimerson, and Emily Prud'hommeaux. 2021. One size does not fit all in resource-constrained ASR. In The Annual Conference of the International Speech Communica- tion Association (Interspeech), pages 4354-4358.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The Kaldi speech recognition toolkit", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "Arnab", |
| "middle": [], |
| "last": "Ghoshal", |
| "suffix": "" |
| }, |
| { |
| "first": "Gilles", |
| "middle": [], |
| "last": "Boulianne", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukas", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "Ondrej", |
| "middle": [], |
| "last": "Glembek", |
| "suffix": "" |
| }, |
| { |
| "first": "Nagendra", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirko", |
| "middle": [], |
| "last": "Hannemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Petr", |
| "middle": [], |
| "last": "Motlicek", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanmin", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Petr", |
| "middle": [], |
| "last": "Schwarz", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "IEEE 2011 workshop on automatic speech recognition and understanding", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Povey, Arnab Ghoshal, Gilles Boulianne, Lukas Burget, Ondrej Glembek, Nagendra Goel, Mirko Hannemann, Petr Motlicek, Yanmin Qian, Petr Schwarz, et al. 2011. The Kaldi speech recogni- tion toolkit. In IEEE 2011 workshop on automatic speech recognition and understanding, CONF. IEEE Signal Processing Society.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Automatic Speech Recognition for Supporting Endangered Language Documentation. Language Documentation & Conservation", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Prud'hommeaux", |
| "suffix": "" |
| }, |
| { |
| "first": "Robbie", |
| "middle": [], |
| "last": "Jimerson", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Hatcher", |
| "suffix": "" |
| }, |
| { |
| "first": "Karin", |
| "middle": [], |
| "last": "Michelson", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "15", |
| "issue": "", |
| "pages": "491--513", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Prud'hommeaux, Robbie Jimerson, Richard Hatcher, and Karin Michelson. 2021. Automatic Speech Recognition for Supporting Endangered Language Documentation. Language Documenta- tion & Conservation, 15:491-513.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Reflections on descriptive and documentary adequacy", |
| "authors": [ |
| { |
| "first": "Sonja", |
| "middle": [], |
| "last": "Riesberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2018,
| "venue": "SP15: Reflections on Language Documentation 20 Years after Himmelmann", |
| "volume": "15", |
| "issue": "", |
| "pages": "151--156", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sonja Riesberg. 2018. Reflections on descriptive and documentary adequacy. In Bradley McDonnell, An- drea L. Berez-Kroeker, and Gary Holton, editors, SP15: Reflections on Language Documentation 20 Years after Himmelmann 1998, chapter 15, pages 151-156. University of Hawai'i Press.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Hupa texts, with notes and lexicon. The Collected Works of Edward Sapir", |
| "authors": [ |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Sapir", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Golla", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "", |
| "volume": "14", |
| "issue": "", |
| "pages": "19--1011", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edward Sapir and Victor Golla. 2001. Hupa texts, with notes and lexicon. The Collected Works of Edward Sapir, ed. by Victor Golla & Sean O'Neill, 14:19- 1011.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Leveraging End-to-End ASR for Endangered Language Documentation: An Empirical Study on Yol\u00f3xochitl Mixtec", |
| "authors": [ |
| { |
| "first": "Jiatong", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "D" |
| ], |
| "last": "Amith", |
| "suffix": "" |
| }, |
| { |
| "first": "Rey", |
| "middle": [], |
| "last": "Castillo Garc\u00eda", |
| "suffix": "" |
| }, |
| { |
| "first": "Esteban", |
| "middle": [ |
| "Guadalupe" |
| ], |
| "last": "Sierra", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Shinji", |
| "middle": [], |
| "last": "Watanabe", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume", |
| "volume": "", |
| "issue": "", |
| "pages": "1134--1145", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiatong Shi, Jonathan D. Amith, Rey Castillo Garc\u00eda, Esteban Guadalupe Sierra, Kevin Duh, and Shinji Watanabe. 2021. Leveraging End-to-End ASR for Endangered Language Documentation: An Empiri- cal Study on Yol\u00f3xochitl Mixtec. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 1134-1145, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "SRILM-an extensible language modeling toolkit", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Seventh international conference on spoken language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Stolcke. 2002. SRILM-an extensible lan- guage modeling toolkit. In Seventh international conference on spoken language processing.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "ESPnet: End-to-End Speech Processing Toolkit", |
| "authors": [ |
| { |
| "first": "Shinji", |
| "middle": [], |
| "last": "Watanabe", |
| "suffix": "" |
| }, |
| { |
| "first": "Takaaki", |
| "middle": [], |
| "last": "Hori", |
| "suffix": "" |
| }, |
| { |
| "first": "Shigeki", |
| "middle": [], |
| "last": "Karita", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomoki", |
| "middle": [], |
| "last": "Hayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiro", |
| "middle": [], |
| "last": "Nishitoba", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuya", |
| "middle": [], |
| "last": "Unno", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "2207--2211", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shinji Watanabe, Takaaki Hori, Shigeki Karita, Tomoki Hayashi, Jiro Nishitoba, Yuya Unno, Nelson En- rique Yalta Soplin, Jahn Heymann, Matthew Wies- ner, Nanxin Chen, Adithya Renduchintala, and Tsubasa Ochiai. 2018. ESPnet: End-to-End Speech Processing Toolkit. In Proceedings of Interspeech, pages 2207-2211.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Towards Building an Automatic Transcription System for Language Documentation: Experiences from Muyu", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Zahrer", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrej", |
| "middle": [], |
| "last": "Zgank", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Schuppler", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "2893--2900", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Zahrer, Andrej Zgank, and Barbara Schup- pler. 2020. Towards Building an Automatic Tran- scription System for Language Documentation: Ex- periences from Muyu. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 2893-2900, Marseille, France. European Lan- guage Resources Association.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "num": null, |
| "text": "Descriptive statistics for the text data of Hupa applied in experiments.", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF3": { |
| "num": null, |
| "text": "ASR evaluation results for the verified data.", |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"2\">Evaluation Data random train: 6h6m; splits test: 1h31m</td><td colspan=\"2\">Training setting WER (%) CER (%) just coarse data 45.13 21.37 add verified data 35.13 12.65</td></tr><tr><td>held-out dates</td><td colspan=\"2\">train: 7h24m; just coarse data test: 13m add verified data 35.60 37.70</td><td>12.58 12.37</td></tr></table>", |
| "html": null |
| }, |
| "TABREF4": { |
| "num": null, |
| "text": "ASR evaluation results for the coarse data.", |
| "type_str": "table", |
| "content": "<table><tr><td>Evaluation random splits train: 7h22m; 35.26 Data WER (%) CER (%) 12.38 test: 1h50m</td></tr></table>", |
| "html": null |
| }, |
| "TABREF5": { |
| "num": null, |
| "text": "ASR evaluation results when combining all verified and coarse data together.", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |