| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:34:17.468499Z" |
| }, |
| "title": "Multilingual Graphemic Hybrid ASR with Massive Data Augmentation", |
| "authors": [ |
| { |
| "first": "Chunxi", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Facebook AI New York", |
| "location": { |
| "settlement": "Menlo Park", |
| "region": "NY, CA", |
| "country": "USA" |
| } |
| }, |
| "email": "chunxiliu@fb.com" |
| }, |
| { |
| "first": "Qiaochu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Facebook AI New York", |
| "location": { |
| "settlement": "Menlo Park", |
| "region": "NY, CA", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Xiaohui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Facebook AI New York", |
| "location": { |
| "settlement": "Menlo Park", |
| "region": "NY, CA", |
| "country": "USA" |
| } |
| }, |
| "email": "xiaohuizhang@fb.com" |
| }, |
| { |
| "first": "Kritika", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Facebook AI New York", |
| "location": { |
| "settlement": "Menlo Park", |
| "region": "NY, CA", |
| "country": "USA" |
| } |
| }, |
| "email": "skritika@fb.com" |
| }, |
| { |
| "first": "Yatharth", |
| "middle": [], |
| "last": "Saraf", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Facebook AI New York", |
| "location": { |
| "settlement": "Menlo Park", |
| "region": "NY, CA", |
| "country": "USA" |
| } |
| }, |
| "email": "ysaraf@fb.com" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Zweig", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Facebook AI New York", |
| "location": { |
| "settlement": "Menlo Park", |
| "region": "NY, CA", |
| "country": "USA" |
| } |
| }, |
| "email": "gzweig@fb.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Towards developing high-performing ASR for low-resource languages, approaches to address the lack of resources are to make use of data from multiple languages, and to augment the training data by creating acoustic variations. In this work we present a single grapheme-based ASR model learned on 7 geographically proximal languages, using standard hybrid BLSTM-HMM acoustic models with lattice-free MMI objective. We build the single ASR grapheme set via taking the union over each language-specific grapheme set, and we find such multilingual graphemic hybrid ASR model can perform language-independent recognition on all 7 languages, and substantially outperform each monolingual ASR model. Secondly, we evaluate the efficacy of multiple data augmentation alternatives within language, as well as their complementarity with multilingual modeling. Overall, we show that the proposed multilingual graphemic hybrid ASR with various data augmentation can not only recognize any within training set languages, but also provide large ASR performance improvements.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Towards developing high-performing ASR for low-resource languages, approaches to address the lack of resources are to make use of data from multiple languages, and to augment the training data by creating acoustic variations. In this work we present a single grapheme-based ASR model learned on 7 geographically proximal languages, using standard hybrid BLSTM-HMM acoustic models with lattice-free MMI objective. We build the single ASR grapheme set via taking the union over each language-specific grapheme set, and we find such multilingual graphemic hybrid ASR model can perform language-independent recognition on all 7 languages, and substantially outperform each monolingual ASR model. Secondly, we evaluate the efficacy of multiple data augmentation alternatives within language, as well as their complementarity with multilingual modeling. Overall, we show that the proposed multilingual graphemic hybrid ASR with various data augmentation can not only recognize any within training set languages, but also provide large ASR performance improvements.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "It can be challenging to build high-accuracy automatic speech recognition (ASR) systems in the real world due to the vast language diversity and the requirement of extensive manual annotations on which the ASR algorithms are typically built. Series of research efforts have thus far been focused on guiding the ASR of a target language by using the supervised data from multiple languages. Consider the standard hidden Markov models (HMM) based hybrid ASR system with a phonemic lexicon, where the vocabulary is specified by a pronunciation lexicon. One popular strategy is to make all languages share the same phonemic representations through a universal phonetic alphabet such as International Phonetic Alphabet (IPA) phone set (Lin et al., 2009; Liu et al., 2016; Pulugundla et al., 2018; Tong et al., 2019) , or X-SAMPA phone set (Wells, 1995; Knill et al., 2013; Knill et al., 2014; Wiesner et al., 2018) . In this case, multilingual joint training can be directly applied. Given the effective neural network based acoustic modeling, another line of research is to share the hidden layers across multiple languages while the softmax layers are language dependent (Huang et al., 2013; Heigold et al., 2013) ; such multitask learning procedure can improve ASR accuracies for both within training set languages, and also unseen languages after language-specific adaptation, i.e., cross-lingual transfer learning. Different nodes in hidden layers have been shown in response to distinct phonetic features (Nagamine et al., 2015) , and hidden layers can be potentially transferable across languages. Note that the above works all assume the test language identity to be known at decoding time, and the language specific lexicon and language model applied. 
In the absence of a phonetic lexicon, building graphemic systems has shown comparable performance to phonetic lexicon-based approaches in extensive monolingual evaluations (Kanthak and Ney, 2002; Gales et al., 2015; Trmal et al., 2017) . Recent advances in end-to-end or sequenceto-sequence ASR models have attempted to take the union of multiple language-specific grapheme (i.e. orthographic character) sets, and use such union as a universal grapheme set for a single sequence-to-sequence ASR model (Watanabe et al., 2017; Toshniwal et al., 2018; Kim and Seltzer, 2018; Kannan et al., 2019) . It allows for learning a graphemebased model jointly on data from multiple languages, and performing ASR on within training set languages. In various cases it can produce performance gains over monolingual modeling that uses in-language data only. Since HMM-based hybrid model remains a competitive ASR approach especially in low/medium-resource settings (L\u00fcscher et al., 2019; Wang et al., 2020) , in our work, we aim to examine the same approach above of building a multilingual graphemic lexicon, while using a hybrid ASR systembased on Bidirectional Long Short-Term Memory (BLSTM) and HMM -learned with lattice-free maximum mutual information (MMI) objective (Povey et al., 2016) . Our initial attempt is on building a single cascade of an acoustic model, a phonetic decision tree, a graphemic lexicon and a language model -for 7 geographically proximal languages that have little overlap in their character sets. We evaluate it in a low resource context where each language has around 160 hours training data. We find that, despite the lack of explicit language identification (ID) guidance, our multilingual graphemic hybrid ASR model can accurately produce ASR transcripts in the correct test language scripts, and provide higher ASR accuracies than each language-specific ASR model. 
We further examine if using a subset of closely related languages -along language family or orthographycan achieve the same performance improvements as using all 7 languages. Though extensive end-to-end or sequence-to-sequence ASR works have been built on multilingual graphemic models, to the best of our knowledge, there is no prior work in hybrid ASR that uses a single multilingual graphemic lexicon (rather than an IPA or X-SAMPA based phonetic lexicon) for multiple training languages. In this work, we show for the first time that multilingual graphemic hybrid ASR can provide large improvements across all training languages, even though almost each training language has distinct graphemic set. We proceed with our investigation on various data augmentation techniques to overcome the lack of training data in the above low-resource setting. Given the highly scalable neural network acoustic modeling, extensive alternatives to increasing the amount or diversity of existing training data have been explored in prior works, e.g., applying vocal tract length perturbation and speed perturbation (Ko et al., 2015) , volume perturbation and normalization , additive noises (Amodei et al., 2016) , reverberation (Peddinti et al., 2015; Ko et al., 2017; Kim et al., 2017) , and SpecAugment (Park et al., 2019) . In this work we focus particularly on techniques that mostly apply to our wildly collected video datasets. In comparing their individual and complementary effects, we aim to answer: (i) if there is benefit in scaling the model training to significantly larger quantities, e.g., up to 9 times greater than the original training set size, and (ii) if any, is the data augmentation efficacy comparable or complementary with the above multilingual modeling. Improving accessibility to videos \"in the wild\" such as automatic captioning on YouTube has been studied in (Liao et al., 2013; Soltau et al., 2017) . 
While allowing for applications like video captions, indexing and retrieval, transcribing the heterogeneous social media videos of extensively diverse languages is highly challenging for ASR systems. On the whole, we present empirical studies in building a single multilingual graphemic hybrid ASR model capable of language-independent decoding on multiple languages, and in effective data augmentation techniques for video datasets.", |
| "cite_spans": [ |
| { |
| "start": 730, |
| "end": 748, |
| "text": "(Lin et al., 2009;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 749, |
| "end": 766, |
| "text": "Liu et al., 2016;", |
| "ref_id": null |
| }, |
| { |
| "start": 767, |
| "end": 791, |
| "text": "Pulugundla et al., 2018;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 792, |
| "end": 810, |
| "text": "Tong et al., 2019)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 834, |
| "end": 847, |
| "text": "(Wells, 1995;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 848, |
| "end": 867, |
| "text": "Knill et al., 2013;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 868, |
| "end": 887, |
| "text": "Knill et al., 2014;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 888, |
| "end": 909, |
| "text": "Wiesner et al., 2018)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1168, |
| "end": 1188, |
| "text": "(Huang et al., 2013;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1189, |
| "end": 1210, |
| "text": "Heigold et al., 2013)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1506, |
| "end": 1529, |
| "text": "(Nagamine et al., 2015)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1928, |
| "end": 1951, |
| "text": "(Kanthak and Ney, 2002;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1952, |
| "end": 1971, |
| "text": "Gales et al., 2015;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1972, |
| "end": 1991, |
| "text": "Trmal et al., 2017)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 2257, |
| "end": 2280, |
| "text": "(Watanabe et al., 2017;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 2281, |
| "end": 2304, |
| "text": "Toshniwal et al., 2018;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 2305, |
| "end": 2327, |
| "text": "Kim and Seltzer, 2018;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 2328, |
| "end": 2348, |
| "text": "Kannan et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 2706, |
| "end": 2728, |
| "text": "(L\u00fcscher et al., 2019;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 2729, |
| "end": 2747, |
| "text": "Wang et al., 2020)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 3014, |
| "end": 3034, |
| "text": "(Povey et al., 2016)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 4745, |
| "end": 4762, |
| "text": "(Ko et al., 2015)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 4821, |
| "end": 4842, |
| "text": "(Amodei et al., 2016)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 4859, |
| "end": 4882, |
| "text": "(Peddinti et al., 2015;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 4883, |
| "end": 4899, |
| "text": "Ko et al., 2017;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 4900, |
| "end": 4917, |
| "text": "Kim et al., 2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 4936, |
| "end": 4955, |
| "text": "(Park et al., 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 5520, |
| "end": 5539, |
| "text": "(Liao et al., 2013;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 5540, |
| "end": 5560, |
| "text": "Soltau et al., 2017)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "In this section we first briefly describe our deployed hybrid ASR architecture based on the weighted finite-state transducers (WFSTs) outlined in (Mohri et al., 2008) . Then we present its extension to multilingual training. Lastly, we discuss its language-independent decoding and languagespecific decoding.", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 166, |
| "text": "(Mohri et al., 2008)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual Graphemic Hybrid ASR", |
| "sec_num": "2." |
| }, |
| { |
| "text": "In the ASR framework of a hybrid BLSTM-HMM, the decoding graph can be interpreted as a composed WFST of cascade H \u2022 C \u2022 L \u2022 G. Acoustic models, i.e. BLSTMs, produce acoustic scores over context-dependent HMM (i.e. triphone) states. A WFST H, which represents the HMM set, maps the triphone states to context-dependent phones. While in graphemic ASR, the notion of phone is turned to grapheme, and we typically create the grapheme set via modeling each orthographic character as a separate grapheme. Then a WFST C maps each context-dependent grapheme, i.e. tri-grapheme, to an orthographic character. The lexicon L is specified where each word is mapped to a sequence of characters forming that word. G encodes either the transcript during training, or a language model during decoding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graphemic ASR with WFST", |
| "sec_num": "2.1." |
| }, |
| { |
| "text": "To build a single grapheme-based acoustic model for multiple languages, a multilingual graphemic set is obtained by taking a union of each grapheme set from each language considered, each of which can be either overlapping or nonoverlapping. In the multilingual graphemic lexicon, each word in any language is mapped to a sequence of characters in that language. A context-dependent acoustic model is constructed using the decision tree clustering of tri-grapheme states, in the same fashion as the context dependent triphone state tying (Young et al., 1994) . The graphemic-context decision tree is constructed over all the multilingual acoustic data including each language of interest. The optimal number of leaves for the multilingual model tends to be larger than for a monolingual neural network. The acoustic model is a BLSTM network, using sequence discriminative training with lattice-free MMI objective (Povey et al., 2016 ). The BLSTM model is bootstrapped from a standard Gaussian mixture model (GMM)-HMM system. A multilingual n-gram language model is learned over the combined transcripts including each language considered.", |
| "cite_spans": [ |
| { |
| "start": 538, |
| "end": 558, |
| "text": "(Young et al., 1994)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 913, |
| "end": 932, |
| "text": "(Povey et al., 2016", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Single Multilingual ASR Model Using Lattice-Free MMI", |
| "sec_num": "2.2." |
| }, |
| { |
| "text": "Given the multilingual lexicon and language model, the multilingual ASR above can decode any within training set language, even though not explicitly given any information about language identity. We refer to it as languageindependent decoding or multilingual decoding. Note that such ASR can thus far produce any word in the multilingual lexicon, and the hypothesized word can either be in the vocabulary of the considered test language, or out of test language vocabulary as a mismatched-language error. We further consider applying language-specific decoding, assuming the test language identity to be known at decoding time. Again consider the decoding graph", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language-Independent and Language-Specific Decoding in the WFST Framework", |
| "sec_num": "2.3." |
| }, |
| { |
| "text": "H \u2022 C \u2022 L \u2022 G,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language-Independent and Language-Specific Decoding in the WFST Framework", |
| "sec_num": "2.3." |
| }, |
| { |
| "text": "and H & C are thus multilingual while the lexicon L and language model G can include only the words in test language vocabulary. The multilingual acoustic model can therefore make use of multilingual training data, while its language-specific decoding operation only produces monolingual words matched with test language identity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language-Independent and Language-Specific Decoding in the WFST Framework", |
| "sec_num": "2.3." |
| }, |
| { |
| "text": "In this section, we consider 3 categories of data augmentation techniques that are effectively applicable to video datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Augmentation", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Both speed and volume perturbation emulate mean shifts in spectrum (Ko et al., 2015; Peddinti et al., 2015) . To perform speed perturbation of the training data, we produce three versions of each audio with speed factors 0.9, 1.0, and 1.1. The training data size is thus tripled. For volume perturbation, each audio is scaled with a random variable drawn from a uniform distribution [0.125, 2].", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 84, |
| "text": "(Ko et al., 2015;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 85, |
| "end": 107, |
| "text": "Peddinti et al., 2015)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speed and Volume Perturbation", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "To further increase training data size and diversity, we can create new audios via superimposing each original audio with additional noisy audios in time domain. To obtain diverse noisy audios, we use AudioSet, which consists of 632 audio event classes and a collection of over 2 million manually-annotated 10-second sound clips from YouTube videos (Gemmeke et al., 2017) . Note that in our video datasets, video lengths vary between 10 seconds and 5 minutes, with an average duration of about 2 minutes. Rather than constantly repeating the 10-second sound clip to match the original minute-long audio, we superimpose each sound clip on the short utterances via audio segmentation. Specifically, we first use an initial bootstrap model to align each original long audio, and segment each audio into around 10-second utterances via word boundaries. Then for each utterance in the original train set, we can create a new noisy utterance by the steps:", |
| "cite_spans": [ |
| { |
| "start": 349, |
| "end": 371, |
| "text": "(Gemmeke et al., 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive Noise", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "1. Sample a sound clip from AudioSet.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive Noise", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "2. Trim or repeat the sound clip as necessary to match the duration of the original utterance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive Noise", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "3. Sample a signal-to-noise ratio (SNR) from a Gaussian distribution with mean 10, and round the SNR up to 0 or down to 20 if the sample is beyond 0-20dB. Then scale the sound clip signal to obtain the target SNR.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive Noise", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "4. Superimpose the original utterance signal with the scaled sound clip signal in time domain to create the resulting utterance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive Noise", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "Thus for each original utterance, we can create a variable number of new noisy utterances via sampling sound clips. We use a 3-fold augmentation that combines the original train set with two noisy copies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive Noise", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "We consider applying the frequency and time masking techniques -which are shown to greatly improve the performance of end-to-end ASR models (Park et al., 2019) -to our hybrid systems. Similarly, they can be applied online during each epoch of LF-MMI training, while time warping requires the need for realignment and thus does not fit hybrid model training. Consider each utterance (i.e. after the audio segmentation in Section 3.2.), and we compute its log mel spectrogram with \u03bd dimension and \u03c4 time steps:", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 159, |
| "text": "(Park et al., 2019)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SpecAugment", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "1. Frequency masking is applied m F times, and each time the frequency bands", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SpecAugment", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "[f 0 , f 0 +f ) are masked, where f is sampled from [0, F ] and f 0 is sampled from [0, \u03bd \u2212 f ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SpecAugment", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "2. Time masking is optionally applied m T times, and each time the time steps [t 0 , t 0 + t) are masked, where t is sampled from [0, T ] and t 0 is sampled from [0, \u03c4 \u2212 t).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SpecAugment", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "As in (Park et al., 2019) , we increase the training schedule accordingly, i.e., number of epochs.", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 25, |
| "text": "(Park et al., 2019)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SpecAugment", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "Our multilingual ASR attempt was on 7 geographically proximal languages: Kannada, Malayalam, Sinhala, Tamil, Bengali, Hindi and Marathi. The datasets were a set of public social media videos, which were wildly collected and anonymized. We categorized them into four sets: xtrmI differed from xtrmII in chronological order, and were both more acoustically challenging than clean and noisy categories. For each language, the train and test set size are described in Table 1 , and most training data were of noisy category. On each language we also had a small validation set for model parameter tuning. Each monolingual ASR baseline was trained on language-specific data only. To create the grapheme set, we consult the unicode character ranges of each language, and also include apostrophe, hyphen and zero width joiner in the final character sets. The character sets of these 7 languages have little overlap except that (i) they all include common basic Latin alphabet, and (ii) both Hindi and Marathi use Devanagari script. We took the union of 7 character sets therein as the multilingual grapheme set (Section 2.2.), which contained 432 characters. In addition, we deliberately split 7 languages into two groups, such that the languages within each group were more closely related in terms of language family, orthography or phonology. We thus built 3 multilingual ASR models trained on:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 464, |
| "end": 471, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "clean,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "(i) all 7 languages, for 1059 training hours in total, (ii) 4 languages -Kannada, Malayalam, Sinhala and Tamil -for 590 training hours, (iii) 3 languages -Bengali, Hindi and Marathi -for 469 training hours, which are referred to as 7lang, 4lang, and 3lang respectively. Note that Kannada, Malayalam and Tamil are Dravidian languages, which have rich agglutinative inflectional morphology (Pulugundla et al., 2018) and resulted in around 10% OOV token rates on test sets (Hindi had the lowest OOV rate as 2-3%). Such experimental setup was designed to answer the questions:", |
| "cite_spans": [ |
| { |
| "start": 388, |
| "end": 413, |
| "text": "(Pulugundla et al., 2018)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "(i) If a single graphemic ASR model could scale its language-independent recognition up to all 7 languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "(ii) If including all 7 languages could yield better ASR performance than using a small subset of closely related languages. Table 2 : WER results on each video dataset. Frequency masking is denoted by fm, speed perturbation by sp, and additive noise (Section 3.2.) by noise. 3lang, 4lang and 7lang denote the multilingual ASR models trained on 3, 4 and 7 languages, respectively, as in Section 4.1.. Lang-specific decoding denotes using multilingual acoustic model with language-specific lexicon and language model, as in Section 2.3.. Average is unweighted average WER across 4 video types. Gain (%) is the relative reduction in the Average WER over each monolingual baseline. (Povey et al., 2011) . Each neural network acoustic model was a latency-controlled BLSTM (Zhang et al., 2016) , learned with lattice-free MMI objective and Adam optimizer (Kingma and Ba, 2015) . All neural networks were implemented with Caffe2 (Hazelwood et al., 2018) . Due to the production real time factor (RTF) requirements, we used the same model size in all cases -a 4 layer latency-controlled BLSTM network with 600 cells in each layer and direction -except that, the softmax dimensions, i.e. the optimal decision tree leaves, were determined through experiments on validation sets, varying within 7-30k. Input acoustic features were 80-dimensional log-mel filterbank coefficients. After lattice-free MMI training, the model with the best accuracy on validation set was used for evaluation on test set. We used standard 5-gram language models in all cases. Each multilingual 5-gram language model is learned simply via combining transcripts of each language.", |
| "cite_spans": [ |
| { |
| "start": 679, |
| "end": 699, |
| "text": "(Povey et al., 2011)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 768, |
| "end": 788, |
| "text": "(Zhang et al., 2016)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 850, |
| "end": 871, |
| "text": "(Kingma and Ba, 2015)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 923, |
| "end": 947, |
| "text": "(Hazelwood et al., 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 125, |
| "end": 132, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "ASR word error rate (WER%) results are shown in Table 2 . We found that, although not explicitly given any information on test language identities, multilingual ASR with language-independent decoding (Section 2.3.) -trained on 3, 4, or 7 languages -substantially outperformed each monolingual ASR in all cases, and on average led to relative WER reductions between 4.6% (Sinhala) and 10.3% (Hindi). Note that, in contrast to the multilingual phonetic hybrid ASR (i.e. using phonetic lexicons), it is intuitive to see ASR performance improve when different languages share the same phone set via IPA or X-SAMPA , since each phonetic modeling can use more training data than monolingual training. However, in our multilingual graphemic ASR, only 2 of 7 training languages overlapped in character sets; for the first time, we show that, such multilingual graphemic-context decision tree based hybrid ASR can still improve performance for all languages. Also, the word hypotheses from language-independent decoding could be language mismatched, e.g., part of a Kannada utterance was decoded into Marathi words. So we counted how many word tokens in the decoding transcripts were not in the lexicon of corresponding test language. We found in general only 1-3% word tokens are language mismatched, indicating that the multilingual model was very effective in identifying the language implicitly and jointly recognizing the speech.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 48, |
| "end": 56, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results with Multilingual ASR", |
| "sec_num": "4.3." |
| }, |
| { |
| "text": "Consider the scenario that, test language identities are known likewise in each monolingual ASR, and we proceed with language-specific decoding (Section 2.3.) on Kannada and Hindi, via language-specific lexicon and language model at decoding time. We found that, the language-specific decoding provided only moderate gains, presumably as discussed above, the language-independent decoding had given the mismatched-language word token rates as sufficiently low as 1-3%. Additionally, the multilingual ASR of 4lang and 3lang (Section 4.1.) achieved the same, or even slightly better performance as compared to the ASR of 7lang, suggesting that incorporating closely related languages into multilingual training is most useful for improving ASR performance. However, the 7lang ASR by itself still yields the advantage in language-independent recognition of more languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results with Multilingual ASR", |
| "sec_num": "4.3." |
| }, |
| { |
| "text": "First, we experimented with monolingual ASR on Kannada and Hindi, and performed comprehensive evaluations of the data augmentation techniques described in Section 3.. As in Table 2 , the performance gains of using frequency masking were substantial and comparable to those of using speed perturbation, where m F = 2 and F = 15 (Section 3.3.) worked best. In addition, combining both frequency masking and speed perturbation could provide further improvements. However, applying additional volume perturbation (Section 3.1.) or time masking (Section 3.3.) was not helpful in our monolingual experiments, and we omit showing the results in the table.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 173, |
| "end": 180, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results with Data Augmentation", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "Note that after speed perturbation, the training data tripled, to which we could apply another 3-fold augmentation based on additive noise (Section 3.2.), and the final train set was thus 9 times the size of original train set. We found that all 3 techniques were complementary, and in combination led to large fusion gains over each monolingual baseline -relative WER reductions of 8.7% on Kannada, and 14.8% on Hindi. Secondly, we applied the 3 data augmentation techniques to the multilingual ASR of 7lang, and tested their additive effects. We show the resulting WERs on Kannada and Hindi in Table 2 . Note that on Kannada, we found around 7% OOV token rate on clean but around 10-11% on other 3 test sets, and we observed more gains on clean ; presumably because the improved acoustic model could only correct the in-vocabulary word errors, lower OOV rates therefore left more room for improvements. Hindi had around 2.5% OOV rates on each test set, and we found incorporating data augmentation into multilingual ASR led to on average 9.0% relative WER reductions. Overall, we demonstrated the multilingual hybrid ASR with massive data augmentation -via a single graphemic model even without the use of explicit language ID -allowed for relative WER reductions of 11.0% on Kannada and 18.4% on Hindi.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 596, |
| "end": 603, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results with Data Augmentation", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "Multilingual training have been extensively studied in conventional phonetic hybrid ASR (Lin et al., 2009; Knill et al., 2013) and the recent end-to-end ASR (Watanabe et al., 2017; Toshniwal et al., 2018) . In our work, for the first time, we demonstrate that a multilingual grapheme-based hybrid ASR model can effectively perform language-independent recognition on any within training set languages, and substantially outperform each monolingual ASR alternative. Various data augmentation techniques can yield further complementary improvements. Such single multilingual model can not only provide better ASR performance, but also serves as an alternative to a typical production deployment, which typically includes extensive monolingual ASR systems and a separate language ID model. The proposed approach of building a single multilingual graphemic hybrid ASR model without requiring individual language ID -while being especially competitive in low-resource settings -can greatly simplify the productionizing and maintenance process. Additionally, as compared to the multilingual multitask learning plus monolingual fine-tuning methods in (Huang et al., 2013; Heigold et al., 2013) , our preliminary experimentation shows that our proposed approach above can give comparable performance without requiring separate language ID guidance during decoding. We leave the detailed studies to the future work. Also, future work will expand the language coverage to include both geographically proximal and distant languages.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 106, |
| "text": "(Lin et al., 2009;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 107, |
| "end": 126, |
| "text": "Knill et al., 2013)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 157, |
| "end": 180, |
| "text": "(Watanabe et al., 2017;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 181, |
| "end": 204, |
| "text": "Toshniwal et al., 2018)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1144, |
| "end": 1164, |
| "text": "(Huang et al., 2013;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1165, |
| "end": 1186, |
| "text": "Heigold et al., 2013)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5." |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors would like to thank Duc Le, Ching-Feng Yeh and Siddharth Shah, all with Facebook, for their invaluable infrastructure assistance and technical discussions. We also thank Yifei Ding and Daniel McKinnon, also at Facebook, for coordinating the ASR language expansion efforts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "6." |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Deep speech 2: End-to-end speech recognition in English and Mandarin", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ananthanarayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Anubhai", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Battenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Case", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Casper", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Catanzaro", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "International conference on machine learning", |
| "volume": "", |
| "issue": "", |
| "pages": "173--182", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amodei, D., Ananthanarayanan, S., Anubhai, R., Bai, J., Battenberg, E., Case, C., Casper, J., Catanzaro, B., Cheng, Q., Chen, G., et al. (2016). Deep speech 2: End-to-end speech recognition in English and Mandarin. In Interna- tional conference on machine learning, pages 173-182.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Unicodebased graphemic systems for limited resource languages", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gales", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "M" |
| ], |
| "last": "Knill", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ragni", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gales, M. J., Knill, K. M., and Ragni, A. (2015). Unicode- based graphemic systems for limited resource languages. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Audio set: An ontology and human-labeled dataset for audio events", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "F" |
| ], |
| "last": "Gemmeke", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "P" |
| ], |
| "last": "Ellis", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Freedman", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Lawrence", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "C" |
| ], |
| "last": "Moore", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Plakal", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gemmeke, J. F., Ellis, D. P., Freedman, D., Jansen, A., Lawrence, W., Moore, R. C., Plakal, M., and Ritter, M. (2017). Audio set: An ontology and human-labeled dataset for audio events. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Applied machine learning at facebook: A datacenter infrastructure perspective", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Hazelwood", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Brooks", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Chintala", |
| "suffix": "" |
| }, |
| { |
| "first": "U", |
| "middle": [], |
| "last": "Diril", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Dzhulgakov", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Fawzy", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Kalro", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "IEEE International Symposium on High Performance Computer Architecture (HPCA)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hazelwood, K., Bird, S., Brooks, D., Chintala, S., Diril, U., Dzhulgakov, D., Fawzy, M., Jia, B., Jia, Y., Kalro, A., et al. (2018). Applied machine learning at facebook: A data- center infrastructure perspective. In IEEE International Symposium on High Performance Computer Architecture (HPCA).", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Multilingual acoustic models using distributed deep neural networks", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Heigold", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Vanhoucke", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Senior", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Devin", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heigold, G., Vanhoucke, V., Senior, A., Nguyen, P., Ran- zato, M., Devin, M., and Dean, J. (2013). Multilingual acoustic models using distributed deep neural networks. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Cross-language knowledge transfer using multilingual deep neural network with shared hidden layers", |
| "authors": [ |
| { |
| "first": "J.-T", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huang, J.-T., Li, J., Yu, D., Deng, L., and Gong, Y. (2013). Cross-language knowledge transfer using multilingual deep neural network with shared hidden layers. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Large-scale multilingual speech recognition with a streaming end-to-end model", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Kannan", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Datta", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "N" |
| ], |
| "last": "Sainath", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Weinstein", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Ramabhadran", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bapna", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kannan, A., Datta, A., Sainath, T. N., Weinstein, E., Ram- abhadran, B., Wu, Y., Bapna, A., Chen, Z., and Lee, S. (2019). Large-scale multilingual speech recognition with a streaming end-to-end model.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Context-dependent acoustic modeling using graphemes for large vocabulary speech recognition", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kanthak", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kanthak, S. and Ney, H. (2002). Context-dependent acous- tic modeling using graphemes for large vocabulary speech recognition. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Towards languageuniversal end-to-end speech recognition", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "L" |
| ], |
| "last": "Seltzer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kim, S. and Seltzer, M. L. (2018). Towards language- universal end-to-end speech recognition. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Generation of large-scale simulated utterances in virtual rooms to train deep-neural networks for far-field speech recognition in google home", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Misra", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Chin", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Hughes", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Sainath", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Bacchiani", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kim, C., Misra, A., Chin, K., Hughes, T., Narayanan, A., Sainath, T., and Bacchiani, M. (2017). Generation of large-scale simulated utterances in virtual rooms to train deep-neural networks for far-field speech recognition in google home. In Proc. Interspeech.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "The International Conference on Learning Representations (ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kingma, D. P. and Ba, J. (2015). Adam: A method for stochastic optimization. In The International Conference on Learning Representations (ICLR).", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Investigation of multilingual deep neural networks for spoken term detection", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [ |
| "M" |
| ], |
| "last": "Knill", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gales", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "P" |
| ], |
| "last": "Rath", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "C" |
| ], |
| "last": "Woodland", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "S.-X", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. ASRU", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Knill, K. M., Gales, M. J., Rath, S. P., Woodland, P. C., Zhang, C., and Zhang, S.-X. (2013). Investigation of multilingual deep neural networks for spoken term detec- tion. In Proc. ASRU.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Language independent and unsupervised acoustic models for speech recognition and keyword spotting", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [ |
| "M" |
| ], |
| "last": "Knill", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gales", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ragni", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "P" |
| ], |
| "last": "Rath", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Knill, K. M., Gales, M. J., Ragni, A., and Rath, S. P. (2014). Language independent and unsupervised acoustic models for speech recognition and keyword spotting. In Proc. Interspeech.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Audio augmentation for speech recognition", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ko", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Peddinti", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ko, T., Peddinti, V., Povey, D., and Khudanpur, S. (2015). Audio augmentation for speech recognition. In Proc. In- terspeech.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A study on data augmentation of reverberant speech for robust speech recognition", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ko", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Peddinti", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "L" |
| ], |
| "last": "Seltzer", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ko, T., Peddinti, V., Povey, D., Seltzer, M. L., and Khu- danpur, S. (2017). A study on data augmentation of reverberant speech for robust speech recognition. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Large scale deep neural network acoustic modeling with semisupervised training data for YouTube video transcription", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Liao", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Mcdermott", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Senior", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. ASRU", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liao, H., McDermott, E., and Senior, A. (2013). Large scale deep neural network acoustic modeling with semi- supervised training data for YouTube video transcription. In Proc. ASRU.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A study on multilingual acoustic modeling for large vocabulary ASR", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y.-F", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Acero", |
| "suffix": "" |
| }, |
| { |
| "first": "C.-H", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lin, H., Deng, L., Yu, D., Gong, Y.-f., Acero, A., and Lee, C.-H. (2009). A study on multilingual acoustic modeling for large vocabulary ASR. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Adapting ASR for under-resourced languages using mismatched transcriptions", |
| "authors": [], |
| "year": null, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adapting ASR for under-resourced languages using mis- matched transcriptions. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "RWTH ASR systems for librispeech: Hybrid vs attention-w/o data augmentation", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "L\u00fcscher", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Beck", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Irie", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kitza", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Zeyer", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Schl\u00fcter", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L\u00fcscher, C., Beck, E., Irie, K., Kitza, M., Michel, W., Zeyer, A., Schl\u00fcter, R., and Ney, H. (2019). RWTH ASR sys- tems for librispeech: Hybrid vs attention-w/o data aug- mentation. In Proc. Interspeech.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Speech recognition with weighted finite-state transducers", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Mohri", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Riley", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Springer Handbook of Speech Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "559--584", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohri, M., Pereira, F., and Riley, M. (2008). Speech recognition with weighted finite-state transducers. In Springer Handbook of Speech Processing, pages 559-584. Springer.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Exploring how deep neural networks form phonemic categories", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Nagamine", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "L" |
| ], |
| "last": "Seltzer", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Mesgarani", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nagamine, T., Seltzer, M. L., and Mesgarani, N. (2015). Exploring how deep neural networks form phonemic cat- egories. In Proc. Interspeech.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "SpecAugment: A simple data augmentation method for automatic speech recognition", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "S" |
| ], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Chan", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "C.-C", |
| "middle": [], |
| "last": "Chiu", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Zoph", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "D" |
| ], |
| "last": "Cubuk", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.08779" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Park, D. S., Chan, W., Zhang, Y., Chiu, C.-C., Zoph, B., Cubuk, E. D., and Le, Q. V. (2019). SpecAugment: A simple data augmentation method for automatic speech recognition. arXiv preprint arXiv:1904.08779.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "JHU ASPIRE system: Robust LVCSR with tdnns, ivector adaptation and RNN-LMs", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Peddinti", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Manohar", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ko", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. ASRU", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peddinti, V., Chen, G., Manohar, V., Ko, T., Povey, D., and Khudanpur, S. (2015). JHU ASPIRE system: Robust LVCSR with tdnns, ivector adaptation and RNN-LMs. In Proc. ASRU.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "The Kaldi speech recognition toolkit", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ghoshal", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Boulianne", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Glembek", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hannemann", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Motl\u00ed\u010dek", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Schwarz", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. ASRU", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Povey, D., Ghoshal, A., Boulianne, G., Burget, L., Glembek, O., Goel, N., Hannemann, M., Motl\u00ed\u010dek, P., Qian, Y., Schwarz, P., et al. (2011). The Kaldi speech recognition toolkit. In Proc. ASRU.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Purely sequence-trained neural networks for ASR based on lattice-free MMI", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Peddinti", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Galvez", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Ghahremani", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Manohar", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Na", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Povey, D., Peddinti, V., Galvez, D., Ghahremani, P., Manohar, V., Na, X., Wang, Y., and Khudanpur, S. (2016). Purely sequence-trained neural networks for ASR based on lattice-free MMI. In Proc. Interspeech.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "BUT system for low resource Indian language ASR. Proc. Interspeech", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Pulugundla", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "K" |
| ], |
| "last": "Baskar", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kesiraju", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Egorova", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Karafi\u00e1t", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "\u010cernock\u00fd",
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pulugundla, B., Baskar, M. K., Kesiraju, S., Egorova, E., Karafi\u00e1t, M., Burget, L., and \u010cernock\u00fd, J. (2018). BUT system for low resource Indian language ASR. Proc. Interspeech.",
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Neural speech recognizer: Acoustic-to-word LSTM model for large vocabulary speech recognition", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Soltau", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Liao", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Sak", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soltau, H., Liao, H., and Sak, H. (2017). Neural speech recognizer: Acoustic-to-word LSTM model for large vocabulary speech recognition. Proc. Interspeech.",
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "An investigation of multilingual ASR using end-to-end LF-MMI", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Tong", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "N" |
| ], |
| "last": "Garner", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Bourlard", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tong, S., Garner, P. N., and Bourlard, H. (2019). An investigation of multilingual ASR using end-to-end LF-MMI. In Proc. ICASSP.",
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Multilingual speech recognition with a single end-to-end model", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Toshniwal", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "N" |
| ], |
| "last": "Sainath", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Moreno", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Weinstein", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toshniwal, S., Sainath, T. N., Weiss, R. J., Li, B., Moreno, P., Weinstein, E., and Rao, K. (2018). Multilingual speech recognition with a single end-to-end model. In Proc. ICASSP.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "The Kaldi OpenKWS system: Improving low resource keyword search", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Trmal", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Wiesner", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Peddinti", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Ghahremani", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Manohar", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Trmal, J., Wiesner, M., Peddinti, V., Zhang, X., Ghahremani, P., Wang, Y., Manohar, V., Xu, H., Povey, D., and Khudanpur, S. (2017). The Kaldi OpenKWS system: Improving low resource keyword search. In Proc. Interspeech.",
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Transformer-based acoustic modeling for hybrid speech recognition", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Mahadeokar", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Tjandra", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, Y., Mohamed, A., Le, D., Liu, C., Xiao, A., Mahadeokar, J., Huang, H., Tjandra, A., Zhang, X., Zhang, F., et al. (2020). Transformer-based acoustic modeling for hybrid speech recognition. In Proc. ICASSP.",
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Language independent end-to-end architecture for joint language identification and speech recognition", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Watanabe", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Hori", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Hershey", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. ASRU", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Watanabe, S., Hori, T., and Hershey, J. R. (2017). Language independent end-to-end architecture for joint language identification and speech recognition. In Proc. ASRU.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Computer-coding the IPA: a proposed extension of SAMPA", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "C" |
| ], |
| "last": "Wells", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wells, J. C. (1995). Computer-coding the IPA: a proposed extension of SAMPA. Revised draft, 4(28):1995.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Automatic speech recognition and topic identification for almost-zero-resource languages", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Wiesner", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Ondel", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Harman", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Manohar", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Trmal", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Dehak", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wiesner, M., Liu, C., Ondel, L., Harman, C., Manohar, V., Trmal, J., Huang, Z., Dehak, N., and Khudanpur, S. (2018). Automatic speech recognition and topic identification for almost-zero-resource languages. In Proc. Interspeech.",
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Tree-based state tying for high accuracy acoustic modelling",
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "J" |
| ], |
| "last": "Young", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "J" |
| ], |
| "last": "Odell", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "C" |
| ], |
| "last": "Woodland", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proceedings of the workshop on Human Language Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "307--312", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Young, S. J., Odell, J. J., and Woodland, P. C. (1994). Tree-based state tying for high accuracy acoustic modelling. In Proceedings of the workshop on Human Language Technology, pages 307-312. Association for Computational Linguistics.",
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Highway long short-term memory RNNs for distant speech recognition",
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Yao",
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proc. ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, Y., Chen, G., Yu, D., Yao, K., Khudanpur, S., and Glass, J. (2016). Highway long short-term memory RNNs for distant speech recognition. In Proc. ICASSP.",
| "links": null |
| } |
| }, |
| "ref_entries": {} |
| } |
| } |