| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:33:56.567696Z" |
| }, |
| "title": "Adapting Language Specific Components of Cross-Media Analysis Frameworks to Less-Resourced Languages: the Case of Amharic", |
| "authors": [ |
| { |
| "first": "Yonas", |
| "middle": [], |
| "last": "Woldemariam", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ume\u00e5 University", |
| "location": { |
| "country": "Sweden" |
| } |
| }, |
| "email": "yonasd@cs.umu.se" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Dahlgren", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ume\u00e5 University", |
| "location": { |
| "country": "Sweden" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present an ASR based pipeline for Amharic that orchestrates NLP components within a cross media analysis framework (CMAF). One of the major challenges that are inherently associated with CMAFs is effectively addressing multilingual issues. As a result, many languages remain under-resourced and fail to leverage out of available media analysis solutions. Although spoken natively by over 22 million people and there is an ever-increasing amount of Amharic multimedia content on the Web, querying them with simple text search is difficult. Searching for, especially audio/video content with simple key words, is even hard as they exist in their raw form. In this study, we introduce a spoken and textual content processing workflow into a CMAF for Amharic. We design an ASR-named entity recognition (NER) pipeline that includes three main components: ASR, a transliterator and NER. We explore various acoustic modeling techniques and develop an OpenNLP-based NER extractor along with a transliterator that interfaces between ASR and NER. The designed ASR-NER pipeline for Amharic promotes the multilingual support of CMAFs. Also, the state-of-the art design principles and techniques employed in this study shed light for other less-resourced languages, particularly the Semitic ones.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present an ASR based pipeline for Amharic that orchestrates NLP components within a cross media analysis framework (CMAF). One of the major challenges that are inherently associated with CMAFs is effectively addressing multilingual issues. As a result, many languages remain under-resourced and fail to leverage out of available media analysis solutions. Although spoken natively by over 22 million people and there is an ever-increasing amount of Amharic multimedia content on the Web, querying them with simple text search is difficult. Searching for, especially audio/video content with simple key words, is even hard as they exist in their raw form. In this study, we introduce a spoken and textual content processing workflow into a CMAF for Amharic. We design an ASR-named entity recognition (NER) pipeline that includes three main components: ASR, a transliterator and NER. We explore various acoustic modeling techniques and develop an OpenNLP-based NER extractor along with a transliterator that interfaces between ASR and NER. The designed ASR-NER pipeline for Amharic promotes the multilingual support of CMAFs. Also, the state-of-the art design principles and techniques employed in this study shed light for other less-resourced languages, particularly the Semitic ones.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Automatic Speech Recognition (ASR) and Named Entity Recognition (NER) perform information extraction tasks on spoken and textual documents respectively. ASR generates a transcription text from speech data. ASR technologies have been used for many applications such as spoken document indexing and retrieval (Chang et al., 2005; Aichroth et al., 2015; Le et al., 2017) , spoken dialogue systems (Ivanov et al., 2015) , speech translation (St\u00fcker et al., 2011) , and so on. NER is used to identify and extract entity mentions, such as names of people, locations, etc from textual contents. In natural text analysis, NER performs a pre-processing task for downstream annotators (e.g., syntactic parsers (Marneffe et al., 2006) ) and identifies proper nouns and classifies them into known categories (e.g., Person, Place, Organization and so on). While both ASR and NER are essential to solve specific problems in isolation, it is also possible to join them systematically to operate on same media (e.g., a webpage containing text and audio tracks), and apply them in succession to add contextual information on the metadata associated with audio/video contents for semantic search (Aichroth et al., 2015; Le et al., 2017) . Depending on the purpose of the application in question, ASR and NER can be combined in various ways. For example, in cross-media analysis frameworks such as EUMSSI 1 (Event Understanding through Multimodal Social Stream Interpretation) and MICO 2 (Media in Context), their combination is defined as an analysis workflow or analysischain called an ASR-NER pipeline that basically includes speech transcription and named entity extraction services. Within these frameworks there also exist complex multimedia analysis pipelines designed to meet the requirements of complex information retrieval use cases, for instance, searching for video shots, where a person (in the shots) says something about a specific political issue using a keywordsdriven approach. \nNowadays, there are plenty of multimedia extraction tools used to make searching web contents convenient. However, most of these tools are developed for well researched and resourced languages such as English and Spanish, and specific domains of applications. Due to this reason, many languages including Amharic, remained undersourced. That severely limits the access of information available in those languages. There are some studies (Abate et al., 2009; Yifiru, 2003; Belay, 2014; Demeke and Hailemariam, 2012) and contributions on building language technologies for Amharic, but most of them are developed as proof-of-concept prototypes with very limited data and resources (Gauthier et al., 2016; ELRA-W0074, 2014; HaBiT, 2016) . As a result, it is often challenging to get computational linguistic resources for Amharic required for either NLP studies or commercial use. Amharic is the official language of Ethiopia, spoken by over 22 million people, also according to the latest census carried out by Central Statistical Agency of Ethiopia 3 , the second most spoken Semitic language next to Arabic. The writing system of Amharic is called \"fidel\"; shared with the other Semitic language of Ethiopia, Tigrinya. Amharic has a unique writing system and its basic alphabet units have a consonant-vowel (CV) syllabic structure, usually vowels are omitted in the written form of CV. There is an everincreasing amount of Amharic digital contents of various types: text, images, audio, video, etc. on the Web due to emerging information sharing platforms such as social media and video hosting sites. However, querying them with simple text search is difficult, especially audio and video contents, is even very hard as they exist in raw format (not well indexed). Thus, obviously it is very demanding to have linguistically motivated multimedia analysis and extraction tools that could potentially deal with language-related concerns and make Amharic contents more searchable through keywords. \nThe most reasonable and affordable solution is to use open-source multi-lingual information extraction frameworks that provide media analysis, extraction and indexing, search and retrieval services, though they require language models of certain types. One of existing open-source media analysis solutions, is the MICO platform, though it is at early stage of its release. Ideally, the platform allows extraction of multimedia contents of different languages using the corresponding language models. Within the platform, there are a number of pre-defined analysis pipelines along with their metadata extractors. The aim of this study is to investigate adapting language specific components of MICO for Amharic. That could potentially be extended to other languages, particularly Semitic ones as they share similar orthography (e.g., Tigrinya) and phonology (e.g., Arabic). Within MICO, there are several natural language dependent multimedia analysis components such as text classification and text language detection including the ASR-NER pipeline. However, we only focus on designing of an ASR-NER pipeline for Amharic using the design principles, the standards and the technologies used in MICO. The pipeline could be considered as the first step to be able to use the MICO platform and for developing other important metadata extractors to analyze Amharic contents. Indeed, the pipeline is useful in itself, at least to index video /audio contents with extracted entities. To completely benefit from the platform more effort is needed in the direction of identifying and adapting other language dependent analysis components, for instance, sentiment analysis. We basically develop Kaldibased acoustic models, a transliterator and an OpenNLP based NER extractor, to build the Amharic ASR-NER pipeline. We got motivated for this study as we are one the partners of the MICO project and responsible for implementing NLP tools. \nWhile most of the implementation is done only for English, the MICO architecture allows for the integration of other language models via its API. Nevertheless, it is challenging to adapt MICO to under-resourced languages due to its requirement of trained language models that strictly satisfy the underlying design principles. This presents an opportunity to investigate the possibilities of adapting relevant language models for Amharic. We discuss related works in Section 2., the MICO platform in Section 3., the designed ASR-NER pipeline and the discussion in Section 4., the challenges and solutions in Section 5. and, finally, future work and conclusion in Section 6..", |
| "cite_spans": [ |
| { |
| "start": 307, |
| "end": 327, |
| "text": "(Chang et al., 2005;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 328, |
| "end": 350, |
| "text": "Aichroth et al., 2015;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 351, |
| "end": 367, |
| "text": "Le et al., 2017)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 394, |
| "end": 415, |
| "text": "(Ivanov et al., 2015)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 437, |
| "end": 458, |
| "text": "(St\u00fcker et al., 2011)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 700, |
| "end": 723, |
| "text": "(Marneffe et al., 2006)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1178, |
| "end": 1201, |
| "text": "(Aichroth et al., 2015;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1202, |
| "end": 1218, |
| "text": "Le et al., 2017)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 2415, |
| "end": 2435, |
| "text": "(Abate et al., 2009;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 2436, |
| "end": 2449, |
| "text": "Yifiru, 2003;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 2450, |
| "end": 2462, |
| "text": "Belay, 2014;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 2463, |
| "end": 2492, |
| "text": "Demeke and Hailemariam, 2012)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 2657, |
| "end": 2680, |
| "text": "(Gauthier et al., 2016;", |
| "ref_id": null |
| }, |
| { |
| "start": 2681, |
| "end": 2698, |
| "text": "ELRA-W0074, 2014;", |
| "ref_id": null |
| }, |
| { |
| "start": 2699, |
| "end": 2711, |
| "text": "HaBiT, 2016)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Background", |
| "sec_num": "1." |
| }, |
| { |
| "text": "There are a number of papers (Magnini et al., 2013; Hori and Nakamura, 2006) on extraction of named entities on speech transcripts on digital spoken archives for various purposes, though it is hardly possible to get any for Amharic. There are also a few research projects that investigated the introduction of an ASR-NER pipeline in multi-modal cross-media analysis frameworks for different types of languages. We primarily focus on discussing the methods used and the results achieved by these projects, as they probably best put our study into perspective, namely MICO and EUMSSI. In addition to that, although there is no published literature on the task of NER on speech transcription for Amharic, we present a brief review of research works on standalone speech recognition and named entity recognition conducted independently from each other. During the development of MICO metadata extractors, special attention was given to the ASR component due to the fact that most extractors, particularly text analysis components including NER heavily depend on the result produced by the ASR extractor. In order to achieve high-quality speech transcription, state-of-the-art open-source and proprietary libraries for ASR have been well studied and evaluated against sample video contents, then the respective comparative analysis was carried out beforehand. Consequently, Kaldi 4 was chosen based on the criterion of accuracy and other technical reasons. The other good quality of Kaldi is its multi-lingual support. Most of the experiments that make use of Kaldi within MICO were effectively carried out only for English, though the MICO Showcases were planned for Arabic and Italian as well. The most challenging part of training Kaldi is that preparing a parallel corpus (speech and text) is quite costly. Within MICO, the ASR is implemented as a speechto-text pipeline to analyze video content and produce the corresponding text transcription in various formats. \nThe pipeline includes audio-demultiplexing, for extracting and down-sampling the audio signal from video content, speaker diarization (Meignier and Merlin, 2010; Tranter and Reynolds, 2006) for segmenting information along with gender classification and speaker partitioning, speech transcription, for transcribing audio signal into text. The resulting textual content generated from the pipeline is further analyzed by text analysis components including the NER extractor. The NER extractor provides a named entity extraction service on-demand when requested by other registered extractors requiring (depending) on the output produced by it. NER also takes plain text (with a text/plain MIME type) from other possible sources of textual contents such as forum discussion posts after pre-processing and parsing tasks. The NER extractor is based on the OpenNLP toolkit, that is an open-source library providing a NER service. MICO provides OpenNLP-based NER language models for English, German, Spanish and Italian, and allows an integration of models for other languages. The ASR-NER pipeline introduced in MICO performs analysis workflows, for instance, detecting a person in a video, by collaborating with image analysis components such as the face detection extractor. Some preliminary showcases have been demonstrated by the use case partners, for instance, InsideOut10 (one of the use case partners of the MICO project) built a showcase application that retrieves video shots containing a specific person talking about a specific title (Kurz et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 51, |
| "text": "(Magnini et al., 2013;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 52, |
| "end": 76, |
| "text": "Hori and Nakamura, 2006)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 2098, |
| "end": 2125, |
| "text": "(Meignier and Merlin, 2010;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 2126, |
| "end": 2153, |
| "text": "Tranter and Reynolds, 2006)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 3505, |
| "end": 3524, |
| "text": "(Kurz et al., 2015)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "The EUMSSI platform basically provides multimodal analytics and interpretation services for different types of data obtained from various online media sources. (their demo is available on 5 ). EUMSSI seems to mainly target journalists as end users, automating their time-consuming tasks of organizing information about various events from different online and traditional data sources providing un/structured contents. The platform allows to search multimedia contents aggregated and filtered from media search engines in an interactive fashion, then enriching, contextualizing the media with extracted metadata and retrieves the result with the multimodal approach. The NER component of EUMSSI is based on the Stanford NER (Finkel et al., 2005) , running on the transcription generated by ASR and text extracted by OCR (Optical character recognition) from video contents, in addition to other types of textual contents from news and social media. The transcription returned from the ASR service is normalized by an auxiliary component beforehand. The ASR-NER pipeline implemented in EUMSSI, is used to annotate the speech segments uttered by each speaker shown in a video with the corresponding transcriptions and mentioned names. The resulting information is intended to get combined with the annotations obtained from the face recognition component, that enables video retrieval applications to support different search options, for instance, retrieving quotations of peoples (Le et al., 2017) . There are also several studies on named entity extraction on speech transcripts for independent NLP systems or audio/video analysis frameworks. For example, in the Evalita (evaluation campaign of NLP and Speech tools for Italian) 2011 workshop (Magnini et al., 2013) , one of the tasks was named entity recognition on transcribed broadcast news. \nThe purpose is to investigate the impact of the transcription errors on NLP systems and explore NER approaches that cope with the peculiarities of the resulting transcripts from ASR systems. There are a number of studies on the design and development of ASR and NER systems for Amharic. Relatively, NER is a less researched area than ASR. The survey in (Abate et al., 2009) , summarizes ASR research works attempted for Amharic, ranging from syllable to sentence level detection, from speaker dependent to speaker independent speech recognition. According to the survey most of the works are done using quite similar techniques i.e. HMM (Hidden Markov Model) (Rabiner, 1989) and tools such as HTK (HMM Tool Kit). There is an attempt to develop and integrate an ASR system into the Microsoft Word application to enable it to receive file related commands. The survey also pointed out that the major reasons, why the ASR systems failed to be used in speech applications, to mentions some of them: they are trained on read speech with a limited dataset and fail to handle germination and morphological variations. There are also a few unpublished research works on Amharic NER (Mehamed, 2010; Belay, 2014) . The recent work (Gamb\u00e4ck and Sikdar, 2017) introduced deep learning with the skip-gram word-embedding technique by extending the previous works. The authors in (Gamb\u00e4ck and Sikdar, 2017) developed Amharic NER prototypes using the same method i.e., Conditional Random Fields (Sobhana et al., 2010; Finkel et al., 2005) and the same corpus as in (Mehamed, 2010; Belay, 2014) but different subsets, and obtained different results.", |
| "cite_spans": [ |
| { |
| "start": 724, |
| "end": 745, |
| "text": "(Finkel et al., 2005)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1479, |
| "end": 1496, |
| "text": "(Le et al., 2017)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1743, |
| "end": 1765, |
| "text": "(Magnini et al., 2013)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 2198, |
| "end": 2218, |
| "text": "(Abate et al., 2009)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 2504, |
| "end": 2519, |
| "text": "(Rabiner, 1989)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 3019, |
| "end": 3034, |
| "text": "(Mehamed, 2010;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 3035, |
| "end": 3047, |
| "text": "Belay, 2014)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 3066, |
| "end": 3092, |
| "text": "(Gamb\u00e4ck and Sikdar, 2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 3210, |
| "end": 3236, |
| "text": "(Gamb\u00e4ck and Sikdar, 2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 3324, |
| "end": 3346, |
| "text": "(Sobhana et al., 2010;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 3347, |
| "end": 3367, |
| "text": "Finkel et al., 2005)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 3394, |
| "end": 3409, |
| "text": "(Mehamed, 2010;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 3410, |
| "end": 3422, |
| "text": "Belay, 2014)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Basically the MICO plaform provides media analysis, metadata publishing, search and recommendation services (described in (Aichroth et al., 2015) ). It has three types of metadata extractors, textual extractors for performing linguistic analysis such as parsing, sentiment analysis and text classification, image extractors for performing image analysis for detecting and human faces and animals from images, audio extractors for performing different speech analysis tasks such as detecting whether audio signals contain music or speech, and extracting audio tracks from video content and producing a transcription. Metadata extractors interact and collaborate with each other in automatic fashion via a service orchestration component (aka broker) to put a media resource in context. Several semantic web technologies such as Apache Marmotta 6 and SPARQL-MM 7 are used for storing the metadata annotation of analysis results in a RDF format and querying the metadata respectively. The Apache Hadoop 8 distributed file system is used for binary data, and Apache Solr 9 for fulltext searching.", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 145, |
| "text": "(Aichroth et al., 2015)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The MICO Platform", |
| "sec_num": "3." |
| }, |
| { |
| "text": "The Amharic ASR-NER pipeline designed in this study includes three main components: ASR, a transliterator and NER (see Figure 2 ). The pipeline performs extracting named mentioned from audio and video contents. Within the MICO architecture, the core ASR component needs to be connected with pre-processing and post-processing components, that forms a speech-to-text sub-pipeline. There are two pre-processing components, namely audidemux and LIUM diarization. The former does extracting audio tracks from a video input and down-sampling the audio tracks. The later does segmenting the audio tracks into smaller units using gender and speaker information. The post-processing component, namely XML2text transforms the output file (in the text/xml format) generated by the core ASR component to plain text (text/plain) required by the NER component.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 119, |
| "end": 127, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Amharic ASR-NER Pipeline", |
| "sec_num": "4." |
| }, |
| { |
| "text": "We explored and applied three different acoustic modeling techniques, namely GMM-HMMs ( Gaussian mixture model-hidden Markov model), DNN (Deep Neural Networks) and SGMM-HMMs (Subspace Gaussian Mixture model) to build the Amharic ASR. The Kaldi (Povey et al., 2011b) framework is used as an open speech recognition toolkit. While DNN-HMM is the state-of-the-art ASR modeling technique, SGMM-HMM (Povey et al., 2011a) Figure 1: The Architecture of the MICO Platform is an extension of GMM-HMM that is one of conventional acoustic modeling approaches. In HMM-based ASR systems, (S)GMMs and DNN estimate probability distributions of phonemes over HMM states given observations (acoustic inputs). During training they compute model parameters (e.g, mean vectors and covariance matrices) from training data, (S)GMMs use the expectation maximization algorithm, whereas DNN uses stochastic gradient descent and back-propagation to adjust weights and biases. As a result, three acoustic models have been built using each technique with a parallel speech-transcription corpus (Gauthier et al., 2016), a pronunciation lexicon and a language model. Originally, the raw corpus was prepared for the study in (Tachbelie et al., 2014) , it is about 20 and 2 hours of speech for training and testing respectively. We built a 5-gram language model using the SRILM 10 language modeling toolkit with the Kneser-Ney smoothing method. All the three acoustic models are trained with 13 Melfrequency cepstrum coefficients (MFCCs) features, followed by linear discriminant analysis (LDA) and transformation, maximum likelihood transform (MLLT). Also, feature-space maximum likelihood linear regression (fM-LLR) has been used as a speaker adaptation technique. The models are evaluated on the same test set containing 6203 words using the Word Error Rate (WER) metric, and obtained a WER of 50.88%, 38.72%, and 46.25% for GMM-HMM, DNN-HMM, SGMM-HMM respectively.", |
| "cite_spans": [ |
| { |
| "start": 244, |
| "end": 265, |
| "text": "(Povey et al., 2011b)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 394, |
| "end": 415, |
| "text": "(Povey et al., 2011a)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1194, |
| "end": 1218, |
| "text": "(Tachbelie et al., 2014)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Building the Amharic ASR", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "The experimental results obtained from the ASR models evaluation show that the DNN-HMM model outperforms 10 http://www.speech.sri.com/projects/srilm/ than GMM-HMM and SGMM-HMM models, with a WER of 12.16% and 7.53% respectively. The SGMM-HMM model in turn outperforms GMM-HMM with a WER of 4.63%. In our experiments, the GMM-HMM acoustic model gets trained with utterance-level transcriptions, the resulting model is used to generate phone alignments for DNN training. For that reason, the DNN acoustic model appears to have the best performance (regarding WER). DNNs also have the ability to capture larger context (larger window of frames), for example, the DNN in this study, is trained with 5 preceding and 5 following frames. Moreover, the number of model parameters (weights) computed by DNN is extremely larger than (S)GMMs, that potentially help learn the complex relationship between acoustic features extracted from input speech signal and their associated sequence of phonemes. For (S)GMMs, the training data seems to be too small to effectively model the distributions of acoustic units and generalize for new input data. Compared with state-of-the-art ASR systems built for other languages (Wang and Zheng, 2015; Xiong et al., 2018; Ghahremani et al., 2017) , for instance, authors in (Xiong et al., 2018 ) achieved a 5.1% of WER, that suggests more tasks are needed to improve our ASR. Unlike these studies where a large amount of data is used to train acoustic models, in our study the amount of training data is limited to 20 hours. Basically, the results obtained in this study could be improved by increasing the size of the training data, including a large vocabulary to deal with the problem of out-of-vocabulary (OOV) and language models with different size of n-gram (e.g., (n=3 to 7)). However, preparing such resources is quite expensive and time-consuming, especially for less studied and under-resourced languages like Amharic. \nTherefore, adapting from pre-trained acous- Figure 2 : The Amharic ASR-NER Pipeline within a Cross-media Analysis Framework tic models trained on other languages, particularly well resourced ones, seems to be more reasonable. Besides, multilingual model training (Ghoshal et al., 2013; Wang and Zheng, 2015; Feng and Lee, 2018) could be considered, where under multiple languages (including less-resourced ones) get trained together. Then, the resulting acoustic model can be used to produce speech transcriptions of any of these languages. However, it, in turn, requires a huge amount of multilingual resources including parallel speechtext corpora, language models and pronunciation dictionaries (Besacier et al., 2014; Wang and Zheng, 2015) . The languages also need to be related and share the same phone set. In practice, it is too difficult to meet these requirements, especially the problem gets worse when it comes to Amharic and other Semitic languages as they are not yet studied using this approach. The other alternative approach is transfer learning (Wang and Zheng, 2015; Huang et al., 2013; Ghahremani et al., 2017) , that allows an acoustic model trained on one language to get adapted to other languages. That is possible via sharing parameters learned during neural net based model training of one language to others.", |
| "cite_spans": [ |
| { |
| "start": 1203, |
| "end": 1225, |
| "text": "(Wang and Zheng, 2015;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1226, |
| "end": 1245, |
| "text": "Xiong et al., 2018;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1246, |
| "end": 1270, |
| "text": "Ghahremani et al., 2017)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1298, |
| "end": 1317, |
| "text": "(Xiong et al., 2018", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 2217, |
| "end": 2239, |
| "text": "(Ghoshal et al., 2013;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 2240, |
| "end": 2261, |
| "text": "Wang and Zheng, 2015;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 2262, |
| "end": 2281, |
| "text": "Feng and Lee, 2018)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 2652, |
| "end": 2675, |
| "text": "(Besacier et al., 2014;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 2676, |
| "end": 2697, |
| "text": "Wang and Zheng, 2015)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 3017, |
| "end": 3039, |
| "text": "(Wang and Zheng, 2015;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 3040, |
| "end": 3059, |
| "text": "Huang et al., 2013;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 3060, |
| "end": 3084, |
| "text": "Ghahremani et al., 2017)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1998, |
| "end": 2006, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion of ASR Acoustic Models", |
| "sec_num": "4.1.1." |
| }, |
| { |
| "text": "Given the very limited alternative choices for Amharic NER models, we used the NER model developed as part of the master thesis by Belay M. (Belay, 2014) . As the original model was built in a format which is incompatible with other meta-data extractors within MICO, the data needs to be re-labeled manually to train the OpenNLP name finder models. That is the right format supported by MICO. However, it was possible only to label the small portion of the whole data used in (Belay, 2014) with only the following entities: persons, locations and organizations. The models are trained using machine learning algorithms provided by OpenNLP: MaxEntropy (Berger et al., 1996) and Perceptron (Kazama and Torisawa, 2007) . As shown in Table 1 , the Perceptron based model outperforms the MaxEntropy based model, regarding all considered metrics. As both the training and testing sets are quite small (compared to the standard requirement, i.e., 15K sentences, but here the models are trained on 420 sentences and evaluated on 45 sentences along with 126 entities) for generalization, the evaluation details are not included in this study. In order to use the models, we then developed a Java-based application that loads the NER models and extracts named mentions from speech transcriptions. While the NER models are trained on the transliterated form of Amharic text, the ASR acoustic models are trained on transcripts with the actual Amharic orthography. Because it seems to be most open-source NLP research tools are primarily designed for English, Amharic NLP studies tend to use an Amharic-English transliteration scheme (Sebsibe et al., 2004) in their prototype development. In order to support the interfacing of ASR with NER, we implemented a simple rule-based transliteration program that converts Amharic scripts to its corresponding English transliteration form.", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 153, |
| "text": "(Belay, 2014)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 476, |
| "end": 489, |
| "text": "(Belay, 2014)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 651, |
| "end": 672, |
| "text": "(Berger et al., 1996)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 688, |
| "end": 715, |
| "text": "(Kazama and Torisawa, 2007)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1621, |
| "end": 1643, |
| "text": "(Sebsibe et al., 2004)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 730, |
| "end": 737, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Amharic NER Extractor and Transliterator", |
| "sec_num": "4.2." |
| }, |
| { |
| "text": "Since the main goal of this research is to make less-resourced languages beneficial out of media analysis technologies built for resource rich languages, by dealing with issues related with scarcity of computational linguistic resources, most of the challenges faced in the course of the study is inherently associated with the lack of resources. In addition, we assumed that the resources that have been available can be modified with reasonable amount of configuration tasks and then would fit to the designed experimental settings, but a number of evaluations (compatibility tests) have shown that they turned out to require to get transformed with much amount of works. For example, re-labeling the NER dataset, improving the quality of the acoustic models and so on. As part of our study, we also observed major important issues that arise from the natural language perspective during the adaptation of MICO for Amharic. The issues are very important for other new languages as well to be considered in advance. That mostly include availability of compatible language dependent analysis components and other pre/post processing auxiliary utilities (e.g., language detection, file format adaptors). In order to effectively meet the compatibility requirements (e.g., data models, file formats), one needs to closely look at the synergies and the dependencies between all meta-data extractors. Although MICO aims to provide an open data model via its API, at the current stage of its implementation new languages are required to strictly adhere some specifications, for example, while NER models need to be in an OpenNLP based, ASR acoustic models in deep neural net. Among other important language specific components ASR and NER seem to be very foundational and take a high priority, as others downstream extractors such as sentiment analysis, text classification and topic detection rely on the quality provided by the ASR-NER chain. The other problem is related with computational resources, training the DNN-HMM model has been challenging due to the requirement of GPU processors along with the queue scheduling service configuration. Although it is extremely slow, the training has been done on our CPU machine with a slight job-scheduling configuration task. Lastly, it concerns the interfacing Amharic ASR with NER. The transcription generated by ASR is in the actual orthographic form of Amharic, whereas the NER models are trained on an English-transliteration form. Thus, to support the NER models a simple rule-based transliteration program has been written.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Challenges and Solutions", |
| "sec_num": "5." |
| }, |
| { |
| "text": "We identified language dependent analysis components that are viewed as a high priority including ASR and NER, within a cross-media analysis platform. We designed an ASR-NER analysis pipeline for Amharic based on state-of-the-art design principles and techniques employed in cross-media solutions, thus promoting the multi-lingual support of the MICO platform. Moreover, this study provides a chance to further explore ASR methods introduced to potentially support under-resourced languages such as transfer learning. Moreover, the quality of both the ASR and NER models can be enhanced with availability of more data and improve the transliteration phase to reasonable quality in the future. Also, as this study has been done during the early release stages of the MICO platform for English, it has been hard to fully support Amharic for more detailed experiments. However, for future it would be interesting to carry out additional evaluations across other parts of the pipeline. Generally, other languages somehow take advantages of the methods proposed here, especially those that share a similar orthographic structure with Amharic, such as Tigrinya. Also, the method can be easily extended for other Semitic languages such as Arabic and Hebrew.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6." |
| }, |
| { |
| "text": "https://www.eumssi.eu 2 https://www.mico-project.eu", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.csa.gov.et", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://kaldi-asr.org", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://demo.eumssi.eu/demo/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://marmotta.apache.org 7 http://marmotta.apache.org/kiwi/sparql-mm.html 8 http://hadoop.apache.org 9 http://lucene.apache.org/solr/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We acknowledge the financial support from the EU FP7 MICO project. We also thank Mikyas Belay for providing Amharic NER models used in this study.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": "7." |
| }, |
| { |
| "text": "ter's thesis, School of Information Studies for Africa, Addis Ababa University. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Amharic speech recognition: Past, present and future", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Abate", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Tachbelie", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Menze", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 16th International Conference of Ethiopian Studies", |
| "volume": "", |
| "issue": "", |
| "pages": "1391--1401", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abate, S., Tachbelie, M., and Menze, W. (2009). Amharic speech recognition: Past, present and future. In Proceed- ings of the 16th International Conference of Ethiopian Studies, pages 1391-1401.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Mico-media in context", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Aichroth", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Weigel", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kurz", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Stadler", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Drewes", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Bj\u00f6rklund", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Schlegel", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Berndl", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Perez", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bowyer", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Volpini", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of 2015 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--4", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aichroth, P., Weigel, C., Kurz, T., Stadler, H., Drewes, F., Bj\u00f6rklund, J., Schlegel, K., Berndl, E., Perez, A., Bowyer, A., and Volpini, A. (2015). Mico-media in con- text. In Proceedings of 2015 IEEE International Con- ference on Multimedia and Expo Workshops (ICMEW), pages 1-4.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Amharic Named Entity Recognition Using a Hybrid Approach", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Belay", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Belay, M. (2014). Amharic Named Entity Recognition Us- ing a Hybrid Approach. Master's thesis, School of Infor- mation Informatics, Addis Ababa University.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A maximum entropy approach to natural language processing", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "L" |
| ], |
| "last": "Berger", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "D" |
| ], |
| "last": "Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "Pietra", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [ |
| "J D" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Computational Linguistics", |
| "volume": "22", |
| "issue": "", |
| "pages": "39--71", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Berger, A. L., Pietra, S. D., and Pietra, V. J. D. (1996). A maximum entropy approach to natural language process- ing. Computational Linguistics, 22:39-71.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Automatic speech recognition for underresourced languages", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Besacier", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Barnard", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Karpov", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Schultz", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Speech Communication", |
| "volume": "56", |
| "issue": "", |
| "pages": "85--100", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Besacier, L., Barnard, E., Karpov, A., and Schultz, T. (2014). Automatic speech recognition for under- resourced languages. Speech Communication, 56:85- 100.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Combining text and audio-visible features in video indexing", |
| "authors": [ |
| { |
| "first": "S.-F", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Manmatha", |
| "suffix": "" |
| }, |
| { |
| "first": "T.-S", |
| "middle": [], |
| "last": "Chua", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Acoustics, Speech, and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1005--1008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang, S.-F., Manmatha, R., and Chua, T.-S. (2005). Com- bining text and audio-visible features in video index- ing. In Acoustics, Speech, and Signal Processing, pages 1005-1008.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Duration modeling of phonemes for amharic text to speech system", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Demeke", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hailemariam", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the International Conference on Management of Emergent Digital EcoSystems", |
| "volume": "", |
| "issue": "", |
| "pages": "1--7", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Demeke, Y. and Hailemariam, S. (2012). Duration mod- eling of phonemes for amharic text to speech system. In Proceedings of the International Conference on Manage- ment of Emergent Digital EcoSystems, pages 1-7.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Improving cross-lingual knowledge transferability using multilingual tdnn-blstm with language-dependent pre-final layer", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "2439--2443", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Feng, S. and Lee, T. (2018). Improving cross-lingual knowledge transferability using multilingual tdnn-blstm with language-dependent pre-final layer. In Proceedings of Interspeech, pages 2439-2443.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Incorporating non-local information into information extraction systems by gibbs sampling", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Grenager", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 43rd annual meeting on association for computational linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "363--370", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Finkel, J. R., Grenager, T., and Manning, C. (2005). Incor- porating non-local information into information extrac- tion systems by gibbs sampling. In Proceedings of the 43rd annual meeting on association for computational linguistics, pages 363-370.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Named entity recognition for amharic using deep learning", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Gamb\u00e4ck", |
| "suffix": "" |
| }, |
| { |
| "first": "U", |
| "middle": [ |
| "K" |
| ], |
| "last": "Sikdar", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 IST-Africa Week Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gamb\u00e4ck, B. and Sikdar, U. K. (2017). Named entity recognition for amharic using deep learning. In 2017 IST-Africa Week Conference (IST-Africa), pages 1-8.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Investigation of transfer learning for asr using lf-mmi trained neural networks", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Ghahremani", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Manohar", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Hadian", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of 2017 IEEE Automatic Speech Recognition and Understanding Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "279--286", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ghahremani, P., Manohar, V., Hadian, H., Povey, D., and Khudanpur, S. (2017). Investigation of transfer learning for asr using lf-mmi trained neural networks. In Pro- ceedings of 2017 IEEE Automatic Speech Recognition and Understanding Workshop, pages 279-286.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Multilingual training of deep neural networks", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ghoshal", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Swietojanski", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Renals", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of 2013 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "7319--7323", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ghoshal, A., Swietojanski, P., and Renals, S. (2013). Mul- tilingual training of deep neural networks. In Proceed- ings of 2013 IEEE International Conference on Acous- tics, Speech and Signal Processing, pages 7319-7323.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "An extremely large vocabulary approach to named entity extraction from speech", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Hori", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "2006 IEEE International Conference on Acoustics Speech and Signal Processing Proceedings", |
| "volume": "", |
| "issue": "", |
| "pages": "973--976", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hori, T. and Nakamura, A. (2006). An extremely large vocabulary approach to named entity extraction from speech. In 2006 IEEE International Conference on Acoustics Speech and Signal Processing Proceedings, pages 973-976.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Cross-language knowledge transfer using multilingual deep neural network with shared hidden layers", |
| "authors": [ |
| { |
| "first": "J.-T", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of 2013 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "7304--7308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huang, J.-T., Li, J., Yu, D., Deng, L., and Gong, Y. (2013). Cross-language knowledge transfer using mul- tilingual deep neural network with shared hidden lay- ers. In Proceedings of 2013 IEEE International Confer- ence on Acoustics, Speech and Signal Processing, pages 7304-7308.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Automated speech recognition technology for dialogue interaction with non-native interlocutors", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "V" |
| ], |
| "last": "Ivanov", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Ramanarayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Suendermann-Oeft", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lopez", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Evanini", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tau", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "134--138", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivanov, A. V., Ramanarayanan, V., Suendermann-Oeft, D., Lopez, M., Evanini, K., and Tau, J. (2015). Automated speech recognition technology for dialogue interaction with non-native interlocutors. In Proceedings of the 16th Annual Meeting of the Special Interest Group on Dis- course and Dialogue, pages 134-138.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A new perceptron algorithm for sequence labeling with non-local features", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kazama", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Torisawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "315--324", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kazama, J. and Torisawa, K. (2007). A new perceptron al- gorithm for sequence labeling with non-local features. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Compu- tational Natural Language Learning (EMNLP-CoNLL), pages 315-324.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Enabling access to linked media with sparql-mm", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kurz", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Schlegel", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kosch", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 24th International Conference on World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kurz, T., Schlegel, K., and Kosch, H. (2015). Enabling access to linked media with sparql-mm. In Proceedings of the 24th International Conference on World Wide Web.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Towards large scale multimedia indexing: A case study on person discovery in broadcast news", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Bredin", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Sargent", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "India", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Lopez-Otero", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Barras", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Guinaudeau", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Gravier", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "B" |
| ], |
| "last": "Da Fonseca", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [ |
| "L" |
| ], |
| "last": "Freire", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [ |
| "K G" |
| ], |
| "last": "Do Patroc\u00ednio", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "J F" |
| ], |
| "last": "Guimar\u00e3es", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Mart\u00ed", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Morros", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Hernando", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "D" |
| ], |
| "last": "Fern\u00e1ndez", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Garc\u00eda-Mateo", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Meignier", |
| "suffix": "" |
| }, |
| { |
| "first": "J.-M", |
| "middle": [], |
| "last": "Odobez", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of International Workshop on Content-Based Multimedia Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Le, N., Bredin, H., Sargent, G., India, M., Lopez-Otero, P., Barras, C., Guinaudeau, C., Gravier, G., da Fonseca, G. B., Freire, I. L., do Patroc\u00ednio, Z. K. G., Guimar\u00e3es, S. J. F., Mart\u00ed, G., Morros, J. R., Hernando, J., Fern\u00e1n- dez, L. D., Garc\u00eda-Mateo, C., Meignier, S., and Odobez, J.-M. (2017). Towards large scale multimedia indexing: A case study on person discovery in broadcast news. In Proceedings of International Workshop on Content- Based Multimedia Retrieval, pages 1-6.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Evaluation of natural language and speech tools for italian", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Magnini", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Cutugno", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Falcone", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Pianta", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Lecture Notes in Computer Science", |
| "volume": "", |
| "issue": "", |
| "pages": "98--106", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Magnini, B., Cutugno, F., Falcone, M., and Pianta, E. (2013). Evaluation of natural language and speech tools for italian. In Lecture Notes in Computer Science, pages 98-106.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Generating typed dependency parses from phrase structure parses", |
| "authors": [ |
| { |
| "first": "M.-C", |
| "middle": [], |
| "last": "Marneffe", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Maccartney", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "InProc. 5th Interna-tional Conference on Language Resources and Evaluation (LREC 2006)", |
| "volume": "", |
| "issue": "", |
| "pages": "449--454", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marneffe, M.-C., MacCartney, B., and Manning, C. (2006). Generating typed dependency parses from phrase struc- ture parses. In InProc. 5th Interna-tional Conference on Language Resources and Evaluation (LREC 2006), pages 449-454.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Amharic Named Entity Recognition", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Mehamed", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mehamed, M. (2010). Amharic Named Entity Recogni- tion. Master's thesis, College of Natural Sciences, Addis Ababa University.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Lium spkdiarization: An open source toolkit for diarization", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Meignier", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Merlin", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "CMU SPUD Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meignier, S. and Merlin, T. (2010). Lium spkdiarization: An open source toolkit for diarization. In CMU SPUD Workshop.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "The subspace gaussian mixture model -a structured model for speech recognition", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Akyazi", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ghoshal", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Glembek", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "K" |
| ], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Karafi\u00e1t", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Rastrow", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "C" |
| ], |
| "last": "Rose", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Schwarz", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Computer Speech and Language", |
| "volume": "25", |
| "issue": "2", |
| "pages": "404--439", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Povey, D., Burget, L., Agarwal, M., Akyazi, P., Feng, K., Ghoshal, A., Glembek, O., Goel, N. K., Karafi\u00e1t, M., Rastrow, A., Rose, R. C., Schwarz, P., and Thomas, S. (2011a). The subspace gaussian mixture model -a struc- tured model for speech recognition. Computer Speech and Language, 25(2):404-439.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The Kaldi speech recognition toolkit", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ghoshal", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Boulianne", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Glembek", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hannemann", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Motlicek", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Schwarz", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Silovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Stemmer", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Vesely", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of IEEE 2011 Workshop on Automatic Speech Recognition and Understanding", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Povey, D., Ghoshal, A., Boulianne, G., Burget, L., Glem- bek, O., Goel, N., Hannemann, M., Motlicek, P., Qian, Y., Schwarz, P., Silovsky, J., Stemmer, G., and Vesely, K. (2011b). The Kaldi speech recognition toolkit. In Pro- ceedings of IEEE 2011 Workshop on Automatic Speech Recognition and Understanding.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A tutorial on hidden Markov models and selected applications in speech recognition. Proceedings of the", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "R" |
| ], |
| "last": "Rabiner", |
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "Proceedings of the IEEE", |
| "volume": "77", |
| "issue": "2", |
| "pages": "257--286", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rabiner, L. R. (1989). A tutorial on hidden Markov mod- els and selected applications in speech recognition. Pro- ceedings of the IEEE, 77(2):257-286.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Unit selection voice for amharic using festvox", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Sebsibe", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Prahallad", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Alan", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Rohit", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajeev", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Fifth ISCA Workshop on Speech Synthesis", |
| "volume": "", |
| "issue": "", |
| "pages": "103--107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebsibe, H., Prahallad, K., Alan, B., Rohit, K., and Ra- jeev, S. (2004). Unit selection voice for amharic using festvox. In Fifth ISCA Workshop on Speech Synthesis, pages 103-107.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Conditional random field based named entity recognition in geological text", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Sobhana", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Mitra", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "International Journal of Computer Applications", |
| "volume": "1", |
| "issue": "3", |
| "pages": "143--147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sobhana, N., Mitra, P., and Ghosh, S. (2010). Conditional random field based named entity recognition in geolog- ical text. International Journal of Computer Applica- tions, 1(3):143-147.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Quaero speech-to-text and text translation evaluation systems", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "St\u00fcker", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Kilgour", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Niehues", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "High Performance Computing in Science and Engineering'10", |
| "volume": "", |
| "issue": "", |
| "pages": "529--542", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "St\u00fcker, S., Kilgour, K., and Niehues, J. (2011). Quaero speech-to-text and text translation evaluation systems. In High Performance Computing in Science and Engineer- ing'10, pages 529-542. Springer.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Using different acoustic, lexical and language modeling units for asr of an under-resourced language -amharic", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Tachbelie", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Abate", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Besacier", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Speech Communication", |
| "volume": "56", |
| "issue": "", |
| "pages": "181--194", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tachbelie, M., Abate, S., and Besacier, L. (2014). Using different acoustic, lexical and language modeling units for asr of an under-resourced language -amharic. Speech Communication, 56:181-194.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "An overview of automatic speaker diarization systems", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Tranter", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "A" |
| ], |
| "last": "Reynolds", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "IEEE Transactions on Audio, Speech, and Language Processing", |
| "volume": "14", |
| "issue": "", |
| "pages": "1557--1565", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tranter, S. and Reynolds, D. A. (2006). An overview of au- tomatic speaker diarization systems. IEEE Transactions on Audio, Speech, and Language Processing, 14:1557- 1565.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Transfer learning for speech and language processing", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "F" |
| ], |
| "last": "Zheng", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, D. and Zheng, T. F. (2015). Transfer learning for speech and language processing. In Proceedings of 2015", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Signal and Information Processing Association Annual Summit and Conference", |
| "authors": [ |
| { |
| "first": "Asia-Pacific", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1225--1237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Asia-Pacific Signal and Information Processing Associa- tion Annual Summit and Conference, pages 1225-1237.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "The microsoft 2017 conversational speech recognition system", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Alleva", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Droppo", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "5934--5938", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiong, W., Wu, L., Alleva, F., Droppo, J., Huang, X., and Stolcke, A. (2018). The microsoft 2017 conversational speech recognition system. In Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Sig- nal Processing, pages 5934-5938.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Automatic Amharic Speech Recognition System to Command and Control Computers", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Yifiru", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yifiru, M. (2003). Automatic Amharic Speech Recogni- tion System to Command and Control Computers. Mas-", |
| "links": null |
| } |
| }, |
| "ref_entries": {} |
| } |
| } |