| { |
| "paper_id": "2019", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:49:05.537810Z" |
| }, |
| "title": "Yor\u00f9b\u00e1 Gender Recognition from Speech using Attention-based BiLSTM", |
| "authors": [ |
| { |
| "first": "Ibukunola", |
| "middle": [ |
| "A" |
| ], |
| "last": "Modupe", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Vaal University of Technology", |
| "location": {} |
| }, |
| "email": "ibukunolam@vut.ac.za" |
| }, |
| { |
| "first": "Tshephisho", |
| "middle": [ |
| "J" |
| ], |
| "last": "Sefara", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Next Generation Enterprises and Institutions Council for Scientific and Industrial Research", |
| "institution": "", |
| "location": {} |
| }, |
| "email": "tsefara@csir.co.za" |
| }, |
| { |
| "first": "Sunday", |
| "middle": [ |
| "O" |
| ], |
| "last": "Ojo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tshwane University of Technology", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Gender recognition in speech processing is one of the most challenging tasks. While many studies rely on extracting features and designing enhancement classifiers, classification accuracy is still not satisfactory. The remarkable improvement in performance achieved through the use of neural networks for automatic speech recognition has encouraged the use of deep neural networks in other voice techniques such as speech, emotion, language and gender recognition. An earlier study showed a significant improvement in the gender recognition of pictures and videos. In this paper, speech is used to create a gender recognition scheme based on neural networks. Attention-based BiLSTM architecture is proposed to discover the best approach for gender identification in Yor\u00f9b\u00e1. Acoustic features, including time, frequency, and cepstral features are extracted to train the model. The model obtained the state-of-the-art performance in speech-based gender recognition with 99% accuracy and F 1 score.", |
| "pdf_parse": { |
| "paper_id": "2019", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Gender recognition in speech processing is one of the most challenging tasks. While many studies rely on extracting features and designing enhancement classifiers, classification accuracy is still not satisfactory. The remarkable improvement in performance achieved through the use of neural networks for automatic speech recognition has encouraged the use of deep neural networks in other voice techniques such as speech, emotion, language and gender recognition. An earlier study showed a significant improvement in the gender recognition of pictures and videos. In this paper, speech is used to create a gender recognition scheme based on neural networks. Attention-based BiLSTM architecture is proposed to discover the best approach for gender identification in Yor\u00f9b\u00e1. Acoustic features, including time, frequency, and cepstral features are extracted to train the model. The model obtained the state-of-the-art performance in speech-based gender recognition with 99% accuracy and F 1 score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Gender recognition is an important topic in signal processing and can be applied in mobile healthcare system (Alhussein et al., 2016) , facial recognition (Hwang et al., 2009) , and age classification (Chen et al., 2011) . Applications of gender recognition system includes tasks such as (Mukherjee and Liu, 2010) : (i) Verifying a customer when making telephone bank transaction, (ii) Security measure when retrieving confidential information, (iii) Forensic, (iv) Surveillance, (v) and Blog authorship. Recognition of gender from the speech is a challenging task with these increasing number of systems in real-life. Recent hardware and software development allowed new techniques and methods to be explored to improve the efficiency of gender recognition systems. Gender classification systems from speech signal are affected by the performance of the recording tools, the language of the speaker, and noisy recording settings. As a result, to obtain adequate classification results, gender recognition from speech signals requires valid classifiers and feature extractors. In the areas of machine learning and computer vision, deep neural networks (DNNs) have shown notable achievements (Moghaddam and Ming-Hsuan Yang, 2000; Hwang et al., 2009) . Deep neural networks, after thorough training, can effectively extract and classify different feature sets. DNNs are most effective when the training set contains a complicated feature space that needs high-level representation. In this paper, deep recurrent neural networks (DRNNs) are used as classifiers and genderrecognition extractors. Bidirectional long-short term memory (BiLSTM) is combined with an attention mechanism to learn the features. Because gender recognition is a binary classification, a sigmoid activation function has been used to classify the gender.", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 133, |
| "text": "(Alhussein et al., 2016)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 155, |
| "end": 175, |
| "text": "(Hwang et al., 2009)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 201, |
| "end": 220, |
| "text": "(Chen et al., 2011)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 288, |
| "end": 313, |
| "text": "(Mukherjee and Liu, 2010)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1191, |
| "end": 1228, |
| "text": "(Moghaddam and Ming-Hsuan Yang, 2000;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1229, |
| "end": 1248, |
| "text": "Hwang et al., 2009)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Gender recognition systems for well-resourced languages like English are available, but for African languages like Yor\u00f9b\u00e1 are not available. Yor\u00f9b\u00e1 is a Niger-Congo language related to Igala, Edo, Ishan, and Igbo amongst others. It is one of the official languages of Nigeria and spoken in a couple of countries on the West African coast. An estimated 20+ million people speak Yor\u00f9b\u00e1 as their first language in southwestern Nigeria and more in the Republics of Benin and Togo. Yor\u00f9b\u00e1 is also spoken by diaspora communities of traders in Cote d'Ivoire, Ghana, Senegal and the Gambia, and it used to be a vibrant language in Freetown, Sierra Leone. Outside West Africa, millions of people have Yor\u00f9b\u00e1 language and culture as part of their heritage; Yor\u00f9b\u00e1 religion being one of the means of survival in Cuba during the obnoxious slave trade. Many who did not have Yor\u00f9b\u00e1 as their heritage bought into Yor\u00f9b\u00e1 identity through religious transformation. Yor\u00f9b\u00e1 language, culture and religion survived since then until now in Brazil and various other New World countries (Atanda et al., 2013; Pulleyblank et al., 2017) . Yor\u00f9b\u00e1 is identified as one of the under-resourced languages (Besacier et al., 2014) , few systems for under-resourced African languages has been developed Sefara et al., 2017; Van Niekerk and Barnard, 2012; Modipa and Davel, 2015; Manamela et al., 2018; . While the development of speech-based systems for Yor\u00f9b\u00e1 is an open research, it is essential to continue to create a Yor\u00f9b\u00e1 gender recognition system that may later help other researchers and to strengthen the cultural identity of the language.", |
| "cite_spans": [ |
| { |
| "start": 1065, |
| "end": 1086, |
| "text": "(Atanda et al., 2013;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1087, |
| "end": 1112, |
| "text": "Pulleyblank et al., 2017)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1176, |
| "end": 1199, |
| "text": "(Besacier et al., 2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1271, |
| "end": 1291, |
| "text": "Sefara et al., 2017;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1292, |
| "end": 1322, |
| "text": "Van Niekerk and Barnard, 2012;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1323, |
| "end": 1346, |
| "text": "Modipa and Davel, 2015;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1347, |
| "end": 1369, |
| "text": "Manamela et al., 2018;", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "The main contributions of this paper can be listed as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "\u2022 A new classifier architecture is proposed. A BiLSTM architecture with attention mechanism is used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "\u2022 Acoustic features such as Time, Frequency, and Cepstral-domain features are used for gender recognition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "\u2022 We release the code 1 used in this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "The rest of the paper is organized as follows: Section 2 gives the literature review on gender recognition. Section 3 details the features, learning models, and evaluation methods. Section 4 discusses the experimental results, and the paper is concluded in Section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "Gender recognition can be approached from text (Mukherjee and Liu, 2010) , images (Moghaddam and Ming-Hsuan Yang, 2000; Hwang et al., 2009; Kumar et al., 2019; Qawaqneh et al., 2017a) , videos (Ding and Ma, 2011; Chen et al., 2017) , accelerometers (Bales et al., 2016) , wearables (G\u00fcm\u00fc\u015f\u00e7\u00fc et al., 2018) , and speech (Harb and Chen, 2003; Azghadi et al., 2007; Meena et al., 2013) to train machine learning models and neural networks for classification. Meena et al. (2013) proposed a novel gender classification technique in speech processing using neural network and fuzzy logic. Authors used acoustic features such as short time energy, zero crossing rate and energy entropy. Their work can be expanded by not only using time domain features but also to include frequency and cepstral domain features. An example of cepstral-domain features are Mel Frequency Cepstral Coefficients (MFCCs). Qawaqneh et al. (2017a) used MFCCs, fundamental frequency (F0) and the shifted delta cepstral coefficients (SDC) to train a jointly fine-tuned deep neural networks. Their model obtained accuracy of 64%. Conversely, Harb and Chen (2003) did not use MFCCs but used Mel Frequency Spectral Coefficients (MFSC) to train a gender identification system using neural networks. Authors showed that smoothing improves the accuracy of the model and MFSC features were better than MFCC features. Azghadi et al. 2007used acoustic features and pitch features to train a gender classification system based on feed-forward backpropagation neural network. Their model obtained an accuracy of 96%. Qawaqneh et al. 2017bintroduced shared class labels among misclassified labels to regularize the DNN weights and to generate transformed MFCCs feature set using Backus-Naur Form (BNF). Authors used DNN and i-vector models to build age and gender classification system. The BNF-DNN obtained accuracy of 58.98 and BNF-I-vector obtained 56.13", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 72, |
| "text": "(Mukherjee and Liu, 2010)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 82, |
| "end": 119, |
| "text": "(Moghaddam and Ming-Hsuan Yang, 2000;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 120, |
| "end": 139, |
| "text": "Hwang et al., 2009;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 140, |
| "end": 159, |
| "text": "Kumar et al., 2019;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 160, |
| "end": 183, |
| "text": "Qawaqneh et al., 2017a)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 193, |
| "end": 212, |
| "text": "(Ding and Ma, 2011;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 213, |
| "end": 231, |
| "text": "Chen et al., 2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 249, |
| "end": 269, |
| "text": "(Bales et al., 2016)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 282, |
| "end": 304, |
| "text": "(G\u00fcm\u00fc\u015f\u00e7\u00fc et al., 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 318, |
| "end": 339, |
| "text": "(Harb and Chen, 2003;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 340, |
| "end": 361, |
| "text": "Azghadi et al., 2007;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 362, |
| "end": 381, |
| "text": "Meena et al., 2013)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 455, |
| "end": 474, |
| "text": "Meena et al. (2013)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 894, |
| "end": 917, |
| "text": "Qawaqneh et al. (2017a)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1109, |
| "end": 1129, |
| "text": "Harb and Chen (2003)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Literature Review", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Machine learning algorithm are used for gender recognition. Chaudhary and Sharma (2018) used support vector machines (SVMs) to train a gender identification system based on voice signal by extracting the features such as pitch, energy and MFCC. Their model obtained accuracy of 96.45%. Gaussian mixture models (GMMs) and multilayer perceptrons (MLPs) are used in (Djemili et al., 2012) to create a gender identification system. The models obtained accuracy of 96.4% using MFCCs as features. Jadav (2018) proposed a voice-based gender identification using machine learning. Author extracted acoustic features to train a SVM which obtained testing accuracy of 97%.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 87, |
| "text": "Chaudhary and Sharma (2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 363, |
| "end": 385, |
| "text": "(Djemili et al., 2012)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Literature Review", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The architecture of a gender recognition system is shown in Figure 1 . The system consists of the training and prediction phases.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 60, |
| "end": 68, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 In the training phase, the speech signal is inputted to the system, and pre-processing occurs (noise removal, dimensionality reduction). Acoustic features are extracted. Then a machine learning model is built and trained on the extracted features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 In the recognition phase, an unlabelled or unknown speech signal is inputted to the system. The model predicts and outputs the gender of the inputted signal. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We obtained speech database from (van Niekerk et al., 2015) used in (Van Niekerk and Barnard, 2012) , where recordings consist of 16 female and 17 male recordings in Yor\u00f9b\u00e1. About 130 utterances were read from short texts for each speaker. The length of the recordings is 165 minutes. The audios are 16 bit PCM at 16kHz sampling rate. We use Principal Component Analysis (PCA) (Moore, 1981; Ding and He, 2004) to explore the data in Figure 2 by scaling to 2 dimension. The centers are illustrated using k-means (Ding and He, 2004) with k = 2. We observe the data can be separated into males and females. This will simplify the learning of the models.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 59, |
| "text": "(van Niekerk et al., 2015)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 85, |
| "end": 99, |
| "text": "Barnard, 2012)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 377, |
| "end": 390, |
| "text": "(Moore, 1981;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 391, |
| "end": 409, |
| "text": "Ding and He, 2004)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 511, |
| "end": 530, |
| "text": "(Ding and He, 2004)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 433, |
| "end": 441, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Feature extraction is the transformation of original data into a dataset that contains the most discriminatory information, with reduced numbers of variables. The 34 acoustic features shown in Figure 3 are extracted from the short-term windows with frame size of 50ms at a Hamming window of 25ms using a library in (Giannakopoulos, 2015) . The final feature vector contains the mean and standard deviation which sums to feature size of 68. The features can be grouped into three categories: \u2022 Time-domain features (Zero Crossing Rate, Energy, and Entropy of Energy).", |
| "cite_spans": [ |
| { |
| "start": 315, |
| "end": 337, |
| "text": "(Giannakopoulos, 2015)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 193, |
| "end": 201, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Frequency-domain features (Spectral Spread, Spectral Centroid, Spectral Flux, Spectral Entropy, Spectral Rolloff, Chroma Deviation, Chroma Vector).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Cepstral-domain features -include MFCCs that have the ability to model the vocal tract filter. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "It is a crucial step for gender recognition using speech. The goal is to remove speaker and recording variability. We normalize features by removing the mean and scaling to a unit variance using the following normalization equation. For normalized feature\u0177:\u0177", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Normalization", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "= x \u2212 \u00b5 \u03c3 (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Normalization", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where \u03c3 represents the variance and \u00b5 represents the mean for each feature vector x.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Normalization", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "This section explains the proposed BiLSTM model. As shown in Figure 4 , the first layer is the input layer having the same size of the input vector. Followed by the BiLSTM layer having 128 units. Followed by the attention layer, followed by LSTM layer, followed by 4 dense layers with the last layer activated by the sigmoid function.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 61, |
| "end": 69, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Classifier Model", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "For this gender recognition problem, we model the speech signal using recurrent neural network (RNN), specifically BiLSTM. LSTM was introduced by Hochreiter and Schmidhuber (1997) , has shown to be stable and accurately model longtime dependencies in different tasks like speech recognition, machine learning, and computer vision (Moghaddam and Ming-Hsuan Yang, 2000; Hwang et al., 2009) . BiLSTM trains two LSTMs on the input sequence. The second LSTM is a reverse copy of the first one, the aim is to capture past and future input features for a specific time step.", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 179, |
| "text": "Hochreiter and Schmidhuber (1997)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 330, |
| "end": 367, |
| "text": "(Moghaddam and Ming-Hsuan Yang, 2000;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 368, |
| "end": 387, |
| "text": "Hwang et al., 2009)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BiLSTM Layer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "Attention is a mechanism allowing neural networks to examine specific areas of the input speech signal in more detail to decrease the task complexity and to exclude irrelevant information. An attention layer is included for determining the contribution of each signal frame to the whole speech signal. The attention mechanism assigns a weight w i to each frame feature h i . The hidden state is lastly calculated by a weighted sum function to generate a hidden acoustic feature vector r. Formally:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Layer", |
| "sec_num": "3.4.2" |
| }, |
| { |
| "text": "p j = tanh (W h h j + b h ), p j \u2208 [\u22121, 1] (2) w j = exp(p j ) \u03a3 N t=1 exp(p t ) , \u03a3 N j=1 w j = 1 (3) r =\u03a3 N j=1 w j h j , r \u2208 R 2L (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Layer", |
| "sec_num": "3.4.2" |
| }, |
| { |
| "text": "where W h and b h are the weight and bias from the attention layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Layer", |
| "sec_num": "3.4.2" |
| }, |
| { |
| "text": "The attention layer is followed by four dense layers with different sizes of neurons. The output of attention layer is fed into first dense layer with 128 hidden neurons activated by rectified linear unit. And to avoid overfitting, we add a dropout layer having probability of 0.5 between the first three dense layers that have 128, 64, and 32 neurons respectively. The last dense layer uses sigmoid activation function to create binary classification. The sigmoid activation function is defined as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dense Layer", |
| "sec_num": "3.4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03c3(x) = 1 1 + e \u2212x", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Dense Layer", |
| "sec_num": "3.4.3" |
| }, |
| { |
| "text": "This section describes the performance measurements used to evaluate model quality. The performance of the model is affected by the speech signal quality, the training data size, and most importantly the optimization of learning algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "The following evaluation metrics are applied:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Accuracy represents all correctly predicted samples, calculated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Accuracy = tp + tn tp + tn + f p + f n (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Binary cross entropy is a Sigmoid activation plus a Cross Entropy loss. We use binary cross entropy loss function since the labels of the data are binary. It is calculated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2212(y log(p) + (1 \u2212 y) \u00d7 log(1 \u2212 p))", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "where p is the probability predicted by the model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Precision is the total number of the positively predicted examples that are relevant. It is calculated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P recision = tp tp + f p", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Recall measures how well a model is at predicting the positives. It is calculated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Recall = tp tp + f n", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "F 1 score is the harmonic mean of precision and recall. It is calculated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "F 1 score = 2 \u00d7 precision \u00d7 recall precision + recall", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "where: \u2022 tp (true positive) is the number of males that are predicted as males.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "\u2022 tn (true negative) is the number of females that are predicted as females.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "\u2022 f p (false positive) is the number of females examples that are predicted as males.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "\u2022 f n (false negative) is the number of males examples that are predicted as females.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "This section discusses model performance results based on accuracy, F 1 score and binary cross entropy. The dataset is splitted into 90% for training, 10% for testing. The model is trained for 200 epochs and involved 3884 samples for training and 432 samples for testing. Table 1 shows the testing results after evaluating the model. We observe BiLSTM obtaining high accuracy and F 1 score of 99% after 200 epochs. The BiLSTM outperformed the neural network models in (Harb and Chen, 2003; Azghadi et al., 2007; Meena et al., 2013; Qawaqneh et al., 2017a,b) . Even though Qawaqneh et al. (2017a) used both images + audio files, their performance does not beat the BiLSTM. Figure 5a shows the accuracy curve of the BiLSTM model. The accuracy of model increased as the number of epochs increase. MLP (Harb and Chen, 2003) 92 MLP (Azghadi et al., 2007) 96 ANN + Fuzy Logic (Meena et al., 2013) 65 DNN (Qawaqneh et al., 2017a) 64 DNN (Qawaqneh et al., 2017b) 59 BiLSTM-Attention 99", |
| "cite_spans": [ |
| { |
| "start": 468, |
| "end": 489, |
| "text": "(Harb and Chen, 2003;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 490, |
| "end": 511, |
| "text": "Azghadi et al., 2007;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 512, |
| "end": 531, |
| "text": "Meena et al., 2013;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 532, |
| "end": 557, |
| "text": "Qawaqneh et al., 2017a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 572, |
| "end": 595, |
| "text": "Qawaqneh et al. (2017a)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 798, |
| "end": 807, |
| "text": "(Harb and", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 808, |
| "end": 849, |
| "text": "Chen, 2003) 92 MLP (Azghadi et al., 2007)", |
| "ref_id": null |
| }, |
| { |
| "start": 870, |
| "end": 890, |
| "text": "(Meena et al., 2013)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 898, |
| "end": 922, |
| "text": "(Qawaqneh et al., 2017a)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 930, |
| "end": 954, |
| "text": "(Qawaqneh et al., 2017b)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 272, |
| "end": 279, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 672, |
| "end": 681, |
| "text": "Figure 5a", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Overfitting happens when a model attempts to predict a trend in a noisy data. Overfitting is the consequence of a complicated model with excessive parameters. An overfitted model makes incorrect predictions as the trend does not represent the reality of the data. To show that overfitting is avoided, Figure 5b shows the binary cross entropy loss function curve. The loss function kept decreasing as number of training iterations increased. We observe BiLSTM reaching the lowest loss of 0.1 after 200 epochs. Hence, the model did not overfit.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 301, |
| "end": 310, |
| "text": "Figure 5b", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Overfitting", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "This paper presented a Yor\u00f9b\u00e1 gender recognition from speech using BiLSTM with attention mechanism. We discussed the literature on gender recognition. The acoustic features were explained together with normalization method. We explained the architecture of the proposed model. We observed BiLSTM achieving the state-of-the-art accuracy of 99% for a low-resourced language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The future work will focus on using transformer models for gender recognition. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://github.com/SefaraTJ/ yoruba-gender-recognition/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Automatic gender detection based on characteristics of vocal folds for mobile healthcare system", |
| "authors": [ |
| { |
| "first": "Musaed", |
| "middle": [], |
| "last": "Alhussein", |
| "suffix": "" |
| }, |
| { |
| "first": "Zulfiqar", |
| "middle": [], |
| "last": "Ali", |
| "suffix": "" |
| }, |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Imran", |
| "suffix": "" |
| }, |
| { |
| "first": "Wadood", |
| "middle": [], |
| "last": "Abdul", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Mobile Information Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1155/2016/7805217" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Musaed Alhussein, Zulfiqar Ali, Muhammad Imran, and Wadood Abdul. 2016. Automatic gender detec- tion based on characteristics of vocal folds for mo- bile healthcare system. Mobile Information Systems, 2016.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Yor\u00f9b\u00e1 automatic speech recognition: A review", |
| "authors": [ |
| { |
| "first": "Abdul Wahab Funsho", |
| "middle": [], |
| "last": "Atanda", |
| "suffix": "" |
| }, |
| { |
| "first": "Shahrul Azmi Mohd", |
| "middle": [], |
| "last": "Yusof", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hariharan", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Rural ICT Development (RICTD) International Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "116--121", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abdul Wahab Funsho Atanda, Shahrul Azmi Mohd Yusof, and M Hariharan. 2013. Yor\u00f9b\u00e1 automatic speech recognition: A review. In Rural ICT Devel- opment (RICTD) International Conference, pages 116-121.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Gender classification based on feedforward backpropagation neural network", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "Mostafa", |
| "Rahimi" |
| ], |
| "last": "Azghadi", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "Reza" |
| ], |
| "last": "Bonyadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hamed", |
| "middle": [], |
| "last": "Shahhosseini", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Artificial Intelligence and Innovations 2007: from Theory to Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "299--304", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-0-387-74161-1_32" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "S Mostafa Rahimi Azghadi, M Reza Bonyadi, and Hamed Shahhosseini. 2007. Gender classification based on feedforward backpropagation neural net- work. In Artificial Intelligence and Innovations 2007: from Theory to Applications, pages 299-304, Boston, MA. Springer US.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Gender classification of walkers via underfloor accelerometer measurements", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Bales", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "A" |
| ], |
| "last": "Tarazaga", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kasarda", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Batra", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "G" |
| ], |
| "last": "Woolard", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "D" |
| ], |
| "last": "Poston", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [ |
| "V N S" |
| ], |
| "last": "Malladi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "IEEE Internet of Things Journal", |
| "volume": "3", |
| "issue": "6", |
| "pages": "1259--1266", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/JIOT.2016.2582723" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Bales, P. A. Tarazaga, M. Kasarda, D. Batra, A. G. Woolard, J. D. Poston, and V. V. N. S. Malladi. 2016. Gender classification of walkers via underfloor ac- celerometer measurements. IEEE Internet of Things Journal, 3(6):1259-1266.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Automatic speech recognition for under-resourced languages: A survey", |
| "authors": [ |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Besacier", |
| "suffix": "" |
| }, |
| { |
| "first": "Etienne", |
| "middle": [], |
| "last": "Barnard", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Karpov", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Schultz", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Speech Communication", |
| "volume": "56", |
| "issue": "", |
| "pages": "85--100", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.specom.2013.07.008" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laurent Besacier, Etienne Barnard, Alexey Karpov, and Tanja Schultz. 2014. Automatic speech recog- nition for under-resourced languages: A survey. Speech Communication, 56:85-100.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Gender identification based on voice signal characteristics", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "K" |
| ], |
| "last": "Sharma", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "2018 International Conference on Advances in Computing, Communication Control and Networking (ICACCCN)", |
| "volume": "", |
| "issue": "", |
| "pages": "869--874", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICACCCN.2018.8748676" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Chaudhary and D. K. Sharma. 2018. Gender identification based on voice signal characteristics. In 2018 International Conference on Advances in Computing, Communication Control and Network- ing (ICACCCN), pages 869-874, Greater Noida (UP), India.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Gender-to-age hierarchical recognition for speech", |
| "authors": [ |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "2011 IEEE 54th International Midwest Symposium on Circuits and Systems (MWSCAS)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--4", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/MWSCAS.2011.6026475" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen. 2011. Gender-to-age hierarchical recognition for speech. In 2011 IEEE 54th International Midwest Sympo- sium on Circuits and Systems (MWSCAS), pages 1- 4.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Gender classification in live videos", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 IEEE International Conference on Image Processing (ICIP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1602--1606", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICIP.2017.8296552" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Chen, S. Liu, and Z. Chen. 2017. Gender classifica- tion in live videos. In 2017 IEEE International Con- ference on Image Processing (ICIP), pages 1602- 1606.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "K-means clustering via principal component analysis", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaofeng", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the twenty-first international conference on Machine learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/1015330.1015408" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Ding and Xiaofeng He. 2004. K-means cluster- ing via principal component analysis. In Proceed- ings of the twenty-first international conference on Machine learning, page 29. ACM.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Manifoldbased face gender recognition for video", |
| "authors": [ |
| { |
| "first": "Zhengming", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanjiao", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of 2011 International Conference on Computer Science and Network Technology", |
| "volume": "2", |
| "issue": "", |
| "pages": "1104--1107", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICCSNT.2011.6182153" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengming Ding and Yanjiao Ma. 2011. Manifold- based face gender recognition for video. In Proceed- ings of 2011 International Conference on Computer Science and Network Technology, volume 2, pages 1104-1107.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A speech signal based gender identification system using four classifiers", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Djemili", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Bourouba", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "C A" |
| ], |
| "last": "Korba", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "2012 International Conference on Multimedia Computing and Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "184--187", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICMCS.2012.6320122" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Djemili, H. Bourouba, and M. C. A. Korba. 2012. A speech signal based gender identification system using four classifiers. In 2012 International Confer- ence on Multimedia Computing and Systems, pages 184-187.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "pyaudioanalysis: An open-source Python library for audio signal analysis", |
| "authors": [ |
| { |
| "first": "Theodoros", |
| "middle": [], |
| "last": "Giannakopoulos", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "PloS one", |
| "volume": "10", |
| "issue": "12", |
| "pages": "1--17", |
| "other_ids": { |
| "DOI": [ |
| "10.1371/journal.pone.0144610" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theodoros Giannakopoulos. 2015. pyaudioanalysis: An open-source Python library for audio signal anal- ysis. PloS one, 10(12):1-17.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Gender classification via wearable gait analysis sensor", |
| "authors": [ |
| { |
| "first": "Abd\u00fclkadir", |
| "middle": [], |
| "last": "G\u00fcm\u00fc\u015f\u00e7\u00fc", |
| "suffix": "" |
| }, |
| { |
| "first": "Kerim", |
| "middle": [], |
| "last": "Karadag", |
| "suffix": "" |
| }, |
| { |
| "first": "Mustafa", |
| "middle": [], |
| "last": "\u00c7ali\u015fkan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mehmet", |
| "middle": [ |
| "Emin" |
| ], |
| "last": "Tenekec\u0131", |
| "suffix": "" |
| }, |
| { |
| "first": "Dursun", |
| "middle": [], |
| "last": "Akaslan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "26th Signal Processing and Communications Applications Conference (SIU)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--4", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/SIU.2018.8404181" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abd\u00fclkadir G\u00fcm\u00fc\u015f\u00e7\u00fc, Kerim Karadag, Mustafa \u00c7 ali\u015fkan, Mehmet Emin Tenekec\u0131, and Dursun Akaslan. 2018. Gender classification via wearable gait analysis sensor. In 2018 26th Signal Process- ing and Communications Applications Conference (SIU), pages 1-4. IEEE.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Gender identification using a general audio classifier", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Harb", |
| "suffix": "" |
| }, |
| { |
| "first": "Liming", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "International Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698)", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICME.2003.1221721" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Harb and Liming Chen. 2003. Gender identification using a general audio classifier. In 2003 Interna- tional Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698), volume 2, pages II-733, Baltimore, MD, USA.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/neco.1997.9.8.1735" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Face recognition using gender information", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Hwang", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kee", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "16th IEEE International Conference on Image Processing (ICIP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4129--4132", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICIP.2009.5413461" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "W. Hwang, H. Ren, H. Kim, S. Kee, and J. Kim. 2009. Face recognition using gender information. In 2009 16th IEEE International Conference on Image Pro- cessing (ICIP), pages 4129-4132.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Voice-based gender identification using machine learning", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Jadav", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "4th International Conference on Computing Communication and Automation (ICCCA)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--4", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/CCAA.2018.8777582" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Jadav. 2018. Voice-based gender identification using machine learning. In 2018 4th International Confer- ence on Computing Communication and Automation (ICCCA), pages 1-4.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Gender classification using machine learning with multifeature method", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "2019 IEEE 9th Annual Computing and Communication Workshop and Conference (CCWC)", |
| "volume": "", |
| "issue": "", |
| "pages": "0648--0653", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/CCWC.2019.8666601" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Kumar, S. Singh, and J. Kumar. 2019. Gender classification using machine learning with multi- feature method. In 2019 IEEE 9th Annual Comput- ing and Communication Workshop and Conference (CCWC), pages 0648-0653.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "The automatic recognition of Sepedi speech emotions based on machine learning algorithms", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "J" |
| ], |
| "last": "Manamela", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Manamela", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "I" |
| ], |
| "last": "Modipa", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "J" |
| ], |
| "last": "Sefara", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "B" |
| ], |
| "last": "Mokgonyane", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "2018 International Conference on Advances in Big Data, Computing and Data Communication Systems (icABCD)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--7", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICABCD.2018.8465403" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. J. Manamela, M. J. Manamela, T. I. Modipa, T. J. Sefara, and T. B. Mokgonyane. 2018. The auto- matic recognition of Sepedi speech emotions based on machine learning algorithms. In 2018 Interna- tional Conference on Advances in Big Data, Com- puting and Data Communication Systems (icABCD), pages 1-7.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Gender classification in speech recognition using fuzzy logic and neural network", |
| "authors": [ |
| { |
| "first": "Kunjithapatham", |
| "middle": [], |
| "last": "Meena", |
| "suffix": "" |
| }, |
| { |
| "first": "Kulumani", |
| "middle": [], |
| "last": "Subramaniam", |
| "suffix": "" |
| }, |
| { |
| "first": "Muthusamy", |
| "middle": [], |
| "last": "Gomathy", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "International Arab Journal of Information Technology (IAJIT)", |
| "volume": "", |
| "issue": "5", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kunjithapatham Meena, Kulumani Subramaniam, and Muthusamy Gomathy. 2013. Gender classification in speech recognition using fuzzy logic and neural network. International Arab Journal of Information Technology (IAJIT), 10(5).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Predicting vowel substitution in code-switched speech", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "I" |
| ], |
| "last": "Modipa", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "H" |
| ], |
| "last": "Davel", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "2015 Pattern Recognition Association of South Africa and Robotics and Mechatronics International Conference (PRASA-RobMech)", |
| "volume": "", |
| "issue": "", |
| "pages": "154--159", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/RoboMech.2015.7359515" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. I. Modipa and M. H. Davel. 2015. Predicting vowel substitution in code-switched speech. In 2015 Pat- tern Recognition Association of South Africa and Robotics and Mechatronics International Confer- ence (PRASA-RobMech), pages 154-159.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Gender classification with support vector machines", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Moghaddam", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Hsuan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)", |
| "volume": "", |
| "issue": "", |
| "pages": "306--311", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/AFGR.2000.840651" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Moghaddam and Ming-Hsuan Yang. 2000. Gender classification with support vector machines. In Pro- ceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580), pages 306-311.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Automatic speaker recognition system based on machine learning algorithms", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "B" |
| ], |
| "last": "Mokgonyane", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "J" |
| ], |
| "last": "Sefara", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "I" |
| ], |
| "last": "Modipa", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mogale", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Manamela", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "J" |
| ], |
| "last": "Manamela", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "2019 Southern African Universities Power Engineering Conference/Robotics and Mechatronics/Pattern Recognition Association of South Africa (SAUPEC/RobMech/PRASA)", |
| "volume": "", |
| "issue": "", |
| "pages": "141--146", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/RoboMech.2019.8704837" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. B. Mokgonyane, T. J. Sefara, T. I. Modipa, M. M. Mogale, M. J. Manamela, and P. J. Man- amela. 2019. Automatic speaker recognition sys- tem based on machine learning algorithms. In 2019 Southern African Universities Power En- gineering Conference/Robotics and Mechatron- ics/Pattern Recognition Association of South Africa (SAUPEC/RobMech/PRASA), pages 141-146.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Principal component analysis in linear systems: Controllability, observability, and model reduction", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| } |
| ], |
| "year": 1981, |
| "venue": "IEEE Transactions on Automatic Control", |
| "volume": "26", |
| "issue": "1", |
| "pages": "17--32", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TAC.1981.1102568" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Moore. 1981. Principal component analysis in linear systems: Controllability, observability, and model reduction. IEEE Transactions on Automatic Control, 26(1):17-32.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Improving gender classification of blog authors", |
| "authors": [ |
| { |
| "first": "Arjun", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 conference on Empirical Methods in natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "207--217", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arjun Mukherjee and Bing Liu. 2010. Improving gen- der classification of blog authors. In Proceedings of the 2010 conference on Empirical Methods in natu- ral Language Processing, pages 207-217. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Lagos-NWU Yoruba speech corpus", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "van Niekerk", |
| "suffix": "" |
| }, |
| { |
| "first": "Etienne", |
| "middle": [], |
| "last": "Barnard", |
| "suffix": "" |
| }, |
| { |
| "first": "Oluwapelumi", |
| "middle": [], |
| "last": "Giwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Azeez", |
| "middle": [], |
| "last": "Sosimi", |
| "suffix": "" |
| } |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel van Niekerk, Etienne Barnard, Oluwapelumi Giwa, and Azeez Sosimi. 2015. Lagos-NWU Yoruba speech corpus.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Yoruba", |
| "authors": [ |
| { |
| "first": "Douglas", |
| "middle": [], |
| "last": "Pulleyblank", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "The World's Major Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "882--898", |
| "other_ids": { |
| "DOI": [ |
| "10.4324/9781315084862" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douglas Pulleyblank et al. 2017. Yoruba. In The World's Major Languages, pages 882-898. Rout- ledge.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Age and gender classification from speech and face images by jointly finetuned deep neural networks", |
| "authors": [ |
| { |
| "first": "Zakariya", |
| "middle": [], |
| "last": "Qawaqneh", |
| "suffix": "" |
| }, |
| { |
| "first": "Arafat", |
| "middle": [], |
| "last": "Abu Mallouh", |
| "suffix": "" |
| }, |
| { |
| "first": "Buket", |
| "middle": [ |
| "D" |
| ], |
| "last": "Barkana", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Expert Systems with Applications", |
| "volume": "85", |
| "issue": "", |
| "pages": "76--86", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.eswa.2017.05.037" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zakariya Qawaqneh, Arafat Abu Mallouh, and Buket D. Barkana. 2017a. Age and gender classi- fication from speech and face images by jointly fine- tuned deep neural networks. Expert Systems with Applications, 85:76-86.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Deep neural network framework and transformed MFCCs for speaker's age and gender classification", |
| "authors": [ |
| { |
| "first": "Zakariya", |
| "middle": [], |
| "last": "Qawaqneh", |
| "suffix": "" |
| }, |
| { |
| "first": "Arafat", |
| "middle": [], |
| "last": "Abu Mallouh", |
| "suffix": "" |
| }, |
| { |
| "first": "Buket", |
| "middle": [ |
| "D" |
| ], |
| "last": "Barkana", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Knowledge-Based Systems", |
| "volume": "115", |
| "issue": "", |
| "pages": "5--14", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.knosys.2016.10.008" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zakariya Qawaqneh, Arafat Abu Mallouh, and Buket D. Barkana. 2017b. Deep neural network framework and transformed MFCCs for speaker's age and gender classification. Knowledge-Based Systems, 115:5-14.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Text-based language identification for some of the under-resourced languages of South Africa", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "J" |
| ], |
| "last": "Sefara", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Manamela", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "T" |
| ], |
| "last": "Malatji", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "2016 International Conference on Advances in Computing and Communication Engineering (ICACCE)", |
| "volume": "", |
| "issue": "", |
| "pages": "303--307", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICACCE.2016.8073765" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. J. Sefara, M. J. Manamela, and P. T. Malatji. 2016. Text-based language identification for some of the under-resourced languages of South Africa. In 2016 International Conference on Advances in Comput- ing and Communication Engineering (ICACCE), pages 303-307.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "HMM-based speech synthesis system incorporated with language identification for low-resourced languages", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "J" |
| ], |
| "last": "Sefara", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "B" |
| ], |
| "last": "Mokgonyane", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Manamela", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "I" |
| ], |
| "last": "Modipa", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Advances in Big Data, Computing and Data Communication Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICABCD.2019.8851055" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. J. Sefara, T. B. Mokgonyane, M. J. Manamela, and T. I. Modipa. 2019. HMM-based speech synthesis system incorporated with language identification for low-resourced languages. In International Confer- ence on Advances in Big Data, Computing and Data Communication Systems.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Speech synthesis applied to basic mathematics as a language", |
| "authors": [ |
| { |
| "first": "Tshephisho", |
| "middle": [], |
| "last": "Sefara", |
| "suffix": "" |
| }, |
| { |
| "first": "Promise", |
| "middle": [], |
| "last": "Malatji", |
| "suffix": "" |
| }, |
| { |
| "first": "Madimetja", |
| "middle": [], |
| "last": "Manamela", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "South Africa International Conference on Educational Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "243--253", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tshephisho Sefara, Promise Malatji, and Madimetja Manamela. 2016. Speech synthesis applied to basic mathematics as a language. In South Africa Inter- national Conference on Educational Technologies, pages 243-253.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "The development of local synthetic voices for an automatic pronunciation assistant", |
| "authors": [ |
| { |
| "first": "Tshephisho Joseph", |
| "middle": [], |
| "last": "Sefara", |
| "suffix": "" |
| }, |
| { |
| "first": "Madimetja Jonas", |
| "middle": [], |
| "last": "Manamela", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Southern Africa Telecommunication Networks and Applications Conference (SATNAC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tshephisho Joseph Sefara and Madimetja Jonas Man- amela. 2016. The development of local synthetic voices for an automatic pronunciation assistant. In Southern Africa Telecommunication Networks and Applications Conference (SATNAC).", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Webbased automatic pronunciation assistant", |
| "authors": [ |
| { |
| "first": "Tshephisho Joseph", |
| "middle": [], |
| "last": "Sefara", |
| "suffix": "" |
| }, |
| { |
| "first": "Madimetja Jonas", |
| "middle": [], |
| "last": "Manamela", |
| "suffix": "" |
| }, |
| { |
| "first": "Thipe Isaiah", |
| "middle": [], |
| "last": "Modipa", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Southern Africa Telecommunication Networks and Applications Conference (SATNAC)", |
| "volume": "", |
| "issue": "", |
| "pages": "112--117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tshephisho Joseph Sefara, Madimetja Jonas Man- amela, and Thipe Isaiah Modipa. 2017. Web- based automatic pronunciation assistant. In South- ern Africa Telecommunication Networks and Appli- cations Conference (SATNAC), pages 112-117.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Tone realisation in a Yor\u00f9b\u00e1 speech recognition corpus", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Van Niekerk", |
| "suffix": "" |
| }, |
| { |
| "first": "Etienne", |
| "middle": [], |
| "last": "Barnard", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Third Workshop on Spoken Language Technologies for Under-resourced Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "54--59", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Van Niekerk and Etienne Barnard. 2012. Tone realisation in a Yor\u00f9b\u00e1 speech recognition corpus. In Third Workshop on Spoken Language Technologies for Under-resourced Languages, pages 54-59, Cape Town, South Africa.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Architecture of a gender recognition system." |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "PCA showing gender clusters and k-means showing cluster centres." |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Acoustic features(Giannakopoulos, 2015)." |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Architecture of the BiLSTM with Attention Mechanism." |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Model prediction Accuracy and estimated binary cross entropy for BiLSTM." |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td>Model</td><td>Accuracy</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Comparison with other models" |
| } |
| } |
| } |
| } |