| { |
| "paper_id": "L16-1010", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:08:43.036056Z" |
| }, |
| "title": "Could Speaker, Gender or Age Awareness be beneficial in Speech-based Emotion Recognition?", |
| "authors": [ |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Sidorov", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ulm University", |
| "location": {} |
| }, |
| "email": "maxim.sidorov@uni-ulm.de" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Schmitt", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ulm University", |
| "location": {} |
| }, |
| "email": "alexander.schmitt@uni-ulm.de" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Semenkin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Siberian State Aerospace University Ulm Germany", |
| "location": { |
| "country": "Krasnoyarsk Russia" |
| } |
| }, |
| "email": "eugene.semenkin@sibsau.ru" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Minker", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ulm University", |
| "location": {} |
| }, |
| "email": "wolfgang.minker@uni-ulm.de" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Emotion Recognition (ER) is an important part of dialogue analysis which can be used in order to improve the quality of Spoken Dialogue Systems (SDSs). The emotional hypothesis of the current response of an end-user might be utilised by the dialogue manager component in order to change the SDS strategy which could result in a quality enhancement. In this study additional speaker-related information is used to improve the performance of the speech-based ER process. The analysed information is the speaker identity, gender and age of a user. Two schemes are described here, namely, using additional information as an independent variable within the feature vector and creating separate emotional models for each speaker, gender or age-cluster independently. The performances of the proposed approaches were compared against the baseline ER system, where no additional information has been used, on a number of emotional speech corpora of German, English, Japanese and Russian. The study revealed that for some of the corpora the proposed approach significantly outperforms the baseline methods with a relative difference of up to 11.9%.", |
| "pdf_parse": { |
| "paper_id": "L16-1010", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Emotion Recognition (ER) is an important part of dialogue analysis which can be used in order to improve the quality of Spoken Dialogue Systems (SDSs). The emotional hypothesis of the current response of an end-user might be utilised by the dialogue manager component in order to change the SDS strategy which could result in a quality enhancement. In this study additional speaker-related information is used to improve the performance of the speech-based ER process. The analysed information is the speaker identity, gender and age of a user. Two schemes are described here, namely, using additional information as an independent variable within the feature vector and creating separate emotional models for each speaker, gender or age-cluster independently. The performances of the proposed approaches were compared against the baseline ER system, where no additional information has been used, on a number of emotional speech corpora of German, English, Japanese and Russian. The study revealed that for some of the corpora the proposed approach significantly outperforms the baseline methods with a relative difference of up to 11.9%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "By deploying the ER component within the SDS, its quality could be significantly increased. It might be beneficial during human-robot or even human-human interaction. Whereas the majority of studies concentrate on speakerindependent ER experiments, in some cases speakerawareness can bring an additional advantage. Despite the fact that the basic emotions are shared between cultures and nationalities (Scherer, 2002) obviously, each person expresses his emotions individually. This thesis lies behind the idea of building different emotional models for each speaker independently or incorporating the speaker-specific information within the single ER model in a different way. On the one hand it results in a problem-decomposition, similar to the cluster-then-classify approach, but on the other hand by deploying different models for each speaker, the individual features of the corresponding speaker can be caught and utilised properly. Furthermore, as has been mentioned in many studies (Brody, 1985) , (Hall et al., 2000) the gender difference in emotional expression has been detected during several psychological investigations. In contrast to the very specific nature of speaker-adaptive ER, gender-adaptive ER might be more general. A similar idea is behind the age-adaptive ER models, where each user has one of the age-specific labels (for example youth or adult). The global aim of the study is to figure out whether the speaker-, gender-or even age-related information of an enduser might be utilised in order to improve the quality of the ER models. We proposed here a two-stage approach, where firstly the speaker or other additional information (gender or age) is identified and secondly, an adaptive ER procedure is performed. We intend to study both cases: the theoretically possible improvement when the known speakerrelated information is taken into account, and the actual difference, which can be observed by deploying speaker-state recognition models, i.e. 
Speaker Identification (SI), Gender (GR), or Age Recognition (AR). Thus, in the first case we took the ground-truth information about the speaker, gender and age (the G experiments, for Ground truth), whereas in the second series of experiments we deployed the actual SI, GR, and AR models to estimate the corresponding hypothesis (the E experiments, for Estimated).", |
| "cite_spans": [ |
| { |
| "start": 402, |
| "end": 417, |
| "text": "(Scherer, 2002)", |
| "ref_id": null |
| }, |
| { |
| "start": 991, |
| "end": 1004, |
| "text": "(Brody, 1985)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1007, |
| "end": 1026, |
| "text": "(Hall et al., 2000)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Since the emotions themselves have a subjective nature and generally may vary depending on what language one speaks, we carried out the experiments based on 8 different emotional corpora of English, German, Russian and Japanese in order to gain generalizability of the results obtained. The rest of the paper is organised as follows: Significant related work is presented in 2. Section, whereas 3. Section describes the applied corpora and outlines their differences. Our approach to incorporating the additional speaker-related information within the ER system is presented in 4. Section. The results of numerical experiments are demonstrated in 5. Section. Finally, the conclusion and future work are described in 6. Section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "The authors in (Lopez-Otero et al., 2015) researched dependencies between speaker-dependent and -independent approaches when the depression level of the speaker is under examination. It has been concluded that the system performance is much better when the test speaker is in both the training and testing sets. Intuitively, the results could be extrapolated in the case of other speaker traits such as emotions, in a similar way to how it was implemented in the case of the speaker identification approach (Kockmann et al., 2011) . The authors in (Vogt and Andr\u00e9, 2006) improved the performance of emotion classification by automatic gender detection. The authors have used two different classifiers in order to classify male and female voices from the Emo-DB (Burkhardt et al., 2005) and the SmartKom (Steininger et al., 2002) corpora. They concluded that the combined gender and emotion recognition system improved the recognition rate of a gender-independent emotion recognition system by 2-4% relatively by applying the Naive Bayes classifier for building the emotion models.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 41, |
| "text": "(Lopez-Otero et al., 2015)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 507, |
| "end": 530, |
| "text": "(Kockmann et al., 2011)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 548, |
| "end": 570, |
| "text": "(Vogt and Andr\u00e9, 2006)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 761, |
| "end": 785, |
| "text": "(Burkhardt et al., 2005)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 803, |
| "end": 828, |
| "text": "(Steininger et al., 2002)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Significant related work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "All evaluations were conducted using several audio emotional databases. Here is a brief description of them and their statistical characteristics. The AVEC-2014 database was used for the fourth Audio-Visual Emotion Challenge and Workshop 2014 (Valstar et al., 2014) . This corpus is a subset of the AVEC'13 database (Valstar et al., 2013) consisting of 150 videos. Only two tasks in a human-computer interaction scenario have been selected to be included in the dataset. During the Northwind scheme participants read aloud an extract of the story 'The North Wind and the Sun' in German. The Freeform task is participants' answers to several general questions such as 'What was your best gift, and why?', again in German. Each affect dimension (Arousal, Dominance, and Valence) has been annotated separately by a minimum of three and a maximum of five human raters. We averaged the valence and arousal values over the whole recording's duration to obtain only one pair of continuous labels. The Emo-DB emotional database (Burkhardt et al., 2005) was recorded at the Technical University of Berlin and consists of labelled emotional German utterances which were spoken by 10 actors (5 females). 10 German sentences of non-emotional content have been acted by professional actors so that every utterance has one of the following emotional labels: anger, boredom, disgust, anxiety/fear, happiness, sadness and neutral. The total number of utterances in the corpus is 535. The RadioS database consists of recordings from a popular German radio talk-show. Within this corpus, 69 native German speakers talked about their personal troubles. The labelling was performed by a human rater so that each utterance has one of the following emotional labels: neutral, happy, sad and angry.", |
| "cite_spans": [ |
| { |
| "start": 243, |
| "end": 265, |
| "text": "(Valstar et al., 2014)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 316, |
| "end": 338, |
| "text": "(Valstar et al., 2013)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1020, |
| "end": 1044, |
| "text": "(Burkhardt et al., 2005)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpora description", |
| "sec_num": "3." |
| }, |
| { |
| "text": "The VAM (Grimm et al., 2008) dataset was created at Karlsruhe University and consists of utterances extracted from the popular German talk-show 'Vera am Mittag' (Vera in the afternoon). For this database 12 broadcasts of the talkshow have been recorded. Each broadcast consists of several dialogues of between two and five people each. Continuous emotional labels have been set by evaluators using the valence, activation and dominance basis. The LEGO emotional database (Schmitt et al., 2012) comprises non-acted English (American) utterances which were extracted from the SDS-based bus-stop navigational system (Eskenazi et al., 2008) . The utterances are requests to the system spoken by real users with real concern. Each utterance has one of the following emotional labels: anger, slight anger, much anger, neutral, friendliness and nonspeech -critical noisy recordings or just silence. The corpus was manually annotated by a human rater who chooses one of the labels. We combined all the utterances with different anger levels into a single class with anger labels. Moreover, since there are very few friendly recordings we removed them from the database. As a result we operated only with recordings of 3 labels, namely anger, neutral and non-speech. The SAVEE (Surrey Audio-Visual Expressed Emotion) corpus (Haq and Jackson, 2010 ) was recorded as a part of research into the field of audio-visual emotion classification, from four native English male speakers aged from 27 to 31. The emotional label for each utterance is one of the standard set of emotions (anger, disgust, fear, happiness, sadness, surprise and neutral). The corpus of Russian emotional speech (Makarova and Petrushin, 2002) Ruslana includes records of utterances from 61 subjects (49 females). 
Each native Russian speaker (aged from 16 to 28 with the average equalling 18.7) read aloud 10 sentences of different content conveying the following six emotional states: neutral, surprise, happiness, anger, sadness and fear. Altogether the database contains 3,660 emotional utterances (61 speakers x 10 sentences x 6 emotional primitives). The UUDB (The Utsunomiya University Spoken Dialogue Database for Paralinguistic Information Studies) database Colours show the optimal classifiers. All the experiments are 10 repetitions of 10-fold cross-validation emotion-stratified. Box-plots with bold frames indicate T-test-based significant differences against the baseline results (at least with p = 0.05).", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 28, |
| "text": "(Grimm et al., 2008)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 471, |
| "end": 493, |
| "text": "(Schmitt et al., 2012)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 613, |
| "end": 636, |
| "text": "(Eskenazi et al., 2008)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1315, |
| "end": 1337, |
| "text": "(Haq and Jackson, 2010", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1672, |
| "end": 1702, |
| "text": "(Makarova and Petrushin, 2002)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpora description", |
| "sec_num": "3." |
| }, |
| { |
| "text": "(Mori et al., 2011) consists of spontaneous Japanese speech through task-oriented dialogue which was produced by 7 pairs of speakers (12 females), 4,737 utterances in total. Emotional labels for each utterance were created by 3 annotators on a 5-dimensional emotional basis: interest (interested-indifferent), credibility (credibledoubtful), dominance (dominant-submissive), arousal (aroused-sleepy) and pleasantness (pleasant-unpleasant). The human raters evaluated the perceived emotional state of the speakers for each utterance on a 7-point scale. Thus, on the pleasantness scale, 1 corresponds to extremely unpleasant, 4 to neutral, and 7 to extremely pleasant. Since a classification task is under consideration, we have used just pleasantness (a synonym for evaluation) and arousal axes from the AVEC-2014, VAM, and UUDB corpora. The corresponding quadrant (anticlockwise, starting in the positive quadrant, assuming arousal as abscissa) can also be assigned emotional labels: happy-exciting, angry-anxious, sad-bored and relaxed-serene (Schuller et al., 2009b) . There is a description of the used corpora in Table 1 .", |
| "cite_spans": [ |
| { |
| "start": 1044, |
| "end": 1068, |
| "text": "(Schuller et al., 2009b)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1117, |
| "end": 1124, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Corpora description", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Incorporating speaker-specific information into the emotion recognition process may be done in several ways. A very straightforward way is to add this information to the set of features as an additional variable; we will refer to this approach as System Aug for augmented feature vector (Sidorov et al., 2014a) . Another way is to create speakerdependent models: While, for conventional emotion recognition, one statistical model is created independently of the speaker, one may create a separate emotion model for each speaker, we will refer to this approach as System Sep for separate model (Sidorov et al., 2014b) . Both approaches result in a two-stage recognition procedure: First, the speaker is identified and then this information is included into the feature set directly (for the System Aug), or the corresponding emotion model is used for estimating the emotions (for the System Sep). Both emotion recognition-speaker identification hybrid systems have been investigated and evaluated in this study.", |
| "cite_spans": [ |
| { |
| "start": 287, |
| "end": 310, |
| "text": "(Sidorov et al., 2014a)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 593, |
| "end": 616, |
| "text": "(Sidorov et al., 2014b)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The two-stage adaptive emotion recognition", |
| "sec_num": "4." |
| }, |
| { |
| "text": "To investigate the theoretical improvement of using speaker-specific information for ER, the ground truth information about the speaker has been used (AugG and SepG approaches). Then, in order to perform experiments in realworld conditions, an actual speaker identification component has been applied (AugE and SepE systems). We used a number of classification algorithms, namely k-Nearest Neighbours (KNN) (Cover and Hart, 1967) algorithm (Platt and others, 1999) , and boosted Logistic Regression (LR) (Menard, 2002) , in order to provide statistically reliable and algorithm-independent results. In the first experiment, the focus was on investigating the theoretical improvement, which may be achieved using speaker-based adaptiveness. For this, known speaker information (true labels) was used for both approaches. In System Aug, the speaker information was simply added to the feature vector as an additional variable. Hence, all utterances with the corresponding speaker information were used to create and evaluate an emotion model through the augmented feature vector. For the System Sep, individual emotion models were built for each speaker. During the training phase all speaker utterances were used for creating the emotion models. During testing, all speaker utterances were evaluated with the corresponding emotion model, based on known speaker-related information. Additionally, a second experiment was conducted including an actual speaker identification module instead of using known speaker information. First, a speaker identifier was created during the training phase. Furthermore, for System Aug, the known speaker information was included into the feature vector for the training of the emotion classifier. The testing phase starts with the SI procedure. Then, the speaker hypothesis was included into the feature set which was in turn fed into the emotion recogniser. For System Sep, an emotion recogniser was created for each speaker separately. 
For testing, the speaker hypothesis of the speaker recognition is used to select the emotion model which corresponds to the recognised speaker to create an emotion hypothesis. In contrast to the first experiment, these experiments are not free of speaker identification errors. Therefore, relatively worse results were expected here. It should be noted that similar experiments have been performed in the case of gender-and age-adaptive studies, where instead of using speaker ID directly (AugG and SepG experiments) and the speaker-identification procedure (AugE and SepE experiments), both gender-and agerelated information, as well as gender-and age-recognition systems have been used correspondingly.", |
| "cite_spans": [ |
| { |
| "start": 407, |
| "end": 429, |
| "text": "(Cover and Hart, 1967)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 440, |
| "end": 464, |
| "text": "(Platt and others, 1999)", |
| "ref_id": null |
| }, |
| { |
| "start": 504, |
| "end": 518, |
| "text": "(Menard, 2002)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The two-stage adaptive emotion recognition", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Within the Sep systems, one may perform normalisation only once for the whole set of utterances, or speakerwise (similarly for gender and age groups). We used Ztransformation, as it was found to perform best for the problem of ER previously (Zhang et al., 2011 ), using both strategies described. Regarding the Aug system, one may consider an augmented feature vector with speaker ID as a unique integer or as a dummy variable (one-hot encoding). When the dummy coding is applied, for all values of the speaker ID attribute a new attribute is created. Next, in every utterance, the new attribute which corresponds to the actual nominal value of the example gets the value 1 and all other new attributes get the value 0. It means that each utterance gets N additional binary variables where N is equal to the number of speakers in the training set, where all the values except for a single one are equal to 0. In such cases when an utterance of an unknown speaker is in the testing set (which could be a case when the number of utterances of this particular speaker is not high enough, provided random emotion-stratified crossvalidation splitting) all new attributes are set to 0. Another aspect is whether these additional speaker-related attributes (either unique integer or dummy variable) should be normalised.", |
| "cite_spans": [ |
| { |
| "start": 241, |
| "end": 260, |
| "text": "(Zhang et al., 2011", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The two-stage adaptive emotion recognition", |
| "sec_num": "4." |
| }, |
| { |
| "text": "As a baseline for acoustic features we consider the 384dimensional feature vector which was used within the In-terSpeech 2009 Emotion Challenge (Schuller et al., 2009a) , (Eyben et al., 2010) . We used 10 repetitions of the 10-fold Cross-Validation (CV) emotion-stratified experiment and F 1 measure as a main performance metric. We deployed four machine learning algorithms of different nature to avoid algorithmdependent results. Thus, in the case of speaker identity for the system Sep, we performed 4 (classification algorithms) x 8 (corpora) x 2 (known speaker-related information -SepG vs. estimated one -SepE) x 2 (normalisation once vs. speaker-wise) = 128 experiments, each of them is 10 repetitions of 10-fold cross-validation. For the system Aug, we performed 4 (classification algorithms) x 8 (corpora) x 2 (known speakerrelated information -AugG vs. estimated one -AugE) x 2 (speaker incorporating method -unique integer vs. dummy coding) x 2 (speaker ID normalised vs. non-normalised) = 256 experiments, each of them is 10 repetitions of 10fold cross-validation. It should be noted that gender-related information was available only for 6 corpora, and age-related information was found only within the LEGO corpus, therefore the total number of experiments for genderand age-adaptive ER systems was less than for the speakeradaptive experiments.", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 168, |
| "text": "(Schuller et al., 2009a)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 171, |
| "end": 191, |
| "text": "(Eyben et al., 2010)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Numerical evaluations", |
| "sec_num": "5." |
| }, |
| { |
| "text": "For each experiment we calculated the mean of F 1 measure (over 100 runs, that is -10 repetitions of 10-fold CV) and among 4 classifiers we selected the algorithm with the highest mean. After that, for each combination of normalisation and speaker ID incorporation methods we calculated average ranks, that is -the nominal value depending on the performance of the system on a particular data set, similar to Friedman's statistic (Theodorsson-Norheim, 1987) , (Dem\u0161ar, 2006) . Thus, the best approach will be assigned rank 1, while the runner-up, 2, etc. In the case of identical highest average F 1 measures, we set 1.5 to both approaches. We chose this ranking method due to its simplicity and since it has been observed that the average ranking outperformed more advanced ones, when the performance of classification algorithms was analysed (Brazdil and Soares, 2000) .", |
| "cite_spans": [ |
| { |
| "start": 430, |
| "end": 457, |
| "text": "(Theodorsson-Norheim, 1987)", |
| "ref_id": null |
| }, |
| { |
| "start": 460, |
| "end": 474, |
| "text": "(Dem\u0161ar, 2006)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 844, |
| "end": 870, |
| "text": "(Brazdil and Soares, 2000)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Numerical evaluations", |
| "sec_num": "5." |
| }, |
| { |
| "text": "We calculated the ranks separately for the systems which used known speaker-related information and estimated one, as well as for two groups of settings related to Sep and Aug systems, since they have different nature and potentially may result in very different levels of performances. Next, we calculated these ranks for all the corpora considered and used average ranks for the eventual assessment of approaches. Hence, the lower the average rank, the better the system performed on average on all the emotional corpora. For speaker-adaptive systems, the average ranks for the systems Sep and Aug are depicted in Table 2 and in Table 3 , respectively. Subsequently, we selected the systems with the highest ranks to include the corresponding results in graphs. As a visualisation tool we selected a box-plot graph (Williamson et al., 1989) for its high descriptive ability. We used a rather standard declaration of box-plots: the upper hinge is the first quartile (the 25th percentile), the lower hinge is the third quartile (the 75th percentile), upper (lower) whisker -to the highest (lowest) value within 1.5 * IQR (Inter-Quartile Range), points are outliers, lines within boxes depict medians, numbers within boxes are means. Figure 1 depicts the following systems' results for each database: baseline approach -without any additional speaker-related information, SepG and SepE systems performing Z-transformation speaker-wise, AugG system with non-normalised speaker-related attributes within the dummy variable, and AugE approach with normalised speaker-related attributes. We chose these settings due to their highest average ranks (see Table 2 and Table 3 ).", |
| "cite_spans": [ |
| { |
| "start": 817, |
| "end": 842, |
| "text": "(Williamson et al., 1989)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 616, |
| "end": 638, |
| "text": "Table 2 and in Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 1233, |
| "end": 1241, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1647, |
| "end": 1654, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 1659, |
| "end": 1666, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Numerical evaluations", |
| "sec_num": "5." |
| }, |
| { |
| "text": "Since we did not pay any attention to a significance test while performing the rank calculation, now we performed the paired Student's T-Test, comparing the proposed systems with the baseline approach for each corpus independently. Thus, the box-plots with bold outlines indicate significant difference against the baseline approach with at least p = 0.05. The speaker identification procedure has been performed in such a way, that we used the same algorithm as for the ER task. We used the SI procedure in a corpus-based manner, which means that for each corpus on each iteration of the cross-validation experiments we used exactly the same speech data and features to train both the ER and SI models. Since the results of speaker recognition have changed dramatically depending on the corpus and the algorithms used, we also presented the results of speaker recognition in Figure 2 . Next, we repeated the same experiments for gender-adaptive settings. The average ranks of the systems proposed are depicted in Table 4 and Table 5 . Figure 4 : F 1 measure of speech-based emotion recognition with ground truth age-related information (AugG and SepG), with the estimated age-related hypothesis (AugE and SepE), and without any additional information (baseline). All the experiments are 10 repetitions of 10-fold cross-validation emotion-stratified. Box-plots with bold frames indicate T-test-based significant differences against the baseline results (at least with p = 0.05).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 876, |
| "end": 884, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1014, |
| "end": 1033, |
| "text": "Table 4 and Table 5", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1036, |
| "end": 1044, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Numerical evaluations", |
| "sec_num": "5." |
| }, |
| { |
| "text": "The results, which correspond to the systems with the highest ranks, are depicted in Figure 3 . Finally, we performed age-adaptive experiments on the LEGO corpus since it has age-related information including the following 3 classes: youth, adult and elder. Again, we selected the highest average F 1 measure among all the algorithms considered and depicted the results obtained in Figure 4 . Since only one emotional corpus has been analysed within the age-adaptivity, we did not calculate average ranks for this system.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 85, |
| "end": 93, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 382, |
| "end": 390, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Numerical evaluations", |
| "sec_num": "5." |
| }, |
| { |
| "text": "It turned out that in both cases -using actual and estimated speaker identity, the system Sep performed the best with speaker-wise normalisation (see corresponding cells in Table 2 ). Similar results were previously obtained for speech recognition, where speaker normalisation improved the performance of speech recognisers (Giuliani et al., 2006) . Regarding the Aug systems, the one-hot codding performed better than using a unique integer. This was expected due to fact that speaker ID is not numerical but a nominal value and dummy-coding allows this fact to be handled in a more proper way than with a unique integer. Moreover, nonnormalised speaker-related attributes resulted in the best performance within the AugG systems, whereas the normalised version achieved a higher F 1 measure within the AugE system (see corresponding cells in Table 3 ).", |
| "cite_spans": [ |
| { |
| "start": 324, |
| "end": 347, |
| "text": "(Giuliani et al., 2006)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 173, |
| "end": 180, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 844, |
| "end": 851, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Speaker identity", |
| "sec_num": "5.1." |
| }, |
| { |
| "text": "The proposed system using an actual SI module resulted in a significant improvement on most of the corpora with a remarkable enhancement of the F 1 measure on the AVEC corpus (49.6 SepE vs. 42.2 baseline), Ruslana (53 AugE vs. 47.3 baseline), and SAVEE (70.1 SepE vs. 63.2 baseline). The results may be even better if the SI component performs more accurately (compare E and G systems in Figure 1) . However, the performances of the Sep system dropped on Emo-DB, LEGO, and Ruslana. In the case of the Emo-DB corpus this can be explained by highly unbalanced coverage of emotions by the speakers. Thus, the speakers ID03 and ID10 have only one single utterance with the disgust label. By using 10-fold CV we ensured that this particular utterance will appear in the testing data exactly once. Let us consider this case and suppose that in a particular iteration of the CV we have the disgust recording in the testing set. When we train the model speaker-wise, then the emotional model for the speaker ID03 and ID10 has no chance to recognise it properly, since during the training phase there were not any recordings with the disgust label. Alternatively, during the baseline approach we operate with the whole training data in order to build only one single emotional model for all the speakers from the training set. It means that the algorithm is trained not only on disgust samples of ID03 or ID10 but all speakers from the training set. Therefore, on each iteration the model could operate with enough samples of all possible labels, enhancing the probability of proper recognition of a particular sample from the testing set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 388, |
| "end": 397, |
| "text": "Figure 1)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Speaker identity", |
| "sec_num": "5.1." |
| }, |
| { |
| "text": "Regarding the LEGO corpus, it was collected from the bus-navigation system (see 3. Section) containing real-user requests. Each dialogue consists of from 5 to 9 system-user turns in which the speaker tried to determine an optimal bus-route from the current to the desired location within the city. We supposed that each dialogue had been initiated by a new user and therefore each speaker in the database has very few utterances. As a result, in each iteration of CV we do not have enough data to build a reasonable speaker identification model (see rather poor SI performance on the LEGO in Figure 2 ). Therefore, the performance of the SepE system is much lower than that of the SepG system. The Ruslana corpus contains 10 recordings for each emotional tag for each of 61 speakers. It means that if we perform speaker-dependent modelling for the Sep system, then on each iteration of the CV a modelling algorithm could operate at most with 10 recordings for each emotional label (in this case all the recordings of a particular label should be placed by chance in all the CV folds but not in the one which is currently used for testing) -obviously it is not enough to obtain a reasonable model which would show good generalisation ability. On the other hand, the baseline approach operates with the whole set of recordings from all speakers which are in the training set. Therefore, a modelling algorithm within the baseline approach operates with more data of a particular emotional label which in turn leads to higher generalisation ability and recognition performance.",
| "cite_spans": [],
| "ref_spans": [
| {
| "start": 592,
| "end": 600,
| "text": "Figure 2",
| "ref_id": "FIGREF1"
| }
| ], |
| "eq_spans": [], |
| "section": "Speaker identity", |
| "sec_num": "5.1." |
| }, |
| { |
| "text": "The results of gender recognition itself were rather high and quite similar for the 4 algorithms used, having an F 1 measure on average (over 4 algorithms) of 97.2 on Emo-DB, 85.7 on LEGO, 93.1 on VAM, 97.1 on UUDB, 94.9 on RadioS, and 98.5 on the Ruslana corpus. It turned out that for the approach which used actual gender information speaker normalisation performed best, whereas for the system with the actual GR component normalisation should be performed only once for all the utterances (see the corresponding ranks in Table 4 ). Regarding the system Aug, in both cases normalised dummy-based speaker ID encoding resulted in the highest average ranks (see the corresponding ranks in Table 5 ). The results of gender-adaptive ER are more regular, without large variability, and in the case of most corpora resulted in improvement (see Figure 3) .",
| "cite_spans": [],
| "ref_spans": [
| {
| "start": 526,
| "end": 533,
| "text": "Table 4",
| "ref_id": "TABREF6"
| },
| {
| "start": 690,
| "end": 697,
| "text": "Table 5",
| "ref_id": "TABREF7"
| },
| {
| "start": 841,
| "end": 850,
| "text": "Figure 3)", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Gender-awareness", |
| "sec_num": "5.2." |
| }, |
| { |
| "text": "The result of age recognition itself was equal to 67.7, 70.9, 64.9, and 68.6 using SVM, LR, KNN, and MLP, respectively. However, no improvement on LEGO has been achieved by performing age-adaptiveness (see Figure 4) . We state that more sophisticated experiments with several corpora are needed.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 206, |
| "end": 215, |
| "text": "Figure 4)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Age-awareness", |
| "sec_num": "5.3." |
| }, |
| { |
| "text": "We concluded that the speaker-adaptive ER can significantly improve the performance using both approaches proposed. However, the Sep system requires balanced data and enough training material of all the target users of the ER system. Moreover, the Sep systems tend to be more sensitive to both the speaker identification error and statistical characteristics of the databases. Indeed, when the Aug systems are applied, all of the utterances from the training set are used in order to train the model, whereas only the utterances of the corresponding speaker are used to build the Sep models.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6." |
| }, |
| { |
| "text": "In terms of future work, applying multi-agent emotional models can be considered by performing a simple vote or by building a meta-classifier based on individual single classifiers. In this paper we took into account only audio signals, however a dialogue might consist of visual representation, and by analysing visual cues, ER might be more successful. An additional use of advanced machine learning algorithms and contemporary feature selection methods may further improve the ER performance. Specifically, we consider using the deep learning concept to perform ER (Kim et al., 2013) , and the multi-objective genetic algorithm-based feature selection (Sidorov et al., 2015) and state-of-the-art iVector-based SI procedure to further enhance the performance of the ER systems.", |
| "cite_spans": [ |
| { |
| "start": 568, |
| "end": 586, |
| "text": "(Kim et al., 2013)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 655, |
| "end": 677, |
| "text": "(Sidorov et al., 2015)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6." |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "A comparison of ranking methods for classification algorithm selection", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "B" |
| ], |
| "last": "Brazdil", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Soares", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Machine Learning: ECML 2000", |
| "volume": "", |
| "issue": "", |
| "pages": "63--75", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brazdil, P. B. and Soares, C. (2000). A comparison of ranking methods for classification algorithm selec- tion. In Machine Learning: ECML 2000, pages 63-75. Springer.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Gender differences in emotional development: A review of theories and research", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "R" |
| ], |
| "last": "Brody", |
| "suffix": "" |
| } |
| ], |
| "year": 1985, |
| "venue": "Journal of Personality", |
| "volume": "53", |
| "issue": "2", |
| "pages": "102--149", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brody, L. R. (1985). Gender differences in emotional de- velopment: A review of theories and research. Journal of Personality, 53(2):102-149.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A database of german emotional speech", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Burkhardt", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Paeschke", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Rolfes", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "F" |
| ], |
| "last": "Sendlmeier", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "1517--1520", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Burkhardt, F., Paeschke, A., Rolfes, M., Sendlmeier, W. F., and Weiss, B. (2005). A database of german emotional speech. In Interspeech, pages 1517-1520.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Nearest neighbor pattern classification. Information Theory", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "M" |
| ], |
| "last": "Cover", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hart", |
| "suffix": "" |
| } |
| ], |
| "year": 1967, |
| "venue": "IEEE Transactions on", |
| "volume": "13", |
| "issue": "1", |
| "pages": "21--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cover, T. M. and Hart, P. E. (1967). Nearest neighbor pat- tern classification. Information Theory, IEEE Transac- tions on, 13(1):21-27.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Statistical comparisons of classifiers over multiple data sets", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Dem\u0161ar", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "7", |
| "issue": "", |
| "pages": "1--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dem\u0161ar, J. (2006). Statistical comparisons of classifiers over multiple data sets. The Journal of Machine Learn- ing Research, 7:1-30.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Let's go lab: a platform for evaluation of spoken dialog systems with real world users", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Eskenazi", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Raux", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Langner", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Ninth Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eskenazi, M., Black, A. W., Raux, A., and Langner, B. (2008). Let's go lab: a platform for evaluation of spoken dialog systems with real world users. In Ninth Annual Conference of the International Speech Communication Association.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Opensmile: the munich versatile and fast open-source audio feature extractor", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Eyben", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "W\u00f6llmer", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Schuller", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the international conference on Multimedia", |
| "volume": "", |
| "issue": "", |
| "pages": "1459--1462", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eyben, F., W\u00f6llmer, M., and Schuller, B. (2010). Opens- mile: the munich versatile and fast open-source audio feature extractor. In Proceedings of the international conference on Multimedia, pages 1459-1462. ACM.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Improved automatic speech recognition through speaker normalization", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Giuliani", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Gerosa", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Brugnara", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Computer Speech & Language", |
| "volume": "20", |
| "issue": "1", |
| "pages": "107--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giuliani, D., Gerosa, M., and Brugnara, F. (2006). Improved automatic speech recognition through speaker normalization. Computer Speech & Language, 20(1):107-123.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "The vera am mittag german audio-visual emotional speech database", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Grimm", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Kroschel", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "IEEE International Conference on", |
| "volume": "", |
| "issue": "", |
| "pages": "865--868", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Grimm, M., Kroschel, K., and Narayanan, S. (2008). The vera am mittag german audio-visual emotional speech database. In Multimedia and Expo, 2008 IEEE Interna- tional Conference on, pages 865-868. IEEE.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Gender differences in nonverbal communication of emotion. Gender and emotion: Social psychological perspectives", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "D" |
| ], |
| "last": "Carter", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "G" |
| ], |
| "last": "Horgan", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "97--117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hall, J. A., Carter, J. D., and Horgan, T. G. (2000). Gen- der differences in nonverbal communication of emotion. Gender and emotion: Social psychological perspectives, pages 97-117.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Machine Audition: Principles, Algorithms and Systems, chapter Multimodal Emotion Recognition", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Haq", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Jackson", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "398--423", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haq, S. and Jackson, P., (2010). Machine Audition: Princi- ples, Algorithms and Systems, chapter Multimodal Emo- tion Recognition, pages 398-423. IGI Global, Hershey PA, Aug.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Deep learning for robust feature generation in audiovisual emotion recognition", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "M" |
| ], |
| "last": "Provost", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on", |
| "volume": "", |
| "issue": "", |
| "pages": "3687--3691", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kim, Y., Lee, H., and Provost, E. M. (2013). Deep learn- ing for robust feature generation in audiovisual emotion recognition. In Acoustics, Speech and Signal Process- ing (ICASSP), 2013 IEEE International Conference on, pages 3687-3691. IEEE.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Application of speaker-and language identification state-of-the-art techniques for emotion recognition", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kockmann", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Speech Communication", |
| "volume": "53", |
| "issue": "9", |
| "pages": "1172--1185", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kockmann, M., Burget, L., et al. (2011). Application of speaker-and language identification state-of-the-art tech- niques for emotion recognition. Speech Communication, 53(9):1172-1185.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Assessing speaker independence on a speech-based depression level estimation system", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Lopez-Otero", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Docio-Fernandez", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Garcia-Mateo", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lopez-Otero, P., Docio-Fernandez, L., and Garcia-Mateo, C. (2015). Assessing speaker independence on a speech-based depression level estimation system. Pat- tern Recognition Letters.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Ruslana: A database of russian emotional utterances", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Makarova", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Petrushin", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. Int. Conf. Spoken Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Makarova, V. and Petrushin, V. (2002). Ruslana: A database of russian emotional utterances. In Proc. Int. Conf. Spoken Language Processing (ICSLP 2002).", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Applied logistic regression analysis", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Menard", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "106", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Menard, S. (2002). Applied logistic regression analysis, volume 106. Sage.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Constructing a spoken dialogue corpus for studying paralinguistic information in expressive conversation and analyzing its statistical/acoustic characteristics", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Mori", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Satake", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kasuya", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Speech Communication", |
| "volume": "53", |
| "issue": "1", |
| "pages": "36--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mori, H., Satake, T., Nakamura, M., and Kasuya, H. (2011). Constructing a spoken dialogue corpus for studying paralinguistic information in expressive conver- sation and analyzing its statistical/acoustic characteris- tics. Speech Communication, 53(1):36-50.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Fast training of support vector machines using sequential minimal optimization", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Platt", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Advances in kernel methodssupport vector learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Platt, J. et al. (1999). Fast training of support vector ma- chines using sequential minimal optimization. Advances in kernel methodssupport vector learning, 3.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A parameterized and annotated spoken dialog corpus of the cmu let's go bus information system", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Schmitt", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ultes", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Minker", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "3369--3373", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Schmitt, A., Ultes, S., and Minker, W. (2012). A param- eterized and annotated spoken dialog corpus of the cmu let's go bus information system. In LREC, pages 3369- 3373.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The interspeech 2009 emotion challenge", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Schuller", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Steidl", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Batliner", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "312--315", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Schuller, B., Steidl, S., and Batliner, A. (2009a). The in- terspeech 2009 emotion challenge. In INTERSPEECH, volume 2009, pages 312-315.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Acoustic emotion recognition: A benchmark comparison of performances", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Schuller", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Vlasenko", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Eyben", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Rigoll", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Wendemuth", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Automatic Speech Recognition & Understanding", |
| "volume": "", |
| "issue": "", |
| "pages": "552--557", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Schuller, B., Vlasenko, B., Eyben, F., Rigoll, G., and Wendemuth, A. (2009b). Acoustic emotion recogni- tion: A benchmark comparison of performances. In Automatic Speech Recognition & Understanding, 2009. ASRU 2009. IEEE Workshop on, pages 552-557. IEEE.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Comparison of gender-and speaker-adaptive emotion recognition", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Sidorov", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ultes", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Schmitt", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "International Conference on Language Resources and Evaluation (LREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "3476--3480", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sidorov, M., Ultes, S., and Schmitt, A. (2014a). Compari- son of gender-and speaker-adaptive emotion recognition. International Conference on Language Resources and Evaluation (LREC), pages 3476-3480.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Emotions are a personal thing: Towards speaker-adaptive emotion recognition", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Sidorov", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ultes", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Schmitt", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4803--4807", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sidorov, M., Ultes, S., and Schmitt, A. (2014b). Emotions are a personal thing: Towards speaker-adaptive emotion recognition. In Acoustics, Speech and Signal Process- ing (ICASSP), 2014 IEEE International Conference on, pages 4803-4807. IEEE.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Contemporary stochastic feature selection algorithms for speechbased emotion recognition", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Sidorov", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Brester", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Schmitt", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Annual Conference of the International Speech Communication Association (INTERSPEECH)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sidorov, M., Brester, C., and Schmitt, A. (2015). Contem- porary stochastic feature selection algorithms for speech- based emotion recognition. In Proceedings of the An- nual Conference of the International Speech Commu- nication Association (INTERSPEECH), Dresden, Ger- many, September.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Friedman and quade tests: Basic computer program to perform nonparametric two-way analysis of variance and multiple comparisons on ranks of several related samples", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Steininger", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Schiel", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Dioubina", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Raubold", |
| "suffix": "" |
| } |
| ], |
| "year": 1987, |
| "venue": "LREC Workshop on \"Multimodal Resources", |
| "volume": "17", |
| "issue": "", |
| "pages": "85--99", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steininger, S., Schiel, F., Dioubina, O., and Raubold, S. (2002). Development of user-state conventions for the multimodal corpus in smartkom. In LREC Workshop on \"Multimodal Resources\", Las Palmas, Spain. Theodorsson-Norheim, E. (1987). Friedman and quade tests: Basic computer program to perform nonparametric two-way analysis of variance and multiple comparisons on ranks of several related samples. Computers in biol- ogy and medicine, 17(2):85-99.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Avec 2013: the continuous audio/visual emotion and depression recognition challenge", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Valstar", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Schuller", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Eyben", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Bilakhia", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Schnieder", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Cowie", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Pantic", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 3rd ACM international workshop on Audio/visual emotion challenge", |
| "volume": "", |
| "issue": "", |
| "pages": "3--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valstar, M., Schuller, B., Smith, K., Eyben, F., Jiang, B., Bilakhia, S., Schnieder, S., Cowie, R., and Pantic, M. (2013). Avec 2013: the continuous audio/visual emotion and depression recognition challenge. In Proceedings of the 3rd ACM international workshop on Audio/visual emotion challenge, pages 3-10. ACM.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Avec 2014: 3d dimensional affect and depression recognition challenge", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Valstar", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Schuller", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Almaev", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Eyben", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Krajewski", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Cowie", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Pantic", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 4th International Workshop on Audio/Visual Emotion Challenge", |
| "volume": "", |
| "issue": "", |
| "pages": "3--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valstar, M., Schuller, B., Smith, K., Almaev, T., Eyben, F., Krajewski, J., Cowie, R., and Pantic, M. (2014). Avec 2014: 3d dimensional affect and depression recognition challenge. In Proceedings of the 4th International Work- shop on Audio/Visual Emotion Challenge, pages 3-10. ACM.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Improving automatic emotion recognition from speech via gender differentiation", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Vogt", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Andr\u00e9", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. Language Resources and Evaluation Conference (LREC 2006)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vogt, T. and Andr\u00e9, E. (2006). Improving automatic emo- tion recognition from speech via gender differentiation. In Proc. Language Resources and Evaluation Confer- ence (LREC 2006), Genoa. Citeseer.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "The box plot: a simple visual method to interpret data", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "F" |
| ], |
| "last": "Williamson", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "A" |
| ], |
| "last": "Parker", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "S" |
| ], |
| "last": "Kendrick", |
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "Annals of internal medicine", |
| "volume": "110", |
| "issue": "11", |
| "pages": "916--921", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Williamson, D. F., Parker, R. A., and Kendrick, J. S. (1989). The box plot: a simple visual method to interpret data. Annals of internal medicine, 110(11):916-921.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Introduction to statistical learning theory and support vector machines", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Xuegong", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Acta Automatica Sinica", |
| "volume": "26", |
| "issue": "1", |
| "pages": "32--42", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuegong, Z. (2000). Introduction to statistical learning theory and support vector machines. Acta Automatica Sinica, 26(1):32-42.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Unsupervised learning in cross-corpus acoustic emotion recognition", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Weninger", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "W\u00f6llmer", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Schuller", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Automatic Speech Recognition and Understanding (ASRU)", |
| "volume": "", |
| "issue": "", |
| "pages": "523--528", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, Z., Weninger, F., W\u00f6llmer, M., and Schuller, B. (2011). Unsupervised learning in cross-corpus acoustic emotion recognition. In Automatic Speech Recognition and Understanding (ASRU), 2011 IEEE Workshop on, pages 523-528. IEEE.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "F 1 measure of speech-based emotion recognition with ground-truth speaker-related information (AugG and SepG), with the estimated speaker-related hypothesis (AugE and SepE), and without any additional information (baseline).", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": "F 1 measure of speech-based speaker recognition. All the experiments are 10 repetitions of 10-fold crossvalidation emotion-stratified. Values within the graphs are average F 1 measures.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "text": "F 1 measure of speech-based emotion recognition with ground-truth gender-related information (AugG and SepG), with the estimated gender-related hypothesis (AugE and SepE), and without any additional information (baseline). Colours show the optimal classifiers. All the experiments are 10 repetitions of 10-fold cross-validation emotion-stratified. Box-plots with bold frames indicate T-test-based significant differences against the baseline results (at least with p = 0.05).", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "text": "Databases description.", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "text": "Ranks of Sep while speaker-adaptiveness is under examination.", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td/><td/><td>AugG</td><td/></tr><tr><td/><td>Dummy</td><td>Unique</td><td/></tr><tr><td colspan=\"4\">Non-norm Norm Non-norm Norm</td></tr><tr><td>1.5</td><td>1.63</td><td>3.19</td><td>3.69</td></tr><tr><td/><td/><td>AugE</td><td/></tr><tr><td/><td>Dummy</td><td>Unique</td><td/></tr><tr><td colspan=\"4\">Non-norm Norm Non-norm Norm</td></tr><tr><td>2.06</td><td>1.81</td><td>2.56</td><td>3.56</td></tr></table>" |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "text": "Ranks of Aug while speaker-adaptiveness is under examination.", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "text": "Ranks of Sep for gender-adaptive ER.", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td/><td/><td>AugG</td><td/></tr><tr><td/><td>Dummy</td><td>Unique</td><td/></tr><tr><td colspan=\"4\">Non-norm Norm Non-norm Norm</td></tr><tr><td>2.66</td><td>1.92</td><td>3.08</td><td>2.33</td></tr><tr><td/><td/><td>AugE</td><td/></tr><tr><td/><td>Dummy</td><td>Unique</td><td/></tr><tr><td colspan=\"4\">Non-norm Norm Non-norm Norm</td></tr><tr><td>2.42</td><td>2</td><td>3.08</td><td>2.5</td></tr></table>" |
| }, |
| "TABREF7": { |
| "type_str": "table", |
| "text": "Ranks of Aug for gender-adaptive ER.", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |