| { |
| "paper_id": "N19-1037", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:00:46.408334Z" |
| }, |
| "title": "HiGRU: Hierarchical Gated Recurrent Units for Utterance-level Emotion Recognition", |
| "authors": [ |
| { |
| "first": "Wenxiang", |
| "middle": [], |
| "last": "Jiao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "HKSAR", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "wxjiao@cse.cuhk.edu.hk" |
| }, |
| { |
| "first": "Haiqin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "HKSAR", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "hqyang@ieee.org" |
| }, |
| { |
| "first": "Irwin", |
| "middle": [], |
| "last": "King", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "HKSAR", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "king@cse.cuhk.edu.hk" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "R" |
| ], |
| "last": "Lyu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "HKSAR", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "lyu@cse.cuhk.edu.hk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
"abstract": "In this paper, we address three challenges in utterance-level emotion recognition in dialogue systems: (1) the same word can deliver different emotions in different contexts; (2) some emotions are rarely seen in general dialogues; (3) long-range contextual information is hard to be effectively captured. We therefore propose a hierarchical Gated Recurrent Unit (HiGRU) framework with a lower-level GRU to model the word-level inputs and an upper-level GRU to capture the contexts of utterance-level embeddings. Moreover, we promote the framework to two variants, HiGRU with individual features fusion (HiGRU-f) and HiGRU with self-attention and features fusion (HiGRU-sf), so that the word/utterance-level individual inputs and the long-range contextual information can be sufficiently utilized. Experiments on three dialogue emotion datasets, IEMOCAP, Friends, and EmotionPush demonstrate that our proposed HiGRU models attain at least 8.7%, 7.5%, 6.0% improvement over the state-of-the-art methods on each dataset, respectively. Particularly, by utilizing only the textual feature in IEMOCAP, our HiGRU models gain at least 3.8% improvement over the state-of-the-art conversational memory network (CMN) with the trimodal features of text, video, and audio. Role Utterance Emotion Rachel Oh okay, I'll fix that to. What's her email address? Neutral Ross Rachel! Anger Rachel All right, I promise. I'll fix this. I swear. I'll-I'll-I'll-I'll talk to her. Non-neutral Ross Okay! Anger Rachel Okay. Neutral Nurse This room's available. Neutral Rachel Okay! Joy Rachel Okay wait! Non-neutral Rachel You listen to me! Anger",
| "pdf_parse": { |
| "paper_id": "N19-1037", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
"text": "In this paper, we address three challenges in utterance-level emotion recognition in dialogue systems: (1) the same word can deliver different emotions in different contexts; (2) some emotions are rarely seen in general dialogues; (3) long-range contextual information is hard to be effectively captured. We therefore propose a hierarchical Gated Recurrent Unit (HiGRU) framework with a lower-level GRU to model the word-level inputs and an upper-level GRU to capture the contexts of utterance-level embeddings. Moreover, we promote the framework to two variants, HiGRU with individual features fusion (HiGRU-f) and HiGRU with self-attention and features fusion (HiGRU-sf), so that the word/utterance-level individual inputs and the long-range contextual information can be sufficiently utilized. Experiments on three dialogue emotion datasets, IEMOCAP, Friends, and EmotionPush demonstrate that our proposed HiGRU models attain at least 8.7%, 7.5%, 6.0% improvement over the state-of-the-art methods on each dataset, respectively. Particularly, by utilizing only the textual feature in IEMOCAP, our HiGRU models gain at least 3.8% improvement over the state-of-the-art conversational memory network (CMN) with the trimodal features of text, video, and audio. Role Utterance Emotion Rachel Oh okay, I'll fix that to. What's her email address? Neutral Ross Rachel! Anger Rachel All right, I promise. I'll fix this. I swear. I'll-I'll-I'll-I'll talk to her. Non-neutral Ross Okay! Anger Rachel Okay. Neutral Nurse This room's available. Neutral Rachel Okay! Joy Rachel Okay wait! Non-neutral Rachel You listen to me! Anger",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Emotion recognition is a significant artificial intelligence research topic due to the promising potential of developing empathetic machines for people. Emotion is a universal phenomena across different cultures and mainly consists of six basic types: anger, disgust, fear, happiness, sadness, and surprise (Ekman, 1971 (Ekman, , 1992 .", |
| "cite_spans": [ |
| { |
| "start": 307, |
| "end": 319, |
| "text": "(Ekman, 1971", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 320, |
| "end": 334, |
| "text": "(Ekman, , 1992", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we focus on textual dialogue systems because textual feature dominates the performance over audio and video features (Poria et al., 2015, 2017). In utterance-level emotion recognition, an utterance (Olson, 1977) is a unit of speech bounded by breathes or pauses and its goal is to tag each utterance in a dialogue with the indicated emotion.", |
| "cite_spans": [ |
| { |
| "start": 132, |
| "end": 146, |
| "text": "(Poria et al.,", |
| "ref_id": null |
| }, |
| { |
| "start": 213, |
| "end": 226, |
| "text": "(Olson, 1977)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this task, we address three challenges: First, the same word can deliver different emotions in different contexts. For example, in Figure 1 , the word \"okay\" can deliver three different emotions, anger, neutral, and joy, respectively. Strong emotions like joy and anger may be indicated by the symbols \"!\" or \"?\" along the word. To identify a speaker's emotion precisely, we need to explore the dialogue context sufficiently. Second, some emotions are rarely seen in general dialogues. For example, people are usually calm and present a neutral emotion while only in some particular situations, they express strong emotions, like anger or fear. Thus we need to be sensitive to the minority emotions while relieving the effect of the majority emotions. Third, the long-range contextual information is hard to be effectively captured in an utterance/dialogue, especially when the length of an utterance/dialogue in the testing set is longer than those in the training set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 134, |
| "end": 142, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To tackle these challenges, we propose a hierarchical Gated Recurrent Unit (HiGRU) framework for the utterance-level emotion recognition in dialogue systems. More specifically, HiGRU is composed by two levels of bidirectional GRUs, a lower-level GRU to model the word sequences of each utterance to produce individual utterance embeddings, and an upper-level GRU to capture the sequential and contextual relationship of utterances. We further promote the proposed Hi-GRU to two variants, HiGRU with individual features fusion (HiGRU-f), and HiGRU with selfattention and features fusion (HiGRU-sf). In HiGRU-f, the individual inputs, i.e., the word embeddings in the lower-level GRU and the individual utterance embeddings in the upper-level GRU, are concatenated with the hidden states to generate the contextual word/utterance embeddings, respectively. In HiGRU-sf, a self-attention layer is placed on the hidden states from the GRU to learn long-range contextual embeddings, which are concatenated with the original individual embeddings and the hidden states to generate the contextual word/utterance embeddings. Finally, the contextual utterance embedding is sent to a fullyconnected (FC) layer to determine the corresponding emotion. To alleviate the effect of data imbalance issue, we follow (Khosla, 2018) to train our models by minimizing a weighted categorical cross-entropy.", |
| "cite_spans": [ |
| { |
| "start": 1298, |
| "end": 1312, |
| "text": "(Khosla, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We summarize our contributions as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
"text": "\u2022 We propose a HiGRU framework to better learn both the individual utterance embeddings and the contextual information of utterances, so as to recognize the emotions more precisely. \u2022 We propose two progressive HiGRU variants, HiGRU-f and HiGRU-sf, to sufficiently incorporate the individual word/utterance-level information and the long-range contextual information respectively. \u2022 We conduct extensive experiments on three textual dialogue emotion datasets, IEMOCAP, Friends, and EmotionPush. The results demonstrate that our proposed HiGRU models achieve at least 8.7%, 7.5%, 6.0% improvement over state-of-the-art methods on each dataset, respectively. Particularly, by utilizing only the textual feature in IEMOCAP, our proposed HiGRU models gain at least 3.8% improvement over the existing best model, conversational memory network (CMN) with not only the text feature, but also the visual, and audio features.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Text-based emotion recognition is a long-standing research topic (Wilson et al., 2004; Yang et al., 2007; Medhat et al., 2014) . Nowadays, deep learning technologies have become dominant methods due to the outstanding performance. Some prominent models include recursive autoencoders (RAEs) (Socher et al., 2011) , convolutional neural networks (CNNs) (Kim, 2014) , and recurrent neural networks (RNNs) (Abdul-Mageed and Ungar, 2017) . However, these models treat texts independently thus cannot capture the inter-dependence of utterances in dialogues (Kim, 2014; Lai et al., 2015; Grave et al., 2017; Chen et al., 2016; Yang et al., 2016) . To exploit the contextual information of utterances, researchers mainly explore in two directions: (1) extracting contextual information among utterances, or (2) enriching the information embedded in the representations of words and utterances.", |
| "cite_spans": [ |
| { |
| "start": 65, |
| "end": 86, |
| "text": "(Wilson et al., 2004;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 87, |
| "end": 105, |
| "text": "Yang et al., 2007;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 106, |
| "end": 126, |
| "text": "Medhat et al., 2014)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 291, |
| "end": 312, |
| "text": "(Socher et al., 2011)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 352, |
| "end": 363, |
| "text": "(Kim, 2014)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 403, |
| "end": 433, |
| "text": "(Abdul-Mageed and Ungar, 2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 552, |
| "end": 563, |
| "text": "(Kim, 2014;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 564, |
| "end": 581, |
| "text": "Lai et al., 2015;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 582, |
| "end": 601, |
| "text": "Grave et al., 2017;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 602, |
| "end": 620, |
| "text": "Chen et al., 2016;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 621, |
| "end": 639, |
| "text": "Yang et al., 2016)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
"text": "Contextual Information Extraction. The RNN architecture is a standard way to capture the sequential relationship of data. Poria et al. propose a bidirectional contextual long short-term memory (LSTM) network, termed bcLSTM, to model the context of textual features extracted by CNNs. Hazarika et al. improve bcLSTM by a conversational memory network (CMN) to capture the self and inter-speaker emotional influence, where GRU is utilized to model the self-influence and the attention mechanism is employed to excavate the inter-speaker emotional influence. Though CMN is reported to attain better performance than bcLSTM on IEMOCAP (Hazarika et al., 2018) , the memory network is too complicated for small-size dialogue datasets.",
| "cite_spans": [ |
| { |
| "start": 631, |
| "end": 654, |
| "text": "(Hazarika et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Representation Enrichment. Multimodal features have been utilized to enrich the representation of utterances (Poria et al., 2015 (Poria et al., , 2017 . Previous work indicate that textual features dominate the performance of recognizing emotions in contrast to visual or audio features (Poria et al., 2015 (Poria et al., , 2017 . Recently, the textual features are mainly extracted by CNNs to learn individual utterance embeddings (Poria et al., 2015 (Poria et al., , 2017 Zahiri and Choi, 2018; Hazarika et al., 2018) . However, CNNs do not capture the contextual information within each utterance well. On the other hand, hierarchical RNNs have been proposed and demonstrated good performance in", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 128, |
| "text": "(Poria et al., 2015", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 129, |
| "end": 150, |
| "text": "(Poria et al., , 2017", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 287, |
| "end": 306, |
| "text": "(Poria et al., 2015", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 307, |
| "end": 328, |
| "text": "(Poria et al., , 2017", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 432, |
| "end": 451, |
| "text": "(Poria et al., 2015", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 452, |
| "end": 473, |
| "text": "(Poria et al., , 2017", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 474, |
| "end": 496, |
| "text": "Zahiri and Choi, 2018;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 497, |
| "end": 519, |
| "text": "Hazarika et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "GRU \" # $ % GRU GRU GRU GRU \u210e % \u210e \" \u210e # \u210e $ \u210e % \u210e \" \u210e # \u210e $ ( ) ) Fusion + ( ) ) ( -) Max-pooling Individual Utterance Embedding . / GRU GRU \u210e . / \u210e . / \u2026 \u2026 \u2026 \u2026 Attention \u210e % 0 \u2026 \" # $ % ( -) + ( -) Fully-connected Contextual Utterance Embedding 1 2 Attention Softmax \u210e % 3 \u210e % \u210e % GRU GRU GRU GRU GRU % \" # \u210e $ % \" # $ GRU GRU 1 2 1 2 \u2026 \u2026 \u2026 % 0 \u2026 % 3 % % Fusion \u2026 \u2026 Figure 2:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The architecture of our proposed HiGRU-sf. \"Attention\" denotes self-attention. By removing the \"Attention\" layer, we attain HiGRU-f, and by further removing the \"Fusion\" layer, we can recover the vanilla HiGRU.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "conventional text classification task (Tang et al., 2015) , dialogue act classification (Liu et al., 2017; Kumar et al., 2018) , and speaker change detection (Meng et al., 2017) . But they are not well explored in the task of utterance-level emotion recognition in dialogue systems.", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 57, |
| "text": "(Tang et al., 2015)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 88, |
| "end": 106, |
| "text": "(Liu et al., 2017;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 107, |
| "end": 126, |
| "text": "Kumar et al., 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 158, |
| "end": 177, |
| "text": "(Meng et al., 2017)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The task of utterance-level emotion recognition is defined as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Definition 1 (Utterance-level Emotion Recogni- tion).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Suppose we are given a set of dialogues,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "D = {D i } L i=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": ", where L is the number of dialogues. In each dialogue,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "D i = {(u j , s j , c j )} N i j=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": ", is a sequence of N i utterances, where the utterance u j is spoken by the speaker s j \u2208 S with a certain emotion c j \u2208 C. All speakers compose the set S and the set C consists of all emotions, such as anger, joy, sadness, and neutral. Our goal is to train a model M to tag each new utterance with an emotion label from C as accurately as possible.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To solve this task, we propose a hierarchical Gated Recurrent Units (HiGRU) framework and extend two progressive variants, HiGRU with individual features fusion (HiGRU-f) and HiGRU with self-attention and features fusion (HiGRU-sf) (illustrated in Figure 2 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 248, |
| "end": 256, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The vanilla HiGRU consists of two-level GRUs: the lower-level bidirectional GRU is to learn the individual utterance embedding by modeling the word sequence within an utterance and the upperlevel bidirectional GRU is to learn the contextual utterance embedding by modeling the utterance sequence within a dialogue.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Individual Utterance Embedding. For the j th utterance in D i , u j = {w k } M j k=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": ", where M j is the number of words in the utterance u j . The corresponding sequence of individual word embeddings", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "{e(w k )} M j k=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "are fed into the lower-level bidirectional GRU to learn the individual utterance embedding in two opposite directions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2212 \u2192 h k = GRU(e(w k ), \u2212\u2212\u2192 h k\u22121 ), (1) \u2190 \u2212 h k = GRU(e(w k ), \u2190\u2212\u2212 h k+1 ).", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The two hidden states", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2212 \u2192 h k and \u2190 \u2212 h k are concatenated into hs = [ \u2212 \u2192 h k ; \u2190 \u2212", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "h k ] to produce the contextual word embedding for w k via the tanh activation function on a linear transformation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e c (w k ) = tanh(W w \u2022 hs + b w ),", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where W w \u2208 R d 1 \u00d72d 1 and b w \u2208 R d 1 are the model parameters, d 0 and d 1 are the dimensions of word embeddings and the hidden states of the lower-level GRU, respectively. The individual utterance embedding is then obtained by max-pooling on the contextual word embeddings within the utterance:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e(u j ) = maxpool {e c (w k )} M j k=1 .", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Contextual Utterance Embedding. For the i th dialogue,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "D i = {(u j , s j , c j )} N i j=1 , the learned indi- vidual utterance embeddings, {e(u j )} N i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "j=1 , are fed into the upper-level bidirectional GRU to capture the sequential and contextual relationship of utterances in a dialogue:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2212 \u2192 H j = GRU(e(u j ), \u2212 \u2212\u2212 \u2192 H j\u22121 ), (5) \u2190 \u2212 H j = GRU(e(u j ), \u2190 \u2212\u2212 \u2212 H j+1 ).", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Here, the hidden states of the upper-level GRU are represented by H j \u2208 R d 2 , to distinguish from those learned in the lower-level GRU denoted by h k . Accordingly, we can obtain the contextual utterance embedding by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e c (u j ) = tanh(W u \u2022 Hs + b u ),", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Hs = [ \u2212 \u2192 H j ; \u2190 \u2212 H j ], W u \u2208 R d 2 \u00d72d 2 and b u \u2208 R d 2 are the model parameters, d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "2 is the dimension of the hidden states in the upper-level GRU. Since the emotions are recognized at utterance-level, the learned contextual utterance embedding e c (u j ) is directly fed to a FC layer followed by a softmax function to determine the corresponding emotion label:\u0177", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "j = softmax(W f c \u2022 e c (u j ) + b f c ),", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where\u0177 j is the predicted vector over all emotions, and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "W f c \u2208 R |C|\u00d7d 2 , b f c \u2208 R |C| .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU: Hierarchical GRU", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The vanilla HiGRU contains two main issues: (1) the individual word/utterance embeddings are diluted with the stacking of layers;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU-f: HiGRU + Individual Features Fusion", |
| "sec_num": "3.2" |
| }, |
| { |
"text": "(2) the upper-level GRU tends to gather more contextual information from the majority emotions, which deteriorates the overall model performance. To resolve these two problems, we propose to fuse individual word/utterance embeddings with the hidden states from GRUs so as to strengthen the information of each word/utterance in its contextual embedding. This variant is named as HiGRU-f, representing HiGRU with individual features fusion. Hence, the lower-level GRU can maintain individual word embeddings and the upper-level GRU can relieve the effect of majority emotions and attain a more precise utterance representation for different emotions. Specifically, ",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HiGRU-f: HiGRU + Individual Features Fusion", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": ". . the contextual embeddings are updated as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "\u210e 1 \u210e 2 \u210e 3 . . . \u210e Copy \u00d7 Softmax Matmul \u210e 1 \u210e 2 \u210e 3 . . . \u210e \u210e 1 \u210e 2 \u210e 3 . . . \u210e", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e c (w k ) = tanh(W w \u2022 hs f + b w ),", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "e c (u", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "j ) = tanh(W u \u2022 Hs f + b u ),", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "W w \u2208 R d 1 \u00d7(d 0 +2d 1 ) , W u \u2208 R d 2 \u00d7(d 1 +2d 2 ) , hs f = [ \u2212 \u2192 h k ; e(w k ); \u2190 \u2212 h k ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": ", and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "Hs f = [ \u2212 \u2192 H j ; e(u j ); \u2190 \u2212 H j ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "3.3 HiGRU-sf: HiGRU + Self-Attention and Feature Fusion", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
"text": "Another challenging issue is to extract the contextual information of long sequences, especially the sequences in the testing set that are longer than those in the training set . To fully utilize the global contextual information, we place a self-attention layer upon the hidden states of HiGRU and fuse the attention outputs with the individual word/utterance embeddings and the hidden states to learn the contextual word/utterance embeddings. Hence, this variant is termed HiGRU-sf, representing HiGRU with self-attention and features fusion. Particularly, we apply self-attention upon the forward and backward hidden states separately to produce the left context embedding, h l k (H l j ), and the right context embedding, h r k (H r j ), respectively. This allows us to gather the unique global contextual information at the current step in two opposite directions and yield the corresponding contextual embeddings computed as follows:",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e c (w k ) = tanh(W w \u2022 hs sf + b w ),", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "e c (u", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "j ) = tanh(W u \u2022 Hs sf + b u ),", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "W w \u2208 R d 1 \u00d7(d 0 +4d 1 ) , W u \u2208 R d 2 \u00d7(d 1 +4d 2 ) , hs sf = [h l k ; \u2212 \u2192 h k ; e(w k ); \u2190 \u2212 h k ; h r k ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": ", and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "Hs sf = [H l j ; \u2212 \u2192 H j ; e(u j ); \u2190 \u2212 H j ; H r j ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": ". Self-Attention (SA). The self-attention mechanism is an effective non-recurrent architecture to compute the relation between one input to all other inputs and has been successfully applied in various natural language processing applications such as reading comprehension (Hu et al., 2018) , and neural machine translation (Vaswani et al., 2017) . Figure 3 shows the dot-product SA over the forward hidden states of GRU to learn the left context h l k . Each element in the attention matrix is computed by", |
| "cite_spans": [ |
| { |
| "start": 273, |
| "end": 290, |
| "text": "(Hu et al., 2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 324, |
| "end": 346, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 349, |
| "end": 357, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "f ( \u2212 \u2192 h k , \u2212 \u2192 h p ) = \u2212 \u2192 h k \u2212 \u2192 h p , if k, p \u2264 M j , \u2212\u221e,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "otherwise.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "An attention mask is then applied to waive the inner attention between the sequence inputs and paddings. At each step, the corresponding left context h l k is then computed by the weighted sum of all the forward hidden states:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h l k = M j p=1 a kp \u2212 \u2192 h p , a kp = exp(f ( \u2212 \u2192 h k , \u2212 \u2192 hp)) M j p =1 exp f ( \u2212 \u2192 h k , \u2212 \u2192 h p ) ,", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "where a kp is the weight of \u2212 \u2192 h p to be included in h l k . The right context h r k can be computed similarly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": ".", |
| "sec_num": null |
| }, |
| { |
| "text": "Following (Khosla, 2018) which attains the best performance in the EmotionX shared task (Hsu and Ku, 2018), we minimize a weighted categorical cross-entropy on each utterance of all dialogues to optimize the model parameters:", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 24, |
| "text": "(Khosla, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "loss = \u2212 1 L i=1 N i L i=1 N i j=1 \u03c9(c j ) |C| c=1 y c j log 2 (\u0177 c j ),", |
| "eq_num": "(15)" |
| } |
| ], |
| "section": "Model Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where y j is the original one-hot vector of the emotion labels, and y c j and\u0177 c j are the elements of y j and\u0177 j corresponding to the class c.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Similar to (Khosla, 2018) , we assign the loss weight \u03c9(c j ) inversely proportional to the number of training utterances in the class c j , denoted by I c , i.e., assigning larger loss weights for the minority classes to relieve the data imbalance issue. The difference is that we add a constant \u03b1 to adjust the smoothness of the distribution. Then, we have:", |
| "cite_spans": [ |
| { |
| "start": 11, |
| "end": 25, |
| "text": "(Khosla, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "1 \u03c9(c) = I \u03b1 c |C| c =1 I \u03b1 c .", |
| "eq_num": "(16)" |
| } |
| ], |
| "section": "Model Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We conduct systematic experiments to demonstrate the advantages of our proposed HiGRU models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The experiments are carried out on three textual dialogue emotion datasets (see the statistics in Table 1) :", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 98, |
| "end": 106, |
| "text": "Table 1)", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "IEMOCAP 1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "It contains approximately 12 hours of audiovisual data, including video, speech, motion capture of face, text transcriptions. Following (Poria et al., 2017; Hazarika et al., 2018): (1) We apply the first four sessions for training and the last session for testing;", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 156, |
| "text": "(Poria et al., 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 157, |
| "end": 179, |
| "text": "Hazarika et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(2) The validation set is extracted from the shuffled training set with the ratio of 80:20; (3) We only evaluate the performance on four emotions: anger, happiness, sadness, neutral, and remove the remaining utterances.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Friends 2 . The dataset is annotated from the Friends TV Scripts (Hsu and Ku, 2018), where each dialogue in the dataset consists of a scene of multiple speakers. Totally, there are 1,000 dialogues, which are split into 720, 80, and 200 dialogues for training, validation, and testing, respectively. Each utterance in a dialogue is labeled by one of the eight emotions: anger, joy, sadness, neutral, surprise, disgust, fear, and non-neutral.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EmotionPush 3 . The dataset consists of private conversations between friends on the Facebook messenger collected by an App called EmotionPush, which is released for the EmotionX shared task (Hsu and Ku, 2018). Totally, there are 1,000 dialogues, which are split into 720, 80, and 200 dialogues for training, validation, and testing, respectively. All the utterances are categorized into one of the eight emotions as in the Friends dataset. Following the setup of (Hsu and Ku, 2018), in Friends and EmotionPush, we only evaluate the model performance on four emotions: anger, joy, sadness, and neutral, and we exclude the contribution of the remaining emotion classes during training by setting their loss weights to zero.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Data Preprocessing. We preprocess the datasets by the following steps: (1) The utterances are split into tokens with each word being made into the lowercase; (2) All non-alphanumerics except \"?\" and \"!\" are removed because these two symbols usually exhibit strong emotions, such as surprise, joy and anger; (3) We build a dictionary based on the words and symbols extracted, and follow (Poria et al., 2017) to represent the tokens by the publicly available 300-dimensional word2vec 4 vectors trained on 100 billion words from Google News. The tokens not included in the word2vec dictionary are initialized by randomly-generated vectors.", |
| "cite_spans": [ |
| { |
| "start": 386, |
| "end": 406, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To conduct fair comparison, we adopt two metrics as (Hsu and Ku, 2018), the weighted accuracy (WA) and unweighted accuracy (UWA):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "WA = |C| c=1 p c \u2022 a c , UWA = 1 |C| |C| c=1 a c ,", |
| "eq_num": "(17)" |
| } |
| ], |
| "section": "Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where p c is the percentage of the class c in the testing set, and a c is the corresponding accuracy. Generally, recognizing strong emotions may provide more value than detecting the neutral emotion (Hsu and Ku, 2018). Thus, in Friends and EmotionPush, UWA is a more favorable evaluation metric because WA is heavily compromised by the large proportion of the neutral emotion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Our proposed vanilla HiGRU, HiGRU-f, and HiGRU-sf 5 are compared with the following state-of-the-art baselines:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Methods", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "bcLSTM (Poria et al., 2017) : a bidirectional contextual LSTM with multimodal features extracted by CNNs;", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 27, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Methods", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "CMN (Hazarika et al., 2018) : a conversational memory network with multimodal features extracted by CNNs;", |
| "cite_spans": [ |
| { |
| "start": 4, |
| "end": 27, |
| "text": "(Hazarika et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Methods", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "SA-BiLSTM (Luo et al., 2018) : a self-attentive bidirectional LSTM model, a neat model achieving the second place of EmotionX Challenge (Hsu and Ku, 2018); CNN-DCNN (Khosla, 2018) : a convolutionaldeconvolutional autoencoder with more handmade features, the winner of EmotionX Challenge (Hsu and Ku, 2018); bcLSTM * and bcGRU: our implemented bcLSTM and bcGRU with the weighted loss on the textual feature extracted from CNNs.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 28, |
| "text": "(Luo et al., 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 165, |
| "end": 179, |
| "text": "(Khosla, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compared Methods", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "All our implementations are coded on the Pytorch framework. To prevent the models fitting the order of data, we randomly shuffle the training set at the beginning of every epoch.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Parameters. For bcLSTM * and bcGRU, the CNN layer follows the setup of (Kim, 2014) , i.e., consisting of the kernels of 3, 4, and 5 with 100 feature maps each. The convolution results of each kernel are fed to a max-over-time pooling operation. The dimension of the hidden states of the upper-level bidirectional LSTM or GRU is set to 300. For HiGRU, HiGRU-f, and HiGRU-sf, the dimensions of hidden states are set to 300 for both levels. The final FC layer contains two sub-layers with 100 neurons each.", |
| "cite_spans": [ |
| { |
| "start": 71, |
| "end": 82, |
| "text": "(Kim, 2014)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Training. We adopt Adam (Kingma and Ba, 2014) as the optimizer and set an initial learning rate, 1 \u00d7 10 \u22124 for IEMOCAP and 2.5 \u00d7 10 \u22124 for Friends and EmotionPush, respectively. An annealing strategy is utilized by decaying the learning rate by half every 20 epochs. Early stopping with a patience of 10 is adopted to terminate training based on the accuracy of the validation set. Specifically, following the best models on each dataset, the parameters are tuned to optimize WA on the validation set of IEMOCAP and to optimize UWA on the validation set of Friends and EmotionPush, respectively. Gradient clipping with a norm of 5 is applied to model parameters. To prevent overfitting, dropout with a rate of 0.5 is applied after the contextual word/utterance embeddings, and the FC layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Loss weights. For Friends and EmotionPush, as mentioned in Section 4.1, the loss weights are set to zero except the four considered emotions, to ignore the others during training. Besides, the power rate \u03b1 of loss weights is tested from 0 to 1.5 with 75.41 91.64 79.79 70.74 80.6(0.5) 79.4(0.5) HiGRU-f (T) 76.69 88.91 80.25 75.92 81.5(0.7) 80.4(0.5) HiGRU-sf (T) 74.78 89.65 80.50 77.58 82.1(0.4) 80.6(0.2) Table 2 : Experimental results on IEMOCAP. \"(Feat)\" represents the features used in the models, where T, V, and A denote the textual, visual, and audio features, respectively. The results of bcLSTM and CMN are from (Poria et al., 2017) and (Hazarika et al., 2018) , respectively. The underlined results are derived by us accordingly, while \"-\" means the results are unavailable from the original paper. a step of 0.25, and we use the best one for each model and dataset. Table 2 and Table 3 report the average results of 10 trials each on the three datasets, where the standard deviations of WA and UWA are recorded by the subscripts in round brackets. The results of bcLSTM, CMN, SA-BiLSTM, and CNN-DCNN are copied directly from the original papers for a fair comparison because we follow the same configuration for the corresponding datasets. From the results, we have the following observations:", |
| "cite_spans": [ |
| { |
| "start": 623, |
| "end": 643, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 648, |
| "end": 671, |
| "text": "(Hazarika et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 408, |
| "end": 415, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 879, |
| "end": 898, |
| "text": "Table 2 and Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "(1) Baselines. Our implemented bcLSTM * and bcGRU, attain comparable performance with the state-of-the-art methods on all three datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "From the results on IEMOCAP in Table 2 , we observe that: (a) By utilizing the textual feature only, bcGRU outperforms bcLSTM and CMN trained on the textual feature significantly, attaining +3.3 and +2.8 gain in terms of WA, respectively. bcLSTM * performs better than bcGRU, and even beats bcLSTM and CMN with the trimodal features in terms of WA. In terms of UWA, CMN performs better than bcLSTM * only when it is equipped with multimodal features. (b) By examining the detailed accuracy in each emotion, bcLSTM * and bcGRU with the textual feature attain much higher accuracy on the neutral emotion than bcLSTM with the only textual feature while maintaining good performance on the other three emotions. The results show that the weighted loss function benefits the training of models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 31, |
| "end": 38, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "From the results on Friends and EmotionPush in Table 3 , we observe that bcLSTM * and bcGRU trained on the same dataset (F+E) of CNN-DCNN perform better than CNN-DCNN on EmotionPush while attaining comparable performance with CNN-DCNN on Friends. The results show that by utilizing the contextual information with the weighted loss function, bcLSTM * and bcGRU can beat the state-of-the-art method.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 47, |
| "end": 54, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "(2) HiGRUs vs. Baselines. Our proposed HiGRUs outperform the state-of-the-art methods with significant margins on all the datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "From Table 2 , we observe that: (a) CMN with the trimodal features attains the best performance on the anger emotion while our vanilla HiGRU achieves the best performance on the happiness emotion and gains further improvement on sadness and neutral emotions over CMN. Overall, the vanilla HiGRU achieves at least 8.7% and 3.8% improvement over CMN with the textual feature and the trimodal features in terms of WA, respectively. The results, including those of bcLSTM * and bcGRU, indicate that GRU learns better representations of utterances than CNN in this task. (b) The two variants, HiGRU-f and HiGRU-sf, can further attain +0.9 and +1.5 improvement over HiGRU in terms of WA and +1.0 and +1.2 improvement over HiGRU in terms of UWA, respectively. The results demonstrate that the included individual word/utterance-level features and long-range contextual information in HiGRU-f and HiGRU-sf are indeed capable of boosting the performance of the vanilla HiGRU.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 5, |
| "end": 12, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "From Table 3 , we can see that: (a) In terms of UWA, HiGRU trained and tested on individual sets of Friends and EmotionPush gains at least 7.5% and 6.0% improvement over CNN-DCNN, respectively. Overall, our proposed HiGRU achieves well-balanced performance for the four tested emotions, especially attaining significantly better performance on the minority emotions of anger and sadness. (b) Moreover, HiGRU-f and HiGRU-sf further improve HiGRU +1.2 accuracy and +1.7 accuracy on Friends and +0.6 accuracy and +1.8 accuracy on EmotionPush in terms of UWA, respectively. The results again demonstrate the superior power of HiGRU-f and HiGRU-sf.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 5, |
| "end": 12, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "(3) Mixing Training Sets. By examining the results from the last ten rows in Table 3 , we conclude that it does not necessarily improve the performance by mixing the two sets of training data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 77, |
| "end": 84, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Though the best performance of SA-BiLSTM (Luo et al., 2018) and (Khosla, 2018) , respectively. d1 bcGRU HiGRU HiGRU-f HiGRU-sf -65.6(1.2) ---300 -67.2(0.6) 68.4(1.0) 68.9(1.5) 200 -67.6(2.0) 68.9(0.9) 69.1(1.3) 150 -67.6(1.5) 68.5(1.3) 68.9(1.2) 100 -67.5(1.7) 68.4(1.3) 69.6(1.0) and CNN-DCNN is obtained by training on the mixed dataset, the testing results show that our implemented bcLSTM * , bcGRU and our proposed HiGRU models can attain better performance on EmotionPush but yield worse performance on Friends in terms of UWA. By examining the detailed emotions, we speculate that: EmotionPush is a highly imbalanced dataset with over 60% of utterances in the neutral emotion. Introducing EmotionPush into a more balanced dataset, Friends, is equivalent to down-sampling the minority emotions in Friends. This hurts the performance on the minority emotions, anger and sadness. Meanwhile, introducing Friends into EmotionPush corresponds to upsampling the minority emotions in EmotionPush. The performance of the sadness emotion is significantly boosted and that on the anger emotion is at least unaffected.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 59, |
| "text": "(Luo et al., 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 64, |
| "end": 78, |
| "text": "(Khosla, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Main Results", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Model Size. We study how the scale of the utterance encoder affects the performance of our proposed models, especially when our models contain a similar number of parameters as the baseline, say bcGRU. Such a fair condition can be made between our HiGRU-sf and bcGRU if we set d 1 to 150. From the testing results on Friends in Table 4 , we can observe that: (1) Under the fair condition, the performance of our HiGRU-sf is not degraded compared to that when d 1 = 300. HiGRU-sf still outperforms bcGRU by a significant margin.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 328, |
| "end": 335, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "(2) Overall, no matter d 1 is larger or smaller than 150, HiGRU-sf maintains consistently good performance and the difference between HiGRU-sf and HiGRU-f or HiGRU keeps noticeable. These results further demonstrate the superiority of our proposed models over the baseline bcGRU and the motivation of developing the two variants based on the vanilla HiGRU.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "Successful Cases. We investigate three scenes related to the word \"okay\" that expresses three distinct emotions. The first two scenes come from the testing set of Friends and the third one from that of IEMOCAP. We report the predictions made by bcGRU and our HiGRU-sf, respectively, in Table 5 . In Scene-1, \"okay\" with period usually exhibits little emotion and both bcGRU and HiGRU-sf correctly classify it as \"Neu\". In Scene-2, \"okay\" with \"!\" expresses strong emotion. However, bcGRU misclassifies it to \"Ang\" while HiGRU-sf successfully recognizes it as \"Joy\". Actually, the mistake can be traced back to the first utterance of this scene which is also misclassified as \"Ang\". This indicates that bcGRU tends to capture the wrong atmosphere within the dialogue. As for Scene-3, \"okay\" with period now indicates \"Sad\" and is correctly recognized by HiGRU-sf but misclassified as \"Neu\" by bcGRU. Note that HiGRU-sf also classifies the third utterance in Scene-3 as \"Sad\" which seems to be conflicting", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 286, |
| "end": 293, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": ",1 ,2 ,3 . . . , 1,1 1,2 1,3 . . . 1, 2,1 2,2 2,3 . . . 2, 3,1 3,2 3,3 . . . 3, . . .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://sail.usc.edu/iemocap/ 2 http://doraemon.iis.sinica.edu.tw/ emotionlines 3 http://doraemon.iis.sinica.edu.tw/ emotionlines", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://code.google.com/archive/p/ word2vec/ 5 https://github.com/wxjiao/HiGRUs", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work is supported by the Research Grants Council of the Hong Kong Special Administrative Region, China (No. CUHK 14208815 and No. CUHK 14210717 of the General Research Fund, and Project No. UGC/IDS14/16), and Meitu (No. 7010445). We thank the three anonymous reviewers for the insightful suggestions on various aspects of this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| }, |
| { |
| "text": "Utterance Truth bcGRU HiGRU-sf Scene-1 Phoebe Okay. Oh but don't tell them Monica's pregnant because they frown on that. to the ground truth. In fact, our HiGRU-sf captures the blues of this parting situation, where the true label \"Hap\" may not be that suitable. These results show that our HiGRU-sf learns from both each utterance and the context, and can make correct predictions of the emotion of each utterance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Role", |
| "sec_num": null |
| }, |
| { |
| "text": "Failed Cases. At last, we show some examples that both bcGRU and our HiGRU-sf fail in recognizing the right emotions in Table 6 , i.e., Scene-4 from Friends and Scene-5 from EmotionPush. In Scene-4, both bcGRU and HiGRU-sf make wrong predictions for the fifth and the sixth utterances. It should be good news that Ross has his paper published and Rachel is glad to see related reports about it. However, the transcripts do not reveal very strong emotions compared to what the characters might act in the TV show. This kind of scenes may be addressed by incorporating some other features like audio and video. As for Scene-5, the third and the fifth utterances are classified into wrong emotions. Notice that the emotions indicated from the two utterances are very subtle even for humans. The Speaker-2 did not plan to get up today, but Speaker-1 kept him/her up and it ended up with a really lax day. So, the Speaker-2 feels joyful now. This indicates that even taking the context into account, the models' capability of understanding subtle emotions is still limited and more exploration is required.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 120, |
| "end": 127, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Neu", |
| "sec_num": null |
| }, |
| { |
| "text": "We propose a hierarchical Gated Recurrent Unit (HiGRU) framework to tackle the utterance-level emotion recognition in dialogue systems, where the individual utterance embeddings are learned by the lower-level GRU and the contexts of utterances are captured by the upper-level GRU. We promote the HiGRU framework to two variants, HiGRU-f, and HiGRU-sf, and effectively capture the word/utterance-level inputs and the long-range contextual information, respectively. Experimental results demonstrate that our proposed HiGRU models can well handle the data imbalance issue and sufficiently capture the available text information, yielding significant performance boosting on all three tested datasets. In the future, we plan to explore semi-supervised learning methods to address the problem of data scarcity in this task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Emonet: Fine-grained emotion detection with gated recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Abdul", |
| "suffix": "" |
| }, |
| { |
| "first": "-Mageed", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Lyle", |
| "middle": [ |
| "H" |
| ], |
| "last": "Ungar", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "718--728", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Abdul-Mageed and Lyle H. Ungar. 2017. Emonet: Fine-grained emotion detection with gated recurrent neural networks. In ACL, pages 718-728.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2014. Neural machine translation by jointly learning to align and translate. CoRR, abs/1409.0473.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural sentiment classification with user and product attention", |
| "authors": [ |
| { |
| "first": "Huimin", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Cunchao", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yankai", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1650--1659", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huimin Chen, Maosong Sun, Cunchao Tu, Yankai Lin, and Zhiyuan Liu. 2016. Neural sentiment classifi- cation with user and product attention. In EMNLP, pages 1650-1659.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merrienboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Aglar G\u00fcl\u00e7ehre", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1724--1734", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart van Merrienboer, \u00c7 aglar G\u00fcl\u00e7ehre, Dzmitry Bahdanau, Fethi Bougares, Hol- ger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. In EMNLP, pages 1724-1734.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Universal and cultural differences in facial expressions of emotion", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Ekman", |
| "suffix": "" |
| } |
| ], |
| "year": 1971, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Ekman. 1971. Universal and cultural differences in facial expressions of emotion. Lincoln: Univer- sity of Nebraska Press.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Are there basic emotions?", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Ekman", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Psychological Review", |
| "volume": "99", |
| "issue": "3", |
| "pages": "550--553", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Ekman. 1992. Are there basic emotions? Psycho- logical Review, 99(3):550-553.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Bag of tricks for efficient text classification", |
| "authors": [ |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "427--431", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edouard Grave, Tomas Mikolov, Armand Joulin, and Piotr Bojanowski. 2017. Bag of tricks for efficient text classification. In EACL, pages 427-431.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Conversational memory network for emotion recognition in dyadic dialogue videos", |
| "authors": [ |
| { |
| "first": "Devamanyu", |
| "middle": [], |
| "last": "Hazarika", |
| "suffix": "" |
| }, |
| { |
| "first": "Soujanya", |
| "middle": [], |
| "last": "Poria", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Zadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Erik", |
| "middle": [], |
| "last": "Cambria", |
| "suffix": "" |
| }, |
| { |
| "first": "Louis-Philippe", |
| "middle": [], |
| "last": "Morency", |
| "suffix": "" |
| }, |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Zimmermann", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "2122--2132", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Devamanyu Hazarika, Soujanya Poria, Amir Zadeh, Erik Cambria, Louis-Philippe Morency, and Roger Zimmermann. 2018. Conversational memory net- work for emotion recognition in dyadic dialogue videos. In NAACL-HLT, pages 2122-2132.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Socialnlp 2018 emotionx challenge overview: Recognizing emotions in dialogues", |
| "authors": [ |
| { |
| "first": "Chao-Chun", |
| "middle": [], |
| "last": "Hsu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lun-Wei", |
| "middle": [], |
| "last": "Ku", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "SocialNLP@ACL'18", |
| "volume": "", |
| "issue": "", |
| "pages": "27--31", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chao-Chun Hsu and Lun-Wei Ku. 2018. Socialnlp 2018 emotionx challenge overview: Recognizing emotions in dialogues. In SocialNLP@ACL'18, pages 27-31.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Reinforced mnemonic reader for machine reading comprehension", |
| "authors": [ |
| { |
| "first": "Minghao", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxing", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "4099--4106", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minghao Hu, Yuxing Peng, Zhen Huang, Xipeng Qiu, Furu Wei, and Ming Zhou. 2018. Reinforced mnemonic reader for machine reading comprehen- sion. In IJCAI, pages 4099-4106.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Emotionx-ar: CNN-DCNN autoencoder based emotion classifier", |
| "authors": [ |
| { |
| "first": "Sopan", |
| "middle": [], |
| "last": "Khosla", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "SocialNLP@ACL'18", |
| "volume": "", |
| "issue": "", |
| "pages": "37--44", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sopan Khosla. 2018. Emotionx-ar: CNN-DCNN autoencoder based emotion classifier. In So- cialNLP@ACL'18, pages 37-44.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Convolutional neural networks for sentence classification", |
| "authors": [ |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1746--1751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoon Kim. 2014. Convolutional neural networks for sentence classification. In EMNLP, pages 1746- 1751.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. CoRR, abs/1412.6980.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Dialogue act sequence labeling using hierarchical encoder with CRF", |
| "authors": [ |
| { |
| "first": "Harshit", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Arvind", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Riddhiman", |
| "middle": [], |
| "last": "Dasgupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Sachindra", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Harshit Kumar, Arvind Agarwal, Riddhiman Dasgupta, and Sachindra Joshi. 2018. Dialogue act sequence labeling using hierarchical encoder with CRF. In AAAI.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Recurrent convolutional neural networks for text classification", |
| "authors": [ |
| { |
| "first": "Siwei", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Liheng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "2267--2273", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siwei Lai, Liheng Xu, Kang Liu, and Jun Zhao. 2015. Recurrent convolutional neural networks for text classification. In AAAI, pages 2267-2273.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Using context information for dialog act classification in DNN framework", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kun", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhao", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yun", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2170--2178", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang Liu, Kun Han, Zhao Tan, and Yun Lei. 2017. Us- ing context information for dialog act classification in DNN framework. In EMNLP, pages 2170-2178.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Emotionx-dlc: Self-attentive BiLSTM for detecting sequential emotions in dialogues", |
| "authors": [ |
| { |
| "first": "Linkai", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Haiqing", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Francis", |
| "middle": [ |
| "Y L" |
| ], |
| "last": "Chin", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "SocialNLP@ACL'18", |
| "volume": "", |
| "issue": "", |
| "pages": "32--36", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Linkai Luo, Haiqing Yang, and Francis Y. L. Chin. 2018. Emotionx-dlc: Self-attentive BiLSTM for detecting sequential emotions in dialogues. In So- cialNLP@ACL'18, pages 32-36.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Sentiment analysis algorithms and applications: A survey", |
| "authors": [ |
| { |
| "first": "Walaa", |
| "middle": [], |
| "last": "Medhat", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Hassan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoda", |
| "middle": [], |
| "last": "Korashy", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Ain Shams Engineering Journal", |
| "volume": "5", |
| "issue": "4", |
| "pages": "1093--1113", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Walaa Medhat, Ahmed Hassan, and Hoda Korashy. 2014. Sentiment analysis algorithms and applica- tions: A survey. Ain Shams Engineering Journal, 5(4):1093-1113.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Hierarchical RNN with static sentence-level attention for textbased speaker change detection", |
| "authors": [ |
| { |
| "first": "Zhao", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "" |
| }, |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Mou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhi", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "2203--2206", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhao Meng, Lili Mou, and Zhi Jin. 2017. Hierarchi- cal RNN with static sentence-level attention for text- based speaker change detection. In CIKM, pages 2203-2206.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "From utterance to text: The bias of language in speech and writing", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Olson", |
| "suffix": "" |
| } |
| ], |
| "year": 1977, |
| "venue": "Harvard Educational Review", |
| "volume": "47", |
| "issue": "3", |
| "pages": "257--281", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Olson. 1977. From utterance to text: The bias of language in speech and writing. Harvard educa- tional review, 47(3):257-281.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Deep convolutional neural network textual features and multiple kernel learning for utterance-level multimodal sentiment analysis", |
| "authors": [ |
| { |
| "first": "Soujanya", |
| "middle": [], |
| "last": "Poria", |
| "suffix": "" |
| }, |
| { |
| "first": "Erik", |
| "middle": [], |
| "last": "Cambria", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "F" |
| ], |
| "last": "Gelbukh", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2539--2544", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soujanya Poria, Erik Cambria, and Alexander F. Gel- bukh. 2015. Deep convolutional neural network textual features and multiple kernel learning for utterance-level multimodal sentiment analysis. In EMNLP, pages 2539-2544.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Context-dependent sentiment analysis in user-generated videos", |
| "authors": [ |
| { |
| "first": "Soujanya", |
| "middle": [], |
| "last": "Poria", |
| "suffix": "" |
| }, |
| { |
| "first": "Erik", |
| "middle": [], |
| "last": "Cambria", |
| "suffix": "" |
| }, |
| { |
| "first": "Devamanyu", |
| "middle": [], |
| "last": "Hazarika", |
| "suffix": "" |
| }, |
| { |
| "first": "Navonil", |
| "middle": [], |
| "last": "Majumder", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Zadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Louis-Philippe", |
| "middle": [], |
| "last": "Morency", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "873--883", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soujanya Poria, Erik Cambria, Devamanyu Hazarika, Navonil Majumder, Amir Zadeh, and Louis-Philippe Morency. 2017. Context-dependent sentiment anal- ysis in user-generated videos. In ACL, pages 873- 883.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Semi-supervised recursive autoencoders for predicting sentiment distributions", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "H" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "151--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Jeffrey Pennington, Eric H. Huang, Andrew Y. Ng, and Christopher D. Manning. 2011. Semi-supervised recursive autoencoders for predict- ing sentiment distributions. In EMNLP, pages 151- 161.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Document modeling with gated recurrent neural network for sentiment classification", |
| "authors": [ |
| { |
| "first": "Duyu", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1422--1432", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duyu Tang, Bing Qin, and Ting Liu. 2015. Document modeling with gated recurrent neural network for sentiment classification. In EMNLP, pages 1422- 1432.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "6000--6010", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In NIPS, pages 6000-6010.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Just how mad are you? finding strong and weak opinion clauses", |
| "authors": [ |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Hwa", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "761--769", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theresa Wilson, Janyce Wiebe, and Rebecca Hwa. 2004. Just how mad are you? finding strong and weak opinion clauses. In AAAI, pages 761-769.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Emotion classification using web blog corpora", |
| "authors": [ |
| { |
| "first": "Changhua", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Hsin-Yih Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Hsin-Hsi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "WI", |
| "volume": "", |
| "issue": "", |
| "pages": "275--278", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Changhua Yang, Kevin Hsin-Yih Lin, and Hsin-Hsi Chen. 2007. Emotion classification using web blog corpora. In WI, pages 275-278.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Hierarchical attention networks for document classification", |
| "authors": [ |
| { |
| "first": "Zichao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Diyi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "J" |
| ], |
| "last": "Smola", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [ |
| "H" |
| ], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "NAACL HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1480--1489", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alexander J. Smola, and Eduard H. Hovy. 2016. Hi- erarchical attention networks for document classifi- cation. In NAACL HLT, pages 1480-1489.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Emotion detection on TV show transcripts with sequencebased convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Sayyed", |
| "middle": [ |
| "M" |
| ], |
| "last": "Zahiri", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinho", |
| "middle": [ |
| "D" |
| ], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "44--52", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sayyed M. Zahiri and Jinho D. Choi. 2018. Emotion detection on TV show transcripts with sequence- based convolutional neural networks. In AAAI, pages 44-52.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "The word \"okay\" exhibits different emotions in the American television sitcom, Friends." |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Self-attention over the forward hidden states of GRU." |
| }, |
| "TABREF1": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Statistics of the textual dialogue datasets." |
| }, |
| "TABREF4": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Experimental results on Friends and EmotionPush. In the Train column, F(E) denotes the model is trained on only one training set, Friends or EmotionPush. F+E means the model is trained on the mixed training set while validated and tested individually. The results of SA-BiLSTM and CNN-DCNN are from" |
| }, |
| "TABREF5": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Experimental results of UWA on Friends by our proposed models with different scales of utterance encoder." |
| } |
| } |
| } |
| } |