| { |
| "paper_id": "S18-1043", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:44:08.907090Z" |
| }, |
| "title": "TCS Research at SemEval-2018 Task 1: Learning Robust Representations using Multi-Attention Architecture", |
| "authors": [ |
| { |
| "first": "Hardik", |
| "middle": [], |
| "last": "Meisheri", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "TCS Research New Delhi", |
| "location": { |
| "country": "India" |
| } |
| }, |
| "email": "hardik.meisheri@tcs.com" |
| }, |
| { |
| "first": "Lipika", |
| "middle": [], |
| "last": "Dey", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "TCS Research New Delhi", |
| "location": { |
| "country": "India" |
| } |
| }, |
| "email": "lipika.dey@tcs.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper presents system description of our submission to the SemEval-2018 task-1: Affect in tweets for the English language. We combine three different features generated using deep learning models and traditional methods in support vector machines to create a unified ensemble system. A robust representation of a tweet is learned using a multi-attention based architecture which uses a mixture of different pre-trained embeddings. In addition, analysis of different features is also presented. Our system ranked 2 nd , 5 th , and 7 th in different subtasks among 75 teams.", |
| "pdf_parse": { |
| "paper_id": "S18-1043", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper presents system description of our submission to the SemEval-2018 task-1: Affect in tweets for the English language. We combine three different features generated using deep learning models and traditional methods in support vector machines to create a unified ensemble system. A robust representation of a tweet is learned using a multi-attention based architecture which uses a mixture of different pre-trained embeddings. In addition, analysis of different features is also presented. Our system ranked 2 nd , 5 th , and 7 th in different subtasks among 75 teams.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In Natural Language processing, Sentiment analysis refers to the degree of positiveness or negativeness of the information presented in the text. Traditionally sentiment analysis is treated as either a binary classification task (positive, negative) or a multi-class classification task (very negative, negative, neutral, positive, very positive). Affect analysis on the other hand refers to detecting discrete sets of emotions present in the text such as anger, joy, sadness etc (Dalgleish and Power, 2000; Plutchik, 2001) . Predicting intensities of these emotions to fine granularity can help us better understand the sentiment and emotions of the writer.", |
| "cite_spans": [ |
| { |
| "start": 480, |
| "end": 507, |
| "text": "(Dalgleish and Power, 2000;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 508, |
| "end": 523, |
| "text": "Plutchik, 2001)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Detecting sentiments or affect from text have a number of useful applications. For example, the degree of disgust or anger expressed in customer complaints or reviews can help us decide the priorities of issues to look at, or the joy or optimism expressed in customer feedbacks can be a major factor in deciding the marketing strategy for a company.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Sentiment or affect analysis for social media text is a challenging task due to the extensive use of slang, frequent spelling mistakes, innovative and unpredictable use of hashtags and extensive use of emojis and smileys.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "SemEval-2018 Task 1: Affect in tweets, provides data for 3 languages: English, Arabic, and Spanish. For each language, there are 5 subtasks that are presented, 2 Regression tasks, 2 classification tasks and 1 Multi-Label task. Further details of tasks are presented in section 3. This task was similar to the WASSA shared task (Mohammad and Bravo-Marquez, 2017) and dataset presented here is the extension of the data presented for WASSA shared task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we present our approach to solving these tasks for English language tweets. We have proposed a system which uses various pretrained embeddings to handle out-of-vocabulary words and emoji present in the text along with cleaning of raw text. In addition, to create a better representation of the text, we use two sets of embeddings learnt over two different corpus which results in parallel attention mechanism -one set from the twitter space and another from a common crawl corpus. Finally, we combine features generated from the deep learning model with other features to generate an ensemble system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Major contributions of this paper are: 1. Generating word vector representation of a tweet from three different set of pre-trained embeddings which can handle emoji/smileys and the out of vocabulary words in the dataset. 2. Deep neural network architecture which generates robust representation of the text with the help of parallel attention mechanism.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Rest of the paper is organized as follows, Section 2 presents the preprocessing step to generate mixed set of embeddings and the model architecture. It also presents the different sets of features that are used for final ensemble system. In section 3, data, training and experimentation setup is described for different subtasks. Section 4 states the results of the proposed system and detailed discussion of the feature over development and test data. Finally, section 5 concludes the paper with summary of the approach presented.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Figure 1 describes the overall system architecture used for regression and classification subtasks. We have extracted different types of features from the raw text, which fall under three different categories. Deep learning features are the ones which are generated from the model that is trained and proposed in this paper. Lexicon-based features are generated from training sets. In addition, features from pre-trained models are used. These models were trained over large corpus. Attention mechanism has been successful in sequence to sequence learning problems specifically for neural machine translation (NMT) (Bahdanau et al., 2014) . These mechanism helps model to focus more on the task in hand. The proposed architecture uses two parallel processing towers which use attention as a means of focusing on sentiment specific words. Figure 2 presents a snapshot of the random sample for sadness emotion, first row denotes the output of the attention mechanism and row 2 denotes the output of the model. We observe that the attention mechanism helps in focusing on words which are relevant to the sentiment task in hand, such as crying, dying etc. which further helps in improving the performance of the model. The multi-attention mechanism is inspired from (Lin et al., 2017) where, they have used more multi-attention over the same embedding space to focus on more than one word. In contrast, we use limit our attention to at max 2 words as the tweet is much more compact in nature as well as we do it over different embedding space to encapsulate much more information.", |
| "cite_spans": [ |
| { |
| "start": 615, |
| "end": 638, |
| "text": "(Bahdanau et al., 2014)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1262, |
| "end": 1280, |
| "text": "(Lin et al., 2017)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 838, |
| "end": 846, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Proposed Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For generating features as mentioned above, we employ pre-processing steps to normalize the text with respect to sentiment specific words and its usage.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As mentioned earlier, tweets in the raw form are noisy and prone to many distortions in terms of syntactic and semantic structure. These preprocessing steps are common to all the features generated. Deep learning features require additional steps which are explained in respective section. 1. All the characters in the text are converted to lower case. 2. Twitter contains lot of words with more than 2 repeating characters such as happpyyyyyyy, we limited occurrence of each character to maximum of 2 successive times. 3. To handle hashtags, # symbol is removed from all the words. 4. Extra spaces and new line character is deleted from the tweet to ensure the compactness of the tweets. Figure 3 shows the model which was used to generate deep learning features. In this model, we have used different embeddings to enhance the representations of raw text. There are two parallel architectures which take the same raw input but generate the representation from a different embedding space. This helps in encapsulating the word and its usage in twitter space as well as keeping a general semantic and syntactic structure of word intact.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 689, |
| "end": 697, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "For tower one in the figure 3, text is preprocessed using steps mentioned earlier. In addition, following pre-processing steps are performed: 1. Usernames in twitter which starts with @ is replaced by mention token. 2. Punctuations are removed except [,] , [?] , [!] , [.] 3. Words that are most probably used as slangs in twitter are replaced with its corresponding expanded versions such as \"y'all\" is replace by \"you all\". Embedding matrix is generated from the preprocessed text using combination of three pretrained embeddings: Glove (Pennington et al., 2014) trained over common crawl corpus with 300 dimension vector, Character 1 level embeddings trained over common crawl glove corpus providing 300 dimensional vectors for each character and emoji2vec (Eisner et al., 2016) which provides 300 dimension vectors for most commonly used emojis in twitter platform. Procedure to generate a representation of a text using all these embeddings is presented in Algorithm 1, where get vector is a function of token and embedding type and returns the corresponding vector for the token from the pre-trained embedding specified.", |
| "cite_spans": [ |
| { |
| "start": 251, |
| "end": 254, |
| "text": "[,]", |
| "ref_id": null |
| }, |
| { |
| "start": 257, |
| "end": 260, |
| "text": "[?]", |
| "ref_id": null |
| }, |
| { |
| "start": 263, |
| "end": 266, |
| "text": "[!]", |
| "ref_id": null |
| }, |
| { |
| "start": 269, |
| "end": 272, |
| "text": "[.]", |
| "ref_id": null |
| }, |
| { |
| "start": 760, |
| "end": 781, |
| "text": "(Eisner et al., 2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Embedding vectors that are generated for each tweet are then converted to into matrix, with a number of rows being the size of maximum sequence, rest is zero padded. This matrix then forms the input to the Bidirectional LSTM(BiLSTM) layer (Graves and Schmidhuber, 2005) , which helps in generating representations by taking all the words in sequence into account.", |
| "cite_spans": [ |
| { |
| "start": 239, |
| "end": 269, |
| "text": "(Graves and Schmidhuber, 2005)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "1 https://github.com/minimaxir/char-embeddings word token = Tokenize tweet for each word in word token do if word is in EmojiEmbb then word vector = get vector(EmojiEmbb, word vector) else if word is in Glove then word vector = get vector(Glove, word vector) else if word is in CharEmbb then word vector = get vector(charEmbb, word vector) else chars = tokeinze word token into character n = length(chars) word vector = n 1 get vector(charEmbb, chars) end end Algorithm 1: Embedding Matrix generation", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The output of each time-step is then fed to Attention Mechanism (Bahdanau et al., 2014) . The core concept behind the attention mechanism forces the model to focus on important words that are related to the task.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 87, |
| "text": "(Bahdanau et al., 2014)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Tower 2 in the Figure 3 uses pre-processed mechanism where, all the punctuations are removed, usernames are removed without any replacement with tokens and, special characters including smiley and emojis are removed. Embedding matrix is generated using pre-trained glove embeddings trained over twitter corpus and provides 200-dimensional vectors for each word. These are zero padded as mentioned earlier and is fed into another BiLSTM layer. Maxpooling is applied over the output of BiLSTM to extract the most prominent vector from the rest over the temporal dimension which act as a attention over word sequences. Maximum sequence length for the embedding space is kept at 50, as twitter has a character limit of 140 characters.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 15, |
| "end": 23, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deep Learning Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The output of tower 1 and 2 are then concatenated and then fed into the fully connected network with 2 layers. Final layer contains a different number of neurons and activation functions de-pending on the subtasks which are stated in the experiments section 4. To handle overfitting we have used L2 regularization dropout in layers and batch size is kept at 512.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We defined features that are used in most of the traditional sentiment analysis techniques are termed as traditional features. As per the baseline system provided in the WASSA Emotion Intensity Task we define baseline features. The knowledge sources that have been used to generate baseline feature are: MPQA subjective lexicon (Wilson et al., 2005) , Bing Liu lexicon (Ding et al., 2008) , AFINN (Nielsen, 2011) , Sentiment140 (Kiritchenko et al., 2014) , NRC Hashtag Sentiment Lexicon (Mohammad and Kiritchenko, 2015), NRC Hashtag Emotion Association Lexicon (Mohammad et al., 2013), NRC Word-Emotion Association Lexicon (Mohammad and Turney, 2013), NRC-10 Expanded Lexicon (Bravo-Marquez et al., 2016) and the SentiWord-Net (Esuli and Sebastiani, 2007) . Two more features are calculated on the basis of emoticons (obtained from AFINN (Nielsen, 2011) ) and negations present in the text. This amounts to 45 features for each tweet.", |
| "cite_spans": [ |
| { |
| "start": 328, |
| "end": 349, |
| "text": "(Wilson et al., 2005)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 369, |
| "end": 388, |
| "text": "(Ding et al., 2008)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 397, |
| "end": 412, |
| "text": "(Nielsen, 2011)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 428, |
| "end": 454, |
| "text": "(Kiritchenko et al., 2014)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 727, |
| "end": 755, |
| "text": "(Esuli and Sebastiani, 2007)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 838, |
| "end": 853, |
| "text": "(Nielsen, 2011)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Traditional Features", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "In addition to this, we have used Vader Sentiment Lexicons (Gilbert, 2014), which provides the positive, negative, neutral and compound score for the text. These lexicons are specifically designed for social media texts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Traditional Features", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We use SentiNeuron feature (Radford et al., 2017) from a model which is trained over 82 million Amazon review dataset. The aim of the model was to predict the next word in the review. They have used LSTM with 4096 units. The 2389 th neuron was found to be specifically focusing on the sentiment for a given sentence. We use output of this 2389 th as a feature. Further more, we have normalized it between 0-1 which helps in performance improvement.", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 49, |
| "text": "(Radford et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features from pre-trained models", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "We participated in all the subtasks of English language, namely: EI-reg (intensity score prediction of 4 emotions), EI-oc (intensity ordinal classification task of 4 emotions), V-reg (intensity score prediction of valence), V-oc (intensity ordinal classifi- cation task for valence) and E-c (Multi-label classification task over 11 emotions). Detailed analysis and distribution of the dataset are presented in the task paper .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data and Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For each subtask, deep learning model is same as mentioned earlier, although there is a variation in the feature being used for ensemble approach. Data distribution across train, dev and test dataset is given table 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data and Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this task, given a tweet and its corresponding emotion, we need to predict the intensity of the given emotion in 0-1 range. For this task, Deep learning models with sigmoid as activation function and number of hidden unit in last layer as one is used. Official evaluation metric for this is a pearson correlation, so we define a new loss function to train deep learning models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EI-reg and V-reg: Regression", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Loss = 0.7 \u00d7 (1 \u2212 pearson) + 0.3 \u00d7 M SE (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EI-reg and V-reg: Regression", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "This is a slightly modified version than used by (Meisheri et al., 2017; Goel et al., 2017) for WASSA dataset, where they use the negative of pearson correlation as the loss function. We observe that using weighted sum of negation of pearson correlation and mean square error improved the performance.", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 72, |
| "text": "(Meisheri et al., 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 73, |
| "end": 91, |
| "text": "Goel et al., 2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EI-reg and V-reg: Regression", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Training data was split into 10 different folds, by using stratified splits which were achieved by generating 10 bins over the continuous bins. Ten different models were generated with permutations of 9 out of 10 folds as training and remaining 1 as validation dataset. Finally, dev dataset is passed over all the models and mean of all the models were considered as output. This can be seen as a variation of weak learners concept in decision trees.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EI-reg and V-reg: Regression", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For the testing dataset as mentioned earlier, we combine training and development set and then generate 10 folds to create 10 models with 80-20 split for validation. Parameters used for models are stated in table 2. In addition, we have used Adam optimizer with 0.0001 as learning rate. The output of deep learning models is considered as a feature for our ensemble method, where we combine other features as mentioned in section 2.3 and section 2.4. In addition to this, the output of other emotion is also used as a feature for the ensemble model which provides an additional context for the prediction task. So, for each emotion in a task, we get additional features from deep learning model which we define as a cross emotion features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EI-reg and V-reg: Regression", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "All this features are then passed on to the support vector regression, whose parameters C and Kernel are tuned using 10 fold cross validation over training set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EI-reg and V-reg: Regression", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Objective of this task was to classify tweet into one of the ordinal classes, given a tweet and its corresponding emotion. Number of classes for EI-oc were four and for V-oc it was seven. Official evaluation metric for this task was provided as pearson correlation. Output layer in the deep learning model contained four and seven neurons for EI-oc and V-oc respectively with softmax as the activation function. Loss function used for classification task was categorical crossentropy. Similar settings of 10-fold as mentioned in regression task earlier was carried out resulting in 10 different models for each emotion and valence. Layer parameters for this task are summarized in table 3. Stochastic gradient descent with nesterov momentum and learning rate 0.01 was used as optimizer for this task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EI-oc and V-oc: Classification", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Similar to regression task, for classification we create ensemble model by combining output of deep learning models with other features. In addition, we also consider output of regression models as additional features for classification. Support vector classifier is used as a final classifier, with C and Kernel being tuned using 10-fold cross validation. Final submission is done with model being trained by combining training and development dataset and then taking 80-20 split for training and validation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EI-oc and V-oc: Classification", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this task, we were provided with tweet and its corresponding labels among 11 emotion: anger, anticipation, disgust, fear, joy, love, optimism, pessimism, sadness, surprise and trust. For this task, output layer of our deep learning model contains 11 neurons and sigmoid as a activation function. Binary cross entropy is used as a loss function with Stochastic gradient descent with Nesterov momentum, 0.01 learning rate and 10 \u22126 learning rate decay as optimizer. Parameters for other layers are presented in table 4. Official evaluation metric for this task was Jaccard similarity score. The output of the deep learning model gives values between 0-1 for each emotion. Since the task was to predict the presence or absence of any emotion continuous value must be converted to binary number. Threshold was applied which was learned from training and development set. For training set we found the threshold to be 0.35, whereas for development set it was found to be 0.30. For testing set, we take the mean of both the values as threshold.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "E-c: Multilabel Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In total 75 teams participated in the task, our system was ranked 2 nd for V-reg task, 5 th for EI-reg task and 7 th for both V-oc and EI-oc task. For Multi-Label classification, our system achieved 2 nd rank among the teams, with Jaccard similarity score of 0.582. Table 6 shows the result for EI-reg, V-reg, EIoc and V-oc task on official evaluation metric i.e. pearson correlation. We also compare the results over the original split and 80-20 split generated after combining training and development dataset. It can be seen that both of these gives similar results while, for classification original split is better, for regression it is other way. Table 5 shows comparison of Ensemble model and Deep learning model for EI-reg and V-reg. We observe improvement in ensemble model over development dataset in both sets of splits. On the contrary, there is relatively less difference in the test set. Table 7 contains results of different deep learning architecture for EI-reg and V-reg task. For both of these task, we can observe what is impact of attention over both the towers. We also present the results for each single tower which helps in understanding the need for two towers. Although adding attention over Tower-1 gives little improvement for EL-reg task it provides significant improvement for V-reg task. It is worthwhile to note that sadness emotion shows no improvement by adding attention over tower-2. In table 8 and table 9 , results on regression task for 80-20 split for each feature when combined with deep learning feature over development and test set respectively. We observe that adding lexicon features marginally increases the performance of the system. We can conclude from this that deep learning model that we presented can encapsulate most of the information regarding the sentiment which was present in traditional features. Including cross-emotion feature shows considerable increase in the performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 266, |
| "end": 273, |
| "text": "Table 6", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 653, |
| "end": 660, |
| "text": "Table 5", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 902, |
| "end": 909, |
| "text": "Table 7", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1420, |
| "end": 1442, |
| "text": "In table 8 and table 9", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Inter-feature correlation is presented in figure 4, where we can observe that apart from anger and valence baseline features are weak negatively correlated with other features. Furthermore, vader and sentineuron are less correlated except for valence and yet they provide similar improvement when combined individually with DL features. Although, when both these features are combined together they provide a significant boost. For classification task across four emotion and valence we observed that, using threshold values obtained by comparing continuous values from regression task provides a better result in pearson correlation. Possible reason for this might be the loss function that we trained for classification model was categorical cross entropy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We observe that high difference between the predicted value/class and truth value/class are present at the extreme end of the spectrum. One of the possible reason might be that the distribution of the data shows a Gaussian distribution and there are few samples at the extreme end as described in . In addition, we manually inspect some cases where our model failed, for example for sadness You are MINE, my baby, my headache, my love, my smile, my frown, my wrong, my right, my pain, my happiness, my everything. has truth value of 0.140 and our system predicted 0.568 which is way higher than what the writer is trying to convey. The model is predicting slightly above neutral sentiment. Possible reasons include the presence of both positive and negative words present in the alternate sequence. This kind of discourse and irony detection can help in better prediction if incorporated into the models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error analysis", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In joy emotion, when will i ever be happy with myself? has a truth value of 0.109 and predicted value is 0.491. These kind of rhetorical questions is hard to understand even for humans, for model to understand we need to put in some explicit context. By observing more such samples, we find that adding more context about the different physiological and linguistic phenomenon into the model with appropriate bias can greatly increase the accuracies of the models present. Table 10 shows the error across different emotions in multi-label task. We observe that there is a high error rate in anticipation, pessimism, sur- prise and trust, possible reasons might be that there are already fewer samples available and the ratio of percentage votes received to the percentage of tweets labeled is also high for this emotion as compared to other emotions (Mohammad and . In addition, we observe that there are around 2% of the tweets contained no emotion in test set, where our model predicted atleast one emotion.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 472, |
| "end": 480, |
| "text": "Table 10", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Error analysis", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In this paper, we describe our approach to SemEval-2018 Task-1 for English tweet. We present ensemble system which is capable of handling noisy sentiment dataset over regression, classification as well as multi-label dataset. Use of the mixture of embedding in parallel makes this system unique in terms of generating better representations with respect to sentiment. Our system achieved 2 nd , 5 th and 7 th in different subtasks. Analyzing different feature combinations from individual results and inter-feature correlation over test data reveals that our deep learning model is able to capture most of the information that is provided by lexicon feature. Multi-label classification has proved to be a challenging task among all the subtask that has been provided as the evaluation score of all the team participating has been low. We have also presented some examples where our model has performed poorly and conclude that including context feature for sarcasm, irony and rhetoric question can improve the performance further over all the subtasks presented in SemEval for English language. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1409.0473" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Determining word-emotion associations from tweets by multilabel classification", |
| "authors": [ |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Eibe", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernhard", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pfahringer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "WI'16", |
| "volume": "", |
| "issue": "", |
| "pages": "536--539", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felipe Bravo-Marquez, Eibe Frank, Saif M Moham- mad, and Bernhard Pfahringer. 2016. Determining word-emotion associations from tweets by multi- label classification. In WI'16, pages 536-539. IEEE Computer Society.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Handbook of cognition and emotion", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Dalgleish", |
| "suffix": "" |
| }, |
| { |
| "first": "Mick", |
| "middle": [], |
| "last": "Power", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Dalgleish and Mick Power. 2000. Handbook of cognition and emotion. John Wiley & Sons.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A holistic lexicon-based approach to opinion mining", |
| "authors": [ |
| { |
| "first": "Xiaowen", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip S", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 2008 international conference on web search and data mining", |
| "volume": "", |
| "issue": "", |
| "pages": "231--240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaowen Ding, Bing Liu, and Philip S Yu. 2008. A holistic lexicon-based approach to opinion mining. In Proceedings of the 2008 international conference on web search and data mining, pages 231-240. ACM.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "emoji2vec: Learning emoji representations from their description", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rockt\u00e4schel", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabelle", |
| "middle": [], |
| "last": "Augenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Matko", |
| "middle": [], |
| "last": "Bosnjak", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Eisner, Tim Rockt\u00e4schel, Isabelle Augenstein, Matko Bosnjak, and Sebastian Riedel. 2016. emoji2vec: Learning emoji representations from their description.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Sentiwordnet: A high-coverage lexical resource for opinion mining. Evaluation", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Esuli", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabrizio", |
| "middle": [], |
| "last": "Sebastiani", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Esuli and Fabrizio Sebastiani. 2007. Senti- wordnet: A high-coverage lexical resource for opin- ion mining. Evaluation, pages 1-26.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Vader: A parsimonious rule-based model for sentiment analysis of social media text", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Cj Hutto", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gilbert", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Eighth International Conference on Weblogs and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "CJ Hutto Eric Gilbert. 2014. Vader: A parsimo- nious rule-based model for sentiment analysis of so- cial media text. In Eighth International Confer- ence on Weblogs and Social Media (ICWSM-14).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Prayas at emoint 2017: An ensemble of deep neural architectures for emotion intensity prediction in tweets", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "Devang", |
| "middle": [], |
| "last": "Kulshreshtha", |
| "suffix": "" |
| }, |
| { |
| "first": "Prayas", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaushal Kumar", |
| "middle": [], |
| "last": "Shukla", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", |
| "volume": "", |
| "issue": "", |
| "pages": "58--65", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Goel, Devang Kulshreshtha, Prayas Jain, and Kaushal Kumar Shukla. 2017. Prayas at emoint 2017: An ensemble of deep neural architectures for emotion intensity prediction in tweets. In Pro- ceedings of the 8th Workshop on Computational Ap- proaches to Subjectivity, Sentiment and Social Me- dia Analysis, pages 58-65.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Framewise phoneme classification with bidirectional lstm and other neural network architectures", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Neural Networks", |
| "volume": "18", |
| "issue": "5", |
| "pages": "602--610", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Graves and J\u00fcrgen Schmidhuber. 2005. Frame- wise phoneme classification with bidirectional lstm and other neural network architectures. Neural Net- works, 18(5):602-610.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Sentiment analysis of short informal texts", |
| "authors": [ |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Saif M", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "50", |
| "issue": "", |
| "pages": "723--762", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Svetlana Kiritchenko, Xiaodan Zhu, and Saif M Mo- hammad. 2014. Sentiment analysis of short in- formal texts. Journal of Artificial Intelligence Re- search, 50:723-762.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A structured self-attentive sentence embedding", |
| "authors": [ |
| { |
| "first": "Zhouhan", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Minwei", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Cicero", |
| "middle": [], |
| "last": "Nogueira", |
| "suffix": "" |
| }, |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Santos", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1703.03130" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhouhan Lin, Minwei Feng, Cicero Nogueira dos San- tos, Mo Yu, Bing Xiang, Bowen Zhou, and Yoshua Bengio. 2017. A structured self-attentive sentence embedding. arXiv preprint arXiv:1703.03130.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Sentiment extraction from consumergenerated noisy short texts", |
| "authors": [ |
| { |
| "first": "Hardik", |
| "middle": [], |
| "last": "Meisheri", |
| "suffix": "" |
| }, |
| { |
| "first": "Kunal", |
| "middle": [], |
| "last": "Ranjan", |
| "suffix": "" |
| }, |
| { |
| "first": "Lipika", |
| "middle": [], |
| "last": "Dey", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of IEEE International Conference on Data Mining Workshops (ICDMW)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hardik Meisheri, Kunal Ranjan, and Lipika Dey. 2017. Sentiment extraction from consumer- generated noisy short texts. In Proceedings of IEEE International Conference on Data Mining Work- shops (ICDMW), New Orleans, USA.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Wassa-2017 shared task on emotion intensity", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the EMNLP 2017 Workshop on Computational Approaches to Subjectivity, Sentiment, and Social Media (WASSA)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad and Felipe Bravo-Marquez. 2017. Wassa-2017 shared task on emotion intensity. In Proceedings of the EMNLP 2017 Workshop on Computational Approaches to Subjectivity, Senti- ment, and Social Media (WASSA), September 2017, Copenhagen, Denmark.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Semeval-2018 Task 1: Affect in tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Salameh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of International Workshop on Semantic Evaluation (SemEval-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad, Felipe Bravo-Marquez, Mo- hammad Salameh, and Svetlana Kiritchenko. 2018. Semeval-2018 Task 1: Affect in tweets. In Proceed- ings of International Workshop on Semantic Evalu- ation (SemEval-2018), New Orleans, LA, USA.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Using hashtags to capture fine emotion categories from tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Computational Intelligence", |
| "volume": "31", |
| "issue": "2", |
| "pages": "301--326", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M Mohammad and Svetlana Kiritchenko. 2015. Using hashtags to capture fine emotion cate- gories from tweets. Computational Intelligence, 31(2):301-326.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Understanding emotions: A dataset of tweets to study interactions between affect categories", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 11th Edition of the Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad and Svetlana Kiritchenko. 2018. Understanding emotions: A dataset of tweets to study interactions between affect categories. In Proceedings of the 11th Edition of the Language Resources and Evaluation Conference, Miyazaki, Japan.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Nrc-canada: Building the stateof-the-art in sentiment analysis of tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1308.6242" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M Mohammad, Svetlana Kiritchenko, and Xiao- dan Zhu. 2013. Nrc-canada: Building the state- of-the-art in sentiment analysis of tweets. arXiv preprint arXiv:1308.6242.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Crowdsourcing a word-emotion association lexicon", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Computational Intelligence", |
| "volume": "29", |
| "issue": "3", |
| "pages": "436--465", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M Mohammad and Peter D Turney. 2013. Crowd- sourcing a word-emotion association lexicon. Com- putational Intelligence, 29(3):436-465.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A new anew: Evaluation of a word list for sentiment analysis in microblogs", |
| "authors": [ |
| { |
| "first": "Finn\u00e5rup", |
| "middle": [], |
| "last": "Nielsen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1103.2903" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Finn\u00c5rup Nielsen. 2011. A new anew: Evaluation of a word list for sentiment analysis in microblogs. arXiv preprint arXiv:1103.2903.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christo- pher D. Manning. 2014. Glove: Global vectors for word representation. In Empirical Methods in Nat- ural Language Processing (EMNLP), pages 1532- 1543.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The nature of emotions: Human emotions have deep evolutionary roots, a fact that may explain their complexity and provide tools for clinical practice", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Plutchik", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "American scientist", |
| "volume": "89", |
| "issue": "4", |
| "pages": "344--350", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Plutchik. 2001. The nature of emotions: Hu- man emotions have deep evolutionary roots, a fact that may explain their complexity and provide tools for clinical practice. American scientist, 89(4):344- 350.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Learning to Generate Reviews and Discovering Sentiment", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Jozefowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Radford, R. Jozefowicz, and I. Sutskever. 2017. Learning to Generate Reviews and Discovering Sen- timent. ArXiv e-prints.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Recognizing contextual polarity in phraselevel sentiment analysis", |
| "authors": [ |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the conference on human language technology and empirical methods in natural language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "347--354", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theresa Wilson, Janyce Wiebe, and Paul Hoffmann. 2005. Recognizing contextual polarity in phrase- level sentiment analysis. In Proceedings of the con- ference on human language technology and empiri- cal methods in natural language processing, pages 347-354. Association for Computational Linguis- tics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "System Diagram.", |
| "num": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Attention Example.", |
| "num": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Model Diagram.", |
| "num": null |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Correlation among various features for test set.", |
| "num": null |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td/><td>Train Dev Test</td></tr><tr><td>Anger</td><td>1701 388 1002</td></tr><tr><td>Fear</td><td>2252 389 986</td></tr><tr><td>Joy</td><td>1616 290 1105</td></tr><tr><td>Sadness</td><td>1533 397 975</td></tr><tr><td>Valence</td><td>1181 449 937</td></tr><tr><td colspan=\"2\">Multi-Label 6838 886 3259</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "Data Distribution.", |
| "num": null |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td>Layers</td><td colspan=\"4\">Units Activation Regularization Dropout</td></tr><tr><td>BiLSTM -Tower 1</td><td>70</td><td>Tanh</td><td>L2 -0.05</td><td>0.35</td></tr><tr><td>BiLSTM -Tower 2</td><td>70</td><td>Tanh</td><td>L2 -0.05</td><td>0.35</td></tr><tr><td>Attention</td><td>-</td><td>-</td><td>L2 -0.01</td><td>-</td></tr><tr><td>Max Pooling</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td colspan=\"2\">Fully Connected Layer 1 100</td><td>Selu</td><td>L2 -0.001</td><td>0.5</td></tr><tr><td>Fully Connected Layer 2</td><td>50</td><td>Selu</td><td>L2 -0.001</td><td>0.3</td></tr><tr><td>Output Layer</td><td>1</td><td>Sigmoid</td><td>-</td><td>-</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "Parameters for Regression Task.", |
| "num": null |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td>Layers</td><td colspan=\"4\">Units Activation Regularization Dropout</td></tr><tr><td>BiLSTM -Tower 1</td><td>50</td><td>Tanh</td><td>L2 -0.05</td><td>0.4</td></tr><tr><td>BiLSTM -Tower 2</td><td>50</td><td>Tanh</td><td>L2 -0.05</td><td>0.4</td></tr><tr><td>Attention</td><td>-</td><td>-</td><td>L2 -0.001</td><td>-</td></tr><tr><td>Max Pooling</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Fully Connected Layer 1</td><td>50</td><td>Selu</td><td>L2 -0.01</td><td>0.4</td></tr><tr><td>Fully Connected Layer 2</td><td>20</td><td>Selu</td><td>L2 -0.01</td><td>0.4</td></tr><tr><td>Output Layer</td><td>5/7</td><td>Softmax</td><td>-</td><td>-</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "Parameters for Classification Task.", |
| "num": null |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>Layers</td><td colspan=\"4\">Units Activation Regularization Dropout</td></tr><tr><td>BiLSTM -Tower 1</td><td>120</td><td>Tanh</td><td>0</td><td>0.3</td></tr><tr><td>BiLSTM -Tower 2</td><td>120</td><td>Tanh</td><td>0</td><td>0.3</td></tr><tr><td>Attention</td><td>-</td><td>-</td><td>0</td><td>-</td></tr><tr><td>Max Pooling</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td colspan=\"2\">Fully Connected Layer 1 100</td><td>relu</td><td>L2 -0.01</td><td>0.3</td></tr><tr><td>Fully Connected Layer 2</td><td>50</td><td>relu</td><td>L2 -0.01</td><td>0.2</td></tr><tr><td>Output Layer</td><td>11</td><td>Softmax</td><td>-</td><td>-</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "Parameters for Multi-Label Classification Task.", |
| "num": null |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td/><td/><td colspan=\"2\">Development Set</td><td/><td/><td colspan=\"2\">Test Set</td><td/></tr><tr><td/><td colspan=\"2\">Original Split</td><td colspan=\"2\">80-20 Split</td><td colspan=\"2\">Original Split</td><td colspan=\"2\">80-20 Split</td></tr><tr><td/><td>Ensemble System</td><td>Deep Learning Model</td><td>Ensemble System</td><td>Deep Learning Model</td><td>Ensemble System</td><td>Deep Learning Model</td><td>Ensemble System</td><td>Deep Learning Model</td></tr><tr><td>Fear</td><td>0.751</td><td>0.707</td><td>0.791</td><td>0.791</td><td>0.745</td><td>0.725</td><td>0.736</td><td>0.74</td></tr><tr><td>Anger</td><td>0.79</td><td>0.718</td><td>0.747</td><td>0.744</td><td>0.775</td><td>0.721</td><td>0.776</td><td>0.749</td></tr><tr><td>Sadness</td><td>0.735</td><td>0.689</td><td>0.77</td><td>0.767</td><td>0.764</td><td>0.723</td><td>0.776</td><td>0.741</td></tr><tr><td>Joy</td><td>0.723</td><td>0.675</td><td>0.812</td><td>0.775</td><td>0.767</td><td>0.724</td><td>0.77</td><td>0.731</td></tr><tr><td>Average</td><td>0.75</td><td>0.697</td><td>0.78</td><td>0.769</td><td>0.763</td><td>0.723</td><td>0.764</td><td>0.74</td></tr><tr><td>Valence</td><td>0.857</td><td>0.804</td><td>0.85</td><td>0.788</td><td>0.858</td><td>0.832</td><td>0.861</td><td>0.84</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "Comparison of proposed deep Learning model and ensemble model over train and development set for regression task.", |
| "num": null |
| }, |
| "TABREF5": { |
"content": "<table><tr><td/><td>Reg</td><td/><td>OC</td><td/></tr><tr><td/><td colspan=\"4\">Original Split 80-20 Split Original Split 80-20 Split</td></tr><tr><td>Fear</td><td>0.745</td><td>0.735</td><td>0.595</td><td>0.561</td></tr><tr><td>Anger</td><td>0.775</td><td>0.775</td><td>0.626</td><td>0.641</td></tr><tr><td>Sadness</td><td>0.764</td><td>0.776</td><td>0.618</td><td>0.621</td></tr><tr><td>joy</td><td>0.767</td><td>0.77</td><td>0.65</td><td>0.655</td></tr><tr><td>Average</td><td>0.76275</td><td>0.764</td><td>0.62225</td><td>0.6195</td></tr><tr><td>Valence</td><td>0.858</td><td>0.861</td><td>0.727</td><td>0.777</td></tr></table>",
| "html": null, |
| "type_str": "table", |
| "text": "Results of Regression and Classification task over test set.", |
| "num": null |
| }, |
| "TABREF6": { |
| "content": "<table><tr><td/><td colspan=\"3\">Anger Fear Sadness</td><td>Joy</td><td colspan=\"2\">Average Valence</td></tr><tr><td>Proposed Model</td><td>0.749</td><td>0.74</td><td>0.741</td><td>0.731</td><td>0.74</td><td>0.84</td></tr><tr><td>Tower-1</td><td colspan=\"2\">0.727 0.727</td><td>0.704</td><td>0.709</td><td>0.717</td><td>0.825</td></tr><tr><td>Tower-2</td><td colspan=\"2\">0.714 0.719</td><td>0.673</td><td>0.70</td><td>0.705</td><td>0.792</td></tr><tr><td colspan=\"3\">Tower-1 without Attention 0.721 0.709</td><td>0.69</td><td>0.704</td><td>0.711</td><td>0.783</td></tr><tr><td colspan=\"3\">Tower-2 without Attention 0.693 0.692</td><td>0.673</td><td>0.682</td><td>0.685</td><td>0.766</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
"text": "Regression Task Results over different model architectures over test set: 80-20 Split.",
| "num": null |
| }, |
| "TABREF7": { |
| "content": "<table><tr><td>Features</td><td colspan=\"3\">Anger Fear Sadness</td><td>Joy</td><td>Valence</td></tr><tr><td>dl</td><td colspan=\"2\">0.744 0.791</td><td>0.767</td><td>0.775</td><td>0.788</td></tr><tr><td>dl+baselines</td><td colspan=\"2\">0.747 0.792</td><td>0.773</td><td>0.778</td><td>0.789</td></tr><tr><td>dl+vader</td><td colspan=\"2\">0.747 0.792</td><td>0.772</td><td>0.775</td><td>0.785</td></tr><tr><td>dl+sentineuron</td><td>0.748</td><td>0.79</td><td>0.773</td><td>0.778</td><td>0.79</td></tr><tr><td>dl+valence</td><td colspan=\"2\">0.751 0.792</td><td>0.77</td><td>0.785</td><td>-</td></tr><tr><td>dl+cross emotion</td><td>0.75</td><td>0.794</td><td>0.773</td><td>0.778</td><td>0.809</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "Results of Individual Features in combination with Deep learning features over development set.", |
| "num": null |
| }, |
| "TABREF8": { |
| "content": "<table><tr><td>Features</td><td colspan=\"3\">Anger Fear Sadness</td><td>Joy</td><td>Valence</td></tr><tr><td>dl</td><td>0.749</td><td>0.74</td><td>0.741</td><td>0.731</td><td>0.84</td></tr><tr><td>dl+baselines</td><td colspan=\"2\">0.755 0.739</td><td>0.742</td><td>0.731</td><td>0.84</td></tr><tr><td>dl+vader</td><td colspan=\"2\">0.753 0.744</td><td>0.746</td><td>0.731</td><td>0.841</td></tr><tr><td>dl+sentineuron</td><td colspan=\"2\">0.753 0.739</td><td>0.752</td><td>0.734</td><td>0.842</td></tr><tr><td>dl+valence</td><td colspan=\"2\">0.756 0.743</td><td>0.745</td><td>0.74</td><td>-</td></tr><tr><td colspan=\"3\">dl+cross emotion 0.759 0.744</td><td>0.753</td><td>0.738</td><td>0.849</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "Results of Individual Features in combination with Deep learning features over test set.", |
| "num": null |
| }, |
| "TABREF9": { |
| "content": "<table><tr><td>Emotion</td><td>Error</td><td>Presence total</td><td>Ratio</td></tr><tr><td>Anger</td><td>521</td><td>1101</td><td>0.473</td></tr><tr><td colspan=\"2\">Anticipation 469</td><td>425</td><td>1.103</td></tr><tr><td>Disgust</td><td>646</td><td>1099</td><td>0.588</td></tr><tr><td>Fear</td><td>251</td><td>485</td><td>0.518</td></tr><tr><td>Joy</td><td>477</td><td>1442</td><td>0.331</td></tr><tr><td>Love</td><td>405</td><td>516</td><td>0.785</td></tr><tr><td>Optimism</td><td>704</td><td>1143</td><td>0.616</td></tr><tr><td>Pessimism</td><td>398</td><td>375</td><td>1.061</td></tr><tr><td>Sadness</td><td>539</td><td>960</td><td>0.561</td></tr><tr><td>Surprise</td><td>167</td><td>170</td><td>0.982</td></tr><tr><td>Trust</td><td>161</td><td>153</td><td>1.052</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "Multi-label Error across Emotions.", |
| "num": null |
| } |
| } |
| } |
| } |