| { |
| "paper_id": "S18-1045", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:45:26.054285Z" |
| }, |
| "title": "DeepMiner at SemEval-2018 Task 1: Emotion Intensity Recognition Using Deep Representation Learning", |
| "authors": [ |
| { |
| "first": "Habibeh", |
| "middle": [], |
| "last": "Naderi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Dalhousie University", |
| "location": { |
| "country": "Canada" |
| } |
| }, |
| "email": "habibeh.naderi@dal.ca" |
| }, |
| { |
| "first": "Behrouz", |
| "middle": [ |
| "H" |
| ], |
| "last": "Soleimani", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Dalhousie University", |
| "location": { |
| "country": "Canada" |
| } |
| }, |
| "email": "behrouz.hajisoleimani@dal.ca" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Research Council", |
| "location": { |
| "country": "Canada" |
| } |
| }, |
| "email": "svetlana.kiritchenko@nrc-cnrc.gc.ca" |
| }, |
| { |
| "first": "Saif", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mohammad", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Research Council", |
| "location": { |
| "country": "Canada" |
| } |
| }, |
| "email": "saif.mohammad@nrc-cnrc.gc.ca" |
| }, |
| { |
| "first": "Stan", |
| "middle": [], |
| "last": "Matwin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Dalhousie University", |
| "location": { |
| "country": "Canada" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper, we propose a regression system to infer the emotion intensity of a tweet. We develop a multi-aspect feature learning mechanism to capture the most discriminative semantic features of a tweet as well as the emotion information conveyed by each word in it. We combine six types of feature groups: (1) a tweet representation learned by an LSTM deep neural network on the training data, (2) a tweet representation learned by an LSTM network on a large corpus of tweets that contain emotion words (a distant supervision corpus), (3) word embeddings trained on the distant supervision corpus and averaged over all words in a tweet, (4) word and character n-grams, (5) features derived from various sentiment and emotion lexicons, and (6) other hand-crafted features. As part of the word embedding training, we also learn the distributed representations of multi-word expressions (MWEs) and negated forms of words. An SVR regressor is then trained over the full set of features. We evaluate the effectiveness of our ensemble feature sets on the SemEval-2018 Task 1 datasets and achieve a Pearson correlation of 72% on the task of tweet emotion intensity prediction.", |
| "pdf_parse": { |
| "paper_id": "S18-1045", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper, we propose a regression system to infer the emotion intensity of a tweet. We develop a multi-aspect feature learning mechanism to capture the most discriminative semantic features of a tweet as well as the emotion information conveyed by each word in it. We combine six types of feature groups: (1) a tweet representation learned by an LSTM deep neural network on the training data, (2) a tweet representation learned by an LSTM network on a large corpus of tweets that contain emotion words (a distant supervision corpus), (3) word embeddings trained on the distant supervision corpus and averaged over all words in a tweet, (4) word and character n-grams, (5) features derived from various sentiment and emotion lexicons, and (6) other hand-crafted features. As part of the word embedding training, we also learn the distributed representations of multi-word expressions (MWEs) and negated forms of words. An SVR regressor is then trained over the full set of features. We evaluate the effectiveness of our ensemble feature sets on the SemEval-2018 Task 1 datasets and achieve a Pearson correlation of 72% on the task of tweet emotion intensity prediction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The widespread use of micro-blogging and social networking websites such as Twitter for conveying information, sharing opinions, and expressing feelings, makes the sentiment analysis of tweets an attractive area of research. However, sentiment analysis is challenging because people often convey their emotions indirectly and creatively, rather than explicitly stating how they feel. Sentiment analysis of tweets is additionally challenging because of the frequent occurrences of nonstandard language and poor grammatical structure. Tweets also often contain misspellings, abbreviations, hashtags, and emoticons.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Various machine learning approaches have been developed for Twitter sentiment classification. Most of these algorithms train a classifier over tweets with manually annotated sentiment intensity labels and learn the most discriminative features. Hence, designing an effective feature engineering algorithm can improve classification performance, greatly. used many different sentiment lexicons (manually created and automatically generated), as well as a variety of hand-crafted features to build the topranked system for Twitter sentiment classification tasks in SemEval-2013 and SemEval-2014. Sentiment lexicons, either hand-crafted or algorithmically generated, consist of words and their associated polarity scores. However, since feature engineering is labour intensive and usually needs domain-specific knowledge, sentiment classification algorithms with less dependency on feature engineering are attracting considerable interest. Socher et al. (2013) proposed a feature learning algorithm to discover explanatory factors in sentiment classification. They consider the representation of a sentence (or document) as a composition of the representations of its constituent words or phrases. This way, the sentiment classification problem reduces to learning an effective word representation (or word embedding) that not only models the syntactic context of words but also captures sentiment information of the sentence. Tang et al. (2014) extended the traditional word embedding methods (Mikolov et al., 2013b; Collobert et al., 2011) by encoding sentiment information into the existing continuous representation of words. They built sentiment-specific word embedding (SSWE) by developing three neural networks wherein the sentiment polarity of the tweet is incorporated in the neural networks' loss functions. Teng et al. (2016) proposed a context-sensitive lexicon-based method using recurrent and simple", |
| "cite_spans": [ |
| { |
| "start": 937, |
| "end": 957, |
| "text": "Socher et al. (2013)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1424, |
| "end": 1442, |
| "text": "Tang et al. (2014)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1491, |
| "end": 1514, |
| "text": "(Mikolov et al., 2013b;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1515, |
| "end": 1538, |
| "text": "Collobert et al., 2011)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1815, |
| "end": 1833, |
| "text": "Teng et al. (2016)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Dev.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Emotion Train", |
| "sec_num": null |
| }, |
| { |
| "text": "Test Total anger 1,701 388 1,002 3,091 fear 2,252 389 986 3,627 joy 1,616 290 1,105 3,011 sadness 1,533 397 975 2,905 Total 7,102 1,464 4,068 12,634 feed-forward neural networks to extract sentiment lexicons and produce a new polarity weight, respectively. Unlike lexicon-based sentiment analysis, deep learning approaches are effective in exploring both linguistic and semantic relations between words (Liu et al., 2015) . However, due to the limited amount of high-quality labeled data, it is difficult to train deep models with a large number of hyperparameters for sentiment analysis tasks. Additionally, manual labeling of data is costly and requires domain expert knowledge, which is not always available.", |
| "cite_spans": [ |
| { |
| "start": 423, |
| "end": 441, |
| "text": "(Liu et al., 2015)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 5, |
| "end": 168, |
| "text": "Total anger 1,701 388 1,002 3,091 fear 2,252 389 986 3,627 joy 1,616 290 1,105 3,011 sadness 1,533 397 975 2,905 Total 7,102 1,464 4,068 12,634", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Emotion Train", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper, we describe two systems: System I, our official submission to the competition, and System II, our best model. In both systems, we combine deep learning and lexicon-based approaches to extract the most informative semantic and emotion representations of tweets. We train two LSTM models, one on the provided training data and another one on a large corpus of tweets that contain emotion words, to obtain emotionspecific tweet representations. We augment this feature space with word and character n-grams, features derived from several sentiment and emotion lexicons as well as other hand-crafted features. Our best model achieves an average Pearson correlation of 71.96% on the official EI-reg test dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Emotion Train", |
| "sec_num": null |
| }, |
| { |
| "text": "The English training, development, and test datasets used in our experiments were provided as part of the SemEval-2018 Task 1, EI-reg subtask . 1 The data files include tweet id, tweet text, emotion of the tweet, and the emotion intensity. An overview of the data is provided in Table 1. 1 A detailed description of the English datasets and the analysis of various affect dimensions is available in Mohammad and Kiritchenko (2018).", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 145, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 279, |
| "end": 287, |
| "text": "Table 1.", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The following pre-processing steps were applied to each of the training and test tweets:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Remove URLs and usernames.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Lower-case all the tweet text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Substitute abbreviated phrases such as I've, don't, I'd, etc. with their long forms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Replace tweet-specific acronyms such as gr8, lol, rotfl, etc. with their expanded forms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Substitute the elongated words with the same words but keeping at most two consecutive occurrences of repeated letters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Standardize all the emojis in data to their explanatory phrases using emoji Python package 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Remove all the HTML character codes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Replace all occurrences of a multi-word expression (MWE) by a unique identifier. We use WikiMWE (Hartmann et al., 2012) , which contains all multi-word expressions from Wikipedia.", |
| "cite_spans": [ |
| { |
| "start": 98, |
| "end": 121, |
| "text": "(Hartmann et al., 2012)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Generate the negated form of all the tokens that occur between any of the negation words, such as no, not, never, etc., and a punctuation mark.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Remove special characters, numbers, non-English words or phrases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Normalize all adjectives and adverbs in test data that do not exist in train or development data sets with adjective or adverb in the training data which shares the most common Synsets of WordNet with it (if we find more than one candidate, we replace the adjective with the most frequent one in the training data).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Apply the WordNet lemmatizer to obtain the simple singular form of tokens with part-of-speech tags of adjective, adverb, verb or noun.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The tweets are now fed to the system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We created two models: System I, our official submission to the competition, and System II, our best model. Both models address the task of emotion intensity prediction (EI-reg): given a tweet T and an emotion e, predict a real-valued intensity score (in the range [0, 1]) of e that represents the emotional state of the author of the tweet T.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our first model takes advantage of both embedding-based and lexicon-based features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System I", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In particular, the following feature sets are generated:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System I", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 Embedding-based features:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System I", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "-Average word embedding vector; -Representation of a tweet learned by an LSTM neural network on the provided training data;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System I", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 Lexicon-based and n-gram features:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System I", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "- We use bag-of-word (BOW) (Pang et al., 2002) and term frequency-inverse document frequency (tf-idf) methods to extract different word and character n-grams. We train word embeddings on a large corpus of tweets that contain emotion words. Then, we refine our learned word embeddings to build emotion-specific word embeddings for every emotion. Specifically, we assign emotionspecific weights to every word in our learned word embeddings and multiply each word vector by weights. These emotion-specific weights are obtained by calculating the Pearson correlation between the extracted unigram features and intensity labels of the training and development datasets of each emotion.", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 46, |
| "text": "(Pang et al., 2002)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System I", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We concatenate two learned embedding-based tweet representations, word and character ngrams, and the lexicon features in a multimodal feature layer. We train a Random Forest (RF) over this heterogeneous multimodal feature layer to predict emotion intensity of a tweet.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System I", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "This approach was evaluated on the datasets of SemEval-2018 Task 1, EI-reg (an emotion intensity regression task) and EI-oc (an emotion intensity ordinal classification task), for which it obtained Pearson correlations of 57.5% and 48.5% on the test sets, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System I", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Further investigation revealed that our system I was overfitted to the training data and lost its generalization ability over new unseen data. The cause of this problem was the use of development dataset labels in our feature engineering algorithm. So, we modify our model to overcome overfitting and propose system II.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System I", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Similarly to System I, our second model incorporates both embedding-based representations and linguistic knowledge in a unified architecture (see Figure 1 ). We train a Support Vector Regressor (SVR) over the following two categories of features:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 146, |
| "end": 154, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "System II", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Embedding-based features:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System II", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "-Average word embedding vector; -Representation of a tweet learned by an LSTM neural network on the provided training data; -Emotion-polarized representation of a tweet learned by an LSTM neural network on a distant supervision corpus;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System II", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Lexicon-based and hand-crafted features:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System II", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "-Word and character n-gram features; -Vector of 43 lexicon-derived features, compiled using the AffectiveTweets package (Mohammad and Bravo-Marquez, 2017); -Hand-crafted features based on either word similarities in learned word embeddings or emotion intensity similarities in accordance to train and development labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System II", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Below, different components of the two systems are explained in detail. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System II", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Since the input of our model is a sequence of tokens {w 1 , w 2 , ..., w n }, it is crucial to learn an effective word representation for automatic emotion analysis. A word embedding is a dense, low-dimensional and real-valued vector associated with each word w i . We used word2vec (Mikolov et al., 2013a) and SVD-NS (Soleimani and Matwin, 2018) to learn word embeddings and trained it on an unlabeled corpus of 21M tweets provided as part of the SemEval-2018 Affect in Tweets DIstant Supervision Corpus (SemEval-2018 AIT DISC) (Mohammad et al., 2018). SVD-NS works better for word and sentence similarity tasks and is much faster than word2vec. Such distributed word representations learned from massive text data make feature engineering less dependent on the task. However, unsupervised learning of word embeddings cannot thoroughly capture finer context-based semantic information of a specific task. Hence, to incorporate linguistic structure of tweets, we use the following two techniques to improve the word vectors: 1. Our model learns a unique distributed representation for every Multi-Word Expression (MWE). MWEs occur frequently in tweets, and their meanings are often not derivable from the meanings of the constituent words.", |
| "cite_spans": [ |
| { |
| "start": 283, |
| "end": 306, |
| "text": "(Mikolov et al., 2013a)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word embedding layer", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "2. Additionally, our model learns an embedding vector to represent the negated form of every word occurring between a negation word (e.g., no, shouldn't) and the following punctuation mark. Due to the significant impact of negation words in changing the sentiment polarity of a sentence, we treat the negated tokens differently. By adding a 'NEG' prefix to them, we consider the negated tokens as different entities and learn separate word representations for them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word embedding layer", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "To learn word embeddings, we applied two methods:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word embedding layer", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "the continuous skip-gram model (Mikolov et al., 2013a) with the window size of 5, and SVD-NS (Soleimani and Matwin, 2018) with the PMI threshold of \u03b1 = \u22122.5. The vector dimensionality was set to d = 100. We also filter words that occur less than 3 times in the corpus.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 54, |
| "text": "(Mikolov et al., 2013a)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word embedding layer", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "To capture the global context of a tweet, we build a tweet embedding by vertically concatenating embedding vectors of its n words. This yields a tweet embedding matrix X \u2208 R n\u00d7d . Then, we take the mean of these word embeddings across the tweet length. Therefore, an average embedding will add d features (equal to the number of our word embedding dimensions) to our multimodal feature layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Average embedding layer", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "To learn a semantic representation of a tweet, we use an LSTM neural network, which we found effective in detecting salient words of a sentence while automatically attenuating unimportant words. The LSTM model sequentially takes each word in a sentence, extracts its information, and embeds it into a semantic vector. Due to its ability to capture long-term memory, LSTM accumulates increasingly richer information as it goes through the sentence, and when it reaches the last word, the hidden layer of the network provides a semantic representation of the whole sentence (Palangi et al., 2016) . To be able to train sequential neural networks in batches, we normalize tweet length by zero padding and then feed the zero-padded tweet embedding matrix to an LSTM layer. We apply dropout (Srivastava et al., 2014) on the LSTM layer to prevent network parameters from overfitting and control the co-adaptation of features. Our LSTM layer is then followed by two fully connected hidden layers, and one output layer. Each of these layers computes the transformation f(", |
| "cite_spans": [ |
| { |
| "start": 572, |
| "end": 594, |
| "text": "(Palangi et al., 2016)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 786, |
| "end": 811, |
| "text": "(Srivastava et al., 2014)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tweet embedding vector learned by LSTM layer", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "W_i * x_i + b_i) for i = {1, 2, 3}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tweet embedding vector learned by LSTM layer", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": ", where W is the weight matrix, b is the bias vector and f is a Relu non-linear activation function for hidden layers and a Sigmoid neuron for output layer. The full network is trained on the provided training data to predict the intensity score of the input tweet. We consider the representation obtained from the first hidden layer as a sentence embedding vector of an input tweet. The network parameters are learned by minimizing the mean squared error (MSE) between the actual and predicted values of emotion intensity on the training data. We optimize this loss function by back-propagating through layers via mini-batch gradient descent, with batch size of 32, 40 training epochs, and Adam optimization algorithm (Kingma and Ba, 2014) with learning rate of \u03b1 = 0.001. We use one LSTM layer with 64 neurons, followed by a dropout of 0.2, and two hidden layers of sizes 32 and 16, respectively. We use the same network parameters for an LSTM model trained on the distant supervision data (see Section 3.6).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tweet embedding vector learned by LSTM layer", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "We leverage a large amount of Twitter data with distant supervision to polarize our word embeddings for each emotion. Hence, we use SemEval-2018 AIT DISC distant supervision corpus of tweets released by the competition organizers, which includes around 100M English tweet ids associated with tweets that contain emotion-related query terms such as '#angry', 'annoyed', 'panic', 'happy', etc. We collected 21M tweets by polling the Twitter API with these tweet ids. Based on the query terms, one or more emotion labels of {'anger', 'fear', 'joy', 'sadness'} have been assigned to every tweet in this dataset. For each emotion, we randomly select 200,000 tweets labeled with that emotion (e.g., 'anger') and 200,000 tweets labeled with other emotions ('not anger') to build the emotion-specific word embeddings. Since the four basic emotions are not independent and may be correlated, we build these emotion-polarized word embeddings in two ways: (i) one against all strategy: for example, 'not anger' tweets are selected from tweets labeled with any of the other three emotions, i.e., 'fear', 'joy', or 'sadness'; (ii) considering emotions with similar valence as one group of labels: tweets labeled with 'anger', 'fear', and 'sadness' are treated as they have the same label. So, here 'not anger' tweets are selected from tweets that are labeled only as 'joy'. Then, we train an LSTM neural network using these emotion-specific word embeddings to build emotion-specific representations of tweets. Our final emotion-specific tweet representation is obtained by concatenating two hidden state layers learned by the same LSTM neural network trained twice on the same data but with different emotion labeling according to the above two labeling strategies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Emotion-polarized tweet representation learned by LSTM", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "In addition to the two kinds of tweet representations described above, we use bag-of-word (BOW) representation to extract most and least frequent word n-grams (unigrams, bigrams, and trigrams) as well as character n-grams (three, four, and five consecutive characters) from the training, development, and test datasets. BOW represents each word as a one-hot vector which has the same length as the size of the vocabulary, and only one dimension is 1, with all others being 0. However, the one-hot word representation cannot sufficiently capture the complex linguistic characteristics of words. We augment our feature space by generating additional hand-crafted features. We define a set of binary features by adding n adjectives with highest and lowest intensities for each emotion according to the emotion's training data. The intensity of a word (unigram) is obtained as an average emotion intensity of tweets in the train- ing data that contain that unigram. We also add the weighted average intensity of all extracted unigrams and the intensity of their k nearest neighbors in learned word embeddings (sorted based on cosine similarity) to our feature set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hand-Crafted Features", |
| "sec_num": "3.7" |
| }, |
| { |
| "text": "We train the SVR regressor on the combined set of tweets in the training and development sets and apply the model on the test set. The Pearson correlation between the predictions and the gold labels was used by the competition organizers as the official evaluation measure. The percentage of Pearson correlation scores obtained by all of our individual and combined models on the test set are shown in Table 2 . To make the result table easier to understand, we shortened the feature groups' names as follows: 1) average word embedding vectors \u2192 WE, 2) tweet embedding vectors learned by LSTM \u2192 TE, 3) emotionpolarized tweet embeddings learned by LSTM \u2192 polTE, 4) word and character n-gram features \u2192 ngram, 5) AffectiveTweets lexicon features \u2192 lex, 6) hand-crafted features based on word similarities in emotion intensity \u2192 handcrft. All the results reported in the table use word embeddings that are obtained by SVD-NS (Soleimani and Matwin, 2018) method which was slightly better than word2vec (Mikolov et al., 2013b) . The 'all-features' row shows the results obtained by the model that concatenates all six groups of features including WE, ngram, TE, polTE, lex, and handcrft. This model achieves the highest Pearson correlation score among all of our proposed models. The tweet representation learned by LSTM is the best learned unimodal feature. Considering MWEs as independent semantic units improves the average embedding model's performance by 1.03 percentage points. Learning independent embedding vectors for negated form of words further improves the score by 0.6 percentage points.", |
| "cite_spans": [ |
| { |
| "start": 998, |
| "end": 1021, |
| "text": "(Mikolov et al., 2013b)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 402, |
| "end": 409, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We described a deep learning framework to predict emotion intensity in tweets. We implemented an ensemble of embedding-based feature representations and sentiment lexicon-based feature learning approaches. Our best model obtained a Pearson correlation of 71.96% on Task 1 of SemEval-2018 competition (EI-reg: an emotion intensity regression task). The tweet representation feature vector learned by LSTM was the most effective feature group amongst those that we used. Various sentiment and emotion lexicon features, our handcrafted features and word n-grams features also helped improve prediction quality.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://pypi.org/project/emoji/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://affectivetweets.cms.waikato.ac.nz/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Xiang Jiang for helping us build attentive deep neural networks and fruitful discussions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Determining word-emotion associations from tweets by multilabel classification", |
| "authors": [ |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Eibe", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernhard", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pfahringer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Web Intelligence (WI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felipe Bravo-Marquez, Eibe Frank, Saif M Moham- mad, and Bernhard Pfahringer. 2016. Determining word-emotion associations from tweets by multi- label classification. In Web Intelligence (WI), 2016", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "ACM International Conference on", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "536--539", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "IEEE/WIC/ACM International Conference on, pages 536-539. IEEE.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Natural language processing (almost) from scratch", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Karlen", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Kuksa", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2493--2537", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. Journal of Machine Learning Research, 12(Aug):2493-2537.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Mining multiword terms from wikipedia", |
| "authors": [ |
| { |
| "first": "Silvana", |
| "middle": [], |
| "last": "Hartmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Gy\u00f6rgy", |
| "middle": [], |
| "last": "Szarvas", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Semi-Automatic Ontology Development: Processes and Resources", |
| "volume": "", |
| "issue": "", |
| "pages": "226--258", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Silvana Hartmann, Gy\u00f6rgy Szarvas, and Iryna Gurevych. 2012. Mining multiword terms from wikipedia. In Semi-Automatic Ontology Develop- ment: Processes and Resources, pages 226-258. IGI Global.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Mining and summarizing customer reviews", |
| "authors": [ |
| { |
| "first": "Minqing", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the tenth ACM SIGKDD international conference on Knowledge discovery and data mining", |
| "volume": "", |
| "issue": "", |
| "pages": "168--177", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minqing Hu and Bing Liu. 2004. Mining and summa- rizing customer reviews. In Proceedings of the tenth ACM SIGKDD international conference on Knowl- edge discovery and data mining, pages 168-177. ACM.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Sentiment analysis of short informal texts", |
| "authors": [ |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Saif M", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "50", |
| "issue": "", |
| "pages": "723--762", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Svetlana Kiritchenko, Xiaodan Zhu, and Saif M Mo- hammad. 2014. Sentiment analysis of short in- formal texts. Journal of Artificial Intelligence Re- search, 50:723-762.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Finegrained opinion mining with recurrent neural networks and word embeddings", |
| "authors": [ |
| { |
| "first": "Pengfei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shafiq", |
| "middle": [], |
| "last": "Joty", |
| "suffix": "" |
| }, |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1433--1443", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengfei Liu, Shafiq Joty, and Helen Meng. 2015. Fine- grained opinion mining with recurrent neural net- works and word embeddings. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1433-1443.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1301.3781" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013a. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013b. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in neural information processing systems, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Emotion intensities in tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the sixth joint conference on lexical and computational semantics (*Sem)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad and Felipe Bravo-Marquez. 2017. Emotion intensities in tweets. In Proceedings of the sixth joint conference on lexical and computational semantics (*Sem), Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Semeval-2018 Task 1: Affect in tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Salameh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of International Workshop on Semantic Evaluation (SemEval-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad, Felipe Bravo-Marquez, Mo- hammad Salameh, and Svetlana Kiritchenko. 2018. Semeval-2018 Task 1: Affect in tweets. In Proceed- ings of International Workshop on Semantic Evalu- ation (SemEval-2018), New Orleans, LA, USA.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Understanding emotions: A dataset of tweets to study interactions between affect categories", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 11th Edition of the Language Resources and Evaluation Conference (LREC-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad and Svetlana Kiritchenko. 2018. Understanding emotions: A dataset of tweets to study interactions between affect categories. In Pro- ceedings of the 11th Edition of the Language Re- sources and Evaluation Conference (LREC-2018), Miyazaki, Japan.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "NRC-Canada: Building the stateof-the-art in sentiment analysis of tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad, Svetlana Kiritchenko, and Xiao- dan Zhu. 2013. NRC-Canada: Building the state- of-the-art in sentiment analysis of tweets. In Pro- ceedings of the International Workshop on Semantic Evaluation, Atlanta, GA, USA.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Crowdsourcing a word-emotion association lexicon", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Computational Intelligence", |
| "volume": "29", |
| "issue": "3", |
| "pages": "436--465", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M Mohammad and Peter D Turney. 2013. Crowd- sourcing a word-emotion association lexicon. Com- putational Intelligence, 29(3):436-465.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A new anew: Evaluation of a word list for sentiment analysis in microblogs", |
| "authors": [ |
| { |
| "first": "Finn\u00e5rup", |
| "middle": [], |
| "last": "Nielsen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Workshop on'Making Sense of Microposts: Big things come in small packages", |
| "volume": "", |
| "issue": "", |
| "pages": "93--98", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Finn\u00c5rup Nielsen. 2011. A new anew: Evaluation of a word list for sentiment analysis in microblogs. In Workshop on'Making Sense of Microposts: Big things come in small packages, pages 93-98.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Deep sentence embedding using long short-term memory networks: Analysis and application to information retrieval", |
| "authors": [ |
| { |
| "first": "Hamid", |
| "middle": [], |
| "last": "Palangi", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Yelong", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianshu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinying", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Rabab", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "IEEE/ACM Transactions on Audio, Speech and Language Processing (TASLP)", |
| "volume": "24", |
| "issue": "4", |
| "pages": "694--707", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hamid Palangi, Li Deng, Yelong Shen, Jianfeng Gao, Xiaodong He, Jianshu Chen, Xinying Song, and Rabab Ward. 2016. Deep sentence embedding using long short-term memory networks: Analysis and ap- plication to information retrieval. IEEE/ACM Trans- actions on Audio, Speech and Language Processing (TASLP), 24(4):694-707.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Thumbs up?: sentiment classification using machine learning techniques", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Shivakumar", |
| "middle": [], |
| "last": "Vaithyanathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the ACL-02 conference on Empirical methods in natural language processing", |
| "volume": "10", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang, Lillian Lee, and Shivakumar Vaithyanathan. 2002. Thumbs up?: sentiment classification using machine learning techniques. In Proceedings of the ACL-02 conference on Empirical methods in natural language processing-Volume 10, pages 79-86. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Perelygin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 conference on empirical methods in natural language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1631--1642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment tree- bank. In Proceedings of the 2013 conference on empirical methods in natural language processing, pages 1631-1642.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Spectral word embedding with negative sampling", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Behrouz", |
| "suffix": "" |
| }, |
| { |
| "first": "Stan", |
| "middle": [], |
| "last": "Soleimani", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Matwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Behrouz H Soleimani and Stan Matwin. 2018. Spectral word embedding with negative sampling. In Pro- ceedings of the Thirty-Second AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Dropout: A simple way to prevent neural networks from overfitting", |
| "authors": [ |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "15", |
| "issue": "1", |
| "pages": "1929--1958", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: A simple way to prevent neural networks from overfitting. The Journal of Machine Learning Research, 15(1):1929-1958.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Learning sentimentspecific word embedding for twitter sentiment classification", |
| "authors": [ |
| { |
| "first": "Duyu", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1555--1565", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duyu Tang, Furu Wei, Nan Yang, Ming Zhou, Ting Liu, and Bing Qin. 2014. Learning sentiment- specific word embedding for twitter sentiment clas- sification. In Proceedings of the 52nd Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 1555- 1565.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Context-sensitive lexicon features for neural sentiment analysis", |
| "authors": [ |
| { |
| "first": "Zhiyang", |
| "middle": [], |
| "last": "Teng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Duy Tin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Vo", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1629--1638", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiyang Teng, Duy Tin Vo, and Yue Zhang. 2016. Context-sensitive lexicon features for neural senti- ment analysis. In Proceedings of the 2016 Con- ference on Empirical Methods in Natural Language Processing, pages 1629-1638.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Sentiment strength detection for the social web", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Thelwall", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevan", |
| "middle": [], |
| "last": "Buckley", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgios", |
| "middle": [], |
| "last": "Paltoglou", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal of the Association for Information Science and Technology", |
| "volume": "63", |
| "issue": "1", |
| "pages": "163--173", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Thelwall, Kevan Buckley, and Georgios Pal- toglou. 2012. Sentiment strength detection for the social web. Journal of the Association for Informa- tion Science and Technology, 63(1):163-173.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Recognizing contextual polarity in phraselevel sentiment analysis", |
| "authors": [ |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the conference on human language technology and empirical methods in natural language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "347--354", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theresa Wilson, Janyce Wiebe, and Paul Hoffmann. 2005. Recognizing contextual polarity in phrase- level sentiment analysis. In Proceedings of the con- ference on human language technology and empir- ical methods in natural language processing, pages 347-354.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "An empirical study on the effect of negation words on sentiment", |
| "authors": [ |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongyu", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Saif", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "304--313", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaodan Zhu, Hongyu Guo, Saif Mohammad, and Svetlana Kiritchenko. 2014. An empirical study on the effect of negation words on sentiment. In Pro- ceedings of the 52nd Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 304-313.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Word and character n-gram features; -Vector of 43 lexicon-derived features, compiled using the AffectiveTweets package (Mohammad and Bravo-Marquez, 2017). 3 The lexicons used include those created by Nielsen (2011); Mohammad and Turney (2013); Kiritchenko et al. (2014); Hu and Liu (2004); Bravo-Marquez et al. (2016); Thelwall et al. (2012); Wilson et al. (2005).", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": "The architecture of our model (System II).", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "text": "Number of instances provided in the Tweet", |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Emotion Intensity dataset (SemEval-2018 Task 1, EI-reg English). The data was divided into train, develop-ment, and test sets.</td></tr></table>", |
| "html": null |
| }, |
| "TABREF3": { |
| "text": "Pearson correlation (r) % obtained on the test sets. The highest score in each emotion is shown in bold. System I indicates the results of our first overfitted model and System II shows the results of our modified model. In every experiment on system II, we train SVR regressor with linear kernel to predict emotion intensity of a tweet while in system I experiments, we use RF regressor and SVM classifier for SemEval-2018 Task 1 and 2, respectively. The all-features experiment represents the model built on concatenation of all six groups of features including WE, ngram, TE, polTE, lex, and handcrft.", |
| "num": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |