| { |
| "paper_id": "Y14-1026", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:44:54.587670Z" |
| }, |
| "title": "Sentiment Lexicon Interpolation and Polarity Estimation of Objective and Out-Of-Vocabulary Words to Improve Sentiment Classification on Microblogging", |
| "authors": [ |
| { |
| "first": "Yongyos", |
| "middle": [], |
| "last": "Kaewpitakkun", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Japan Advanced Institute of Science and Technology", |
| "location": { |
| "addrLine": "1-1", |
| "postCode": "923-1292", |
| "settlement": "Asahidai, Ishikawa", |
| "region": "Nomi City", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Kiyoaki", |
| "middle": [], |
| "last": "Shirai", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Japan Advanced Institute of Science and Technology", |
| "location": { |
| "addrLine": "1-1", |
| "postCode": "923-1292", |
| "settlement": "Asahidai, Ishikawa", |
| "region": "Nomi City", |
| "country": "Japan" |
| } |
| }, |
| "email": "kshirai@jaist.ac.jp" |
| }, |
| { |
| "first": "Masnizah", |
| "middle": [], |
| "last": "Mohd", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Japan Advanced Institute of Science and Technology", |
| "location": { |
| "addrLine": "1-1", |
| "postCode": "923-1292", |
| "settlement": "Asahidai, Ishikawa", |
| "region": "Nomi City", |
| "country": "Japan" |
| } |
| }, |
| "email": "masnizah@jaist.ac.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Sentiment analysis has become an important classification task because a large amount of user-generated content is published over the Internet. Sentiment lexicons have been used successfully to classify the sentiment of user review datasets. More recently, microblogging services such as Twitter have become a popular data source in the domain of sentiment analysis. However, analyzing sentiments on tweets is still difficult because tweets are very short and contain slang, informal expressions, emoticons, mistyping and many words not found in a dictionary. In addition, more than 90 percent of the words in public sentiment lexicons, such as SentiWordNet, are objective words, which are often considered less important in a classification module. In this paper, we introduce a hybrid approach that incorporates sentiment lexicons into a machine learning approach to improve sentiment classification in tweets. We automatically construct an Add-on lexicon that compiles the polarity scores of objective words and out-ofvocabulary (OOV) words from tweet corpora. We also introduce a novel feature weighting method by interpolating sentiment lexicon score into uni-gram vectors in the Support Vector Machine (SVM). Results of our experiment show that our method is effective and significantly improves the sentiment classification accuracy compared to a baseline unigram model.", |
| "pdf_parse": { |
| "paper_id": "Y14-1026", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Sentiment analysis has become an important classification task because a large amount of user-generated content is published over the Internet. Sentiment lexicons have been used successfully to classify the sentiment of user review datasets. More recently, microblogging services such as Twitter have become a popular data source in the domain of sentiment analysis. However, analyzing sentiments on tweets is still difficult because tweets are very short and contain slang, informal expressions, emoticons, mistyping and many words not found in a dictionary. In addition, more than 90 percent of the words in public sentiment lexicons, such as SentiWordNet, are objective words, which are often considered less important in a classification module. In this paper, we introduce a hybrid approach that incorporates sentiment lexicons into a machine learning approach to improve sentiment classification in tweets. We automatically construct an Add-on lexicon that compiles the polarity scores of objective words and out-ofvocabulary (OOV) words from tweet corpora. We also introduce a novel feature weighting method by interpolating sentiment lexicon score into uni-gram vectors in the Support Vector Machine (SVM). Results of our experiment show that our method is effective and significantly improves the sentiment classification accuracy compared to a baseline unigram model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Sentiment analysis and opinion mining is the field of study that analyzes people's opinions, sentiments, evaluations, attitudes and emotions from written language (Liu, 2010) . Recently, Twitter has become an important resource for sentiment analysis. People express their opinions and feelings using Twitter and these data can be grabbed publicly through Twitter API. There are two main approaches to sentiment analysis: lexicon-based and machine learningbased techniques. Several researchers have combined these two techniques (Kumar et al., 2012; Mudinas et al., 2012; Saif et al., 2012; Fang et al., 2011; Hung et al., 2013) . This study adopts a similar approach; we seek to combine the prior polarity knowledge from the lexicon-based method and the powerful classification algorithm from the machine learning-based method. Two main motivations of this approach are discussed below.", |
| "cite_spans": [ |
| { |
| "start": 163, |
| "end": 174, |
| "text": "(Liu, 2010)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 529, |
| "end": 549, |
| "text": "(Kumar et al., 2012;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 550, |
| "end": 571, |
| "text": "Mudinas et al., 2012;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 572, |
| "end": 590, |
| "text": "Saif et al., 2012;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 591, |
| "end": 609, |
| "text": "Fang et al., 2011;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 610, |
| "end": 628, |
| "text": "Hung et al., 2013)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The initial motivation is to revise the polarity of objective and out-of-vocabulary words in the public sentiment lexicon to improve Twitter sentiment classification. In the lexicon-based approach, sentiment classification is done by comparing the group of positive and negative words looked up from the public lexicon. For example, if the document contains more positive words than negative words, it will be classified as positive. Several public lexical resources such as ANEW 1 , OpinionFinder 2 , SentiStrength 3 , SentiWordNet 4 and SenticNet 5 lexicon are available for this type of analysis. SentiWordNet or \"SWN\" (Esuli et al., 2010) has become one of the most fa-PACLIC 28 ! 205 mous and widely used sentiment lexicons because of its huge vocabulary coverage. SentiWordNet is an extended version of WordNet 6 , where words and synsets in WordNet are augmented with their sentiment score. SWN 3.0 contains more than 100,000 synsets. However, more than 90% of these are classified as objective words (Hung et al., 2013) ; which are usually considered less important in the classification process. Furthermore, lexicon-based sentiment analysis over Twitter faces several challenges due to the short informal language used. Tweets are usually short and contain lots of slang, emoticons, abbreviations or mistyped words. Most of them are not contained in the public lexicon, which are called out-of-vocabulary (OOV) words. Both objective and OOV words may have implicit sentiment, especially in some specific domains or group of users; thus, it could be better to modify an existing public sentiment lexicon, such as SentiWord-Net, by incorporating the polarity of objective and OOV words. One possible way to revise SentiWord-Net is to estimate the polarity scores of sentiment unknown words based on the polarity of the sentences including them in the corpus. 
For example, let us suppose that the objective word \"birthday\" appears many more times in positive tweets than in objective or negative tweets. This word could be revised as a positive word in the sentiment lexicon. On the other hand, when the OOV word \"ugh\" appears many more times in negative tweets than in objective or positive tweets, it could be newly classified as a negative word. In this work, we aim to build an addon lexicon covering the estimated polarity scores for both objective words and OOV words in the Senti-WordNet.", |
| "cite_spans": [ |
| { |
| "start": 622, |
| "end": 642, |
| "text": "(Esuli et al., 2010)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 817, |
| "end": 818, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 1008, |
| "end": 1027, |
| "text": "(Hung et al., 2013)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The secondary motivation is to incorporate the prior polarity knowledge from the sentiment lexicon into powerful machine learning classifier, such as the Support Vector Machine (SVM), as extra information. Among many machine learning techniques, SVM has achieved the great performance in the sentiment classification task. The uni-gram feature has been widely and successfully used in sentiment analysis, especially in user review datasets. Since tweets are much shorter than user reviews, however, the use of only the uni-gram feature may cause a data sparseness problem. One possible way to solve this problem is to integrate the information from the sentiment lexicon to supervised algorithms as extra knowledge. Recently, some researchers incorporate information derived from a lexicon into machine learning by augmenting sentiment lexicon as extra polarity group feature to uni-gram (O'Keefe et al., 2009) or simply replacing uni-gram with a lexicon score (Hung et al., 2013) . In this work, we present an alternative way to incorporate lexical information into a machine learning algorithm by interpolating a score in the sentiment lexicon into a score of uni-gram feature in vector weighting. Our experiment results show that the proposed lexicon interpolation weighting method with revised polarity estimation of objective and OOV words is effective and significantly improves the sentiment classification accuracy compared to the baseline uni-gram model.", |
| "cite_spans": [ |
| { |
| "start": 888, |
| "end": 910, |
| "text": "(O'Keefe et al., 2009)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 961, |
| "end": 980, |
| "text": "(Hung et al., 2013)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is organized as follows. Section 2 discusses related work. Section 3 describes our proposed method and framework including data pre-processing, polarity estimation technique and sentiment lexicon incorporation and feature weighting method. Section 4 describes results of the experiments and discussion. Finally, conclusions and direction for future work are discussed in Section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Early work on Twitter sentiment analysis used two approaches in traditional sentiment analysis on normal texts: machine learning-based and lexiconbased approaches. Recently, some studies have combined these two approaches and achieved relatively better performance in two ways. The first is to develop two classifiers based on these two approaches separately and then integrate them into one system. The second is to incorporate lexicon information directly into a machine learning classification algorithm. In the first way, Kumar et al. (2012) used a machine learning-based method to find the semantic orientation of adjectives and used a lexicon-based method to find the semantic orientation of verbs and adverbs. The overall tweet sentiment is then calculated using a linear interpolation of the results from both methods. Mudinas et al. (2012) presents concept-level sentiment analy- sis system, which are called pSenti. Their system used a lexicon for detecting the sentiment of words and used these sentiment words as features in the machine learning-based method. Results from both lexicon and machine learning were combined together to calculate the final overall sentiment scoring. In the second way, Saif et al. (2012) utilized knowledge of not only words but also semantic concepts obtained from a lexicon as features to train a Naive Bayes classifier. Fang et al. (2011) automatically generated domain-specific sentiment lexicon and incorporated it into the SVM classifier. They applied this method for identifying sentiment classification in a product reviews. Recently, Hung et al. (2013) reported that more than 90 percent of words in SentiWordNet are objective words that are often considered useless in sentiment classification. So, they reassigned proper sentiment values and tendency of such objective words in a movie review corpus and incorporated these sentiment scores into the machine learning-based method. 
In this paper, we reevaluate the sentiment score of not only objective words but also out-of-vocabulary (OOV) words; which are common in tweets due to informal message used. We also propose an alternative way to incorporate the sentiment lexicon knowledge into the machine learning algorithm. We will propose sen-timent interpolation weighting method that interpolates lexicon scores into uni-gram scores in the vector representation of the SVM classifier. Our method is described in detail in the next section.", |
| "cite_spans": [ |
| { |
| "start": 526, |
| "end": 545, |
| "text": "Kumar et al. (2012)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 827, |
| "end": 848, |
| "text": "Mudinas et al. (2012)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1365, |
| "end": 1383, |
| "text": "Fang et al. (2011)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1585, |
| "end": 1603, |
| "text": "Hung et al. (2013)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our two-step hybrid sentiment analysis system has been developed by combining lexicon-based and machine learning-based approaches. In the first step, the add-on lexicon has been created by reevaluating the polarity scores of objective words and out-ofvocabulary (OOV) words extracted from a specific tweet corpus. After that, the score from both the public lexicon and add-on lexicon will be incorporated into a feature vector as extra prior knowledge in four different ways that will be described in Subsection 3.3. The main advantage of our approach is the extra sentiment polarity information from both the public and add-on lexicon will be incorporated to the powerful machine learning algorithm. It can help the supervised learned classifier to identify the sentiment of tweets more precisely, even when tweets contain words that are not found in the public lexicon or less frequently appeared in the training set. The overall system framework is shown in Figure 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 961, |
| "end": 969, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "! 207", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PACLIC 28", |
| "sec_num": null |
| }, |
| { |
| "text": "The data preprocessing process consists of part-ofspeech tagging, lemmatizing, and stop word and URL removal. In the first step, tweets are POS tagged by the TweetNLP POS Tagger 7 , which is trained specially from Twitter data. After that, all words are lemmatized by the Stanford lemmatizer 8 . We also reduce the number of letters that are repeated more than two times, i.e. \"heellllooooo\" is replaced by \"heelloo\". Finally, the common stop words and URL are removed because they represent neither sentiment nor semantic concept.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preprocessing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "As discussed above, SentiWordNet has become a famous and useful lexicon for sentiment analysis due to its broad coverage; however more than 90 percent of words in SentiWordNet are objective words. Moreover, lots of words in tweets are slang, informal or mistyped words that are not included in the lexicon. Based on this observation, we aim to build an add-on lexicon by compiling both objective and OOV words with their newly estimated sentiment score. Word scores are estimated based on the assumption that the polarities of words are coincident with the polarity of their associated sentences, which seems reasonable due to the short length of tweet messages. In other words, if the word frequently appears in the positive (or negative) tweets, its polarity might be positive (or negative).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the creation of the add-on lexicon, the sentiment score of a word is calculated based on the probability that the word appears in positive or negative sentences in a sentiment tagged corpus. There are two steps. In the first step, the words from preprocessing step are extracted with their score in Sen-tiWordNet by using Equation (1). As we will describe in Subsection 3.3, this score is used as the weight of the feature vector. In the add-on lexicon creation, SentiWordNet is just used to check if the word is an objective word (SW N Score(w i ) = 0) or OOV word, then objective and OOV words will be sent to the revised polarity estimation step. The revised scores for these words are calculated by Equation (2). 7 http://www.ark.cs.cmu.edu/TweetNLP/ 8 http://nlp.stanford.edu/software/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "SW N Score(W i ) = SW NScore P OS (w i ) SW N Score NEG (w i ) (1) Score(w i ) = 8 > > > > < > > > > :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Score P OS (w i ), if Score P OS (w i ) > Score NEG (w i ). ( 1) \u21e5 Score NEG (w i ), if Score P OS (w i ) < Score NEG (w i ).", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where, In the second step, since scores in SentiWordNet are in the range of -1 to 1, we have to convert the revised word scores into the same interval. In this case, we use a Bipolar sigmoid function (Fausett, 1994) because it is continuous and returns a value from -1 to 1. The conversion formula is shown in Equation (3).", |
| "cite_spans": [ |
| { |
| "start": 200, |
| "end": 215, |
| "text": "(Fausett, 1994)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Score P OS (w i ) = P (positive|w i ) P (positive) Score NEG (w i ) = P (negative|w i ) P", |
| "eq_num": "(" |
| } |
| ], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Score(w i ) ' = sigmoid(Score(w i ))", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where, sigmoid(x) =", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "2 (1+e x ) 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The revised polarity score may be unreliable if the frequency of the word is too low, or the difference between positive and negative tendency is not great enough. Therefore, two thresholds are introduced. Threshold 1 (T1) is the minimum number of words in the dataset and threshold 2 (T2) is the minimum difference between positive and negative word orientation scores (Score P OS (w i ) and Score NEG (w i )). The objective and OOV words with their scores are added to the add-on lexicon only when equation (4) is fulfilled.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "F requency of w i in dataset T 1 |Score P OS (w i ) Score NEG (w i )| T 2", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "3.3 Lexicon score incorporation and feature weighting methods", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this subsection, the word scores from both Sen-tiWordNet and the add-on lexicon will be incor-porated into the SVM classification features as extra prior information in four different ways: sentiment weighting, sentiment augmentation, sentiment interpolation and sentiment interpolation plus. We start with the baseline uni-gram features, followed by our proposed sentiment lexicon incorporation method. Note that we ignore word sense disambiguation problem although the sentiment score is associated not with a word but with a synset in SWN. When SWN is consulted to obtain a sentiment score for a polysemous word, the first word sense in SWN is always chosen because it is the most representative sense of each word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Add-on lexicon creation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Uni-gram and POS features are common and widely used in the domain of sentiment analysis. There are many feature weighting schemes for the uni-gram. In this work, we use the combination of uni-gram and POS features with term presence weighting as the baseline method. As a result, the weight value of words(POSs) is 1 if they are present, otherwise 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uni-gram and POS Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "In this method, the feature weights of uni-gram binary vectors will be simply replaced with the word sentiment scores (Equation (1) or (3)) from the lexicon. Note that the weight is set to 0 if the word does not appear in the tweet.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Weighting Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "In this method, words will be classified into 3 groups: positive, objective and negative, based on their scores in the lexicon. Then, these sentiment group features are augmented to the original unigram vector. There are three additional features that are the percentage of positive, objective and negative words in a tweet, where the sum of the weights of these three features would be equal to one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Augmentation Features", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "In this method, we proposed a new incorporation method where the word score from the lexicon will be interpolated into the original uni-gram feature weight. The weight of the new interpolated vector is shown in Equation (5). Note that uni-gram score is always 1 in our model. The parameter \u21b5 (0 \uf8ff \u21b5 \uf8ff 1) is used for controlling the influence between the uni-gram model and the sentiment lexicon model. When \u21b5 is equal to 1, the weight is the fully uni-gram model, and when \u21b5 is 0, the weight is the fully sentiment weighting model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Interpolation Features", |
| "sec_num": "3.3.4" |
| }, |
| { |
| "text": "In this method, we combine sentiment interpolation and sentiment augmentation together. Therefore, three additional augmentation features (Subsection 3.3.3) will be added to the sentiment interpolation vector (Subsection 3.3.4) as the extra features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Interpolation Plus Features", |
| "sec_num": "3.3.5" |
| }, |
| { |
| "text": "The summary of all features and weight values are shown in Table 1 . Please note that the weight of the feature is always 0 if it does not appear in the tweets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 59, |
| "end": 66, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentiment Interpolation Plus Features", |
| "sec_num": "3.3.5" |
| }, |
| { |
| "text": "In this section, we present the results of two experiments. The first experiment was conducted with Positive-Neutral-Negative classification over full datasets (3-way classification). In the second experiment, we discarded neutral tweets and conducted the experiment with Positive-Negative classification over datasets of only positive and negative tweets. The detailed results are shown in Section 4.3. In addition, we used LIBLINEAR 9 developed by Fan et al. (2008) The Sanders corpus 10 consists of 5,512 tweets on four different topics (Apple, Google, Microsoft, and Twitter). Each tweet was manually labeled as positive, negative, neutral or irrelevant. After removing irrelevant and duplicate tweets, 2,661 tweets remained. Then, the dataset was randomly divided into two subsets. The first sub-dataset was used for the add-on lexicon creation part and training part, while the second was used for the testing (evaluation) part. Detailed information on this corpus is shown in Table 2 . We used the Sanders dataset as a representative of small and domain-specific corpus.", |
| "cite_spans": [ |
| { |
| "start": 450, |
| "end": 467, |
| "text": "Fan et al. (2008)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 983, |
| "end": 990, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The SemEval 2013 corpus (Nakov et al., 2013) consists of about 15,000 tweets that were created for Twitter sentiment analysis (task 2) in the Semantic Evaluation of Systems Challenge 2013. Each tweet was manually labeled as positive, negative or neutral by Amazon Mechanical Turk workers. This dataset consists of a variety of topics. Among the full dataset, only 10,534 tweets could be downloaded, because some of them were protected or deleted. This dataset was also randomly divided into three subsets. Detailed information on this corpus is shown in Table 3 . Note that the development set was used for parameter tuning. We used the SemEval 2013 dataset as a representative of a large and general corpus.", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 44, |
| "text": "(Nakov et al., 2013)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 554, |
| "end": 561, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "SemEval 2013 Dataset", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "In addition, the percentages of objective words and OOV words after data preprocessing in both corpora are shown in Table 4 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 116, |
| "end": 123, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "SemEval 2013 Dataset", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "As described in Subsection 3.2, in the add-on lexicon creation process, two thresholds can play an 10 http://www.sananalytics.com/lab/twitter-sentiment/ Table 4 ). Note that the threshold T2 was set to 0.2 by the preliminary experiment. Figures 2 a and b show the accuracy of our method for various values of T1 using interpolation plus weighting method in a 3-way and a positive-negative classification, respectively. In these graphs, the horizontal axis indicates the ratio of the number of words in the add-on lexicon to that of the corpus. The results show that, in 3-way classification, the classifier achieved better performance when the numbers of revised polarity words were smaller than the case of positive-negative classification. The accuracy reached its peak with the percentage of revised polarity words set around 0.5% (in 3-way classification) and 1.2% (in positive-negative classification). We did not investigate the optimum for the threshold T1 in the Sanders corpus due to the insufficient number of tweets, but set T1 so that the percentage of the number of the add-on lexicon is the same as in the optimized value in the SemEval 2013 dataset. Based on this observation, two thresholds were set as shown in Table 5 . In the experiment, the coefficient \u21b5 in Equation (5) was initially set to 0.5 for maintaining the balance of uni-gram and lexicon score. The sensitivity of \u21b5 will be investigated in Subsection 4.6.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 153, |
| "end": 160, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 237, |
| "end": 255, |
| "text": "Figures 2 a and b", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 1229, |
| "end": 1236, |
| "text": "Table 5", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Parameter optimization", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In this section, we compare the performance of the add-on lexicon to the original SentiWordNet lexicon. Figure 3 shows the accuracy (the average of both 3-way and positive-negative classification tasks and both datasets) of the models with original SWN and SWN plus the add-on lexicon using four different feature weighting methods. It indicates that the add-on lexicon significantly improved the accuracy in the sentiment weighting and slightly improved the accuracy in the sentiment interpolation and sentiment interpolation plus. In the case of sentiment augmentation, the accuracies were almost the same. In addition, the combination of sentiment interpolation plus the add-on lexicon achieved the highest accuracy. When the add-on lexicon was applied, the performance improved more in positive-negative classification than in positive-neutral-negative (3-way) classification. Table 8 shows the average of both datasets of accuracy improvement in 3-way and positive-negative classification with and without the add-on lexicon when using the interpolation plus weighting method. The result shows that when the add-on lexicon was applied, the accuracy was increased about 2% compared to applying only SWN in positive-negative classification, while only 0.25% in 3-way classification. Therefore the add-on lexicon is more suitable for positive-negative sentiment classification than positive-neutral-negative sentiment classification. The reason may be that in the case of 3-way classification, some objective tweets were misclassified as subjective tweets when objective or OOV words were revised to subjective words. Table 9 shows the performance of the add-on lexicon over the Sanders vs. SemEval 2013 corpus when using sentiment interpolation plus weighting method. It seems that the add-on lexicon performed better over the domain specific corpus (Sanders) than the general corpus (SemEval 2013). 
Using the add-on lexicon, the average accuracy of both 3-way and positive-negative classification tasks were improved by 1.49% on the Sanders corpus and 0.82% on the SemEval 2013 corpus. Table 10 and Table 11 show examples of the revised positive and negative words with their POSs and scores obtained from the Sanders and SemEval 2013 corpora, respectively. It can be observed that the revised polarity words in the Sanders corpus are more domain-specific than those in the SemEval 2013 corpus since the Sanders corpus is a collection of tweets associated with only four keywords: Apple, Android, Microsoft and Twitter. Table 12 shows the comparison among four feature weighting methods and the baseline uni-gram. It reveals the average accuracy of the methods on both Sanders and SemEval corpora in both 3-way classification and positive-negative classification tasks, where both SentiWordNet and the add-on lexicon are used as the sentiment lexicon. First, the accuracy of the sentiment weighting method (the score in the lexicon is used as the weight) was 4.51% worse than the uni-gram method. It may be because, unlike uni-gram weighting, the weights of objective and OOV words were set to 0 even when they appeared in the tweets. It means that the classifier loses the information about these words. Sentiment augmentation, where three lexicon scores were added to original uni-gram as extra features, improved the accuracy by 1.43%. Sentiment interpolation, where lexicon scores were interpolated into uni-gram vector weights, further improved the accuracy by 2.05% compared to baseline. Finally, the combination of sentiment interpolation and sentiment augmentation, called sentiment interpolation plus, achieved the highest accuracy among all methods with average accuracy improvement of 4.08% compared to baseline uni-gram.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 112, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 881, |
| "end": 888, |
| "text": "Table 8", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 1620, |
| "end": 1627, |
| "text": "Table 9", |
| "ref_id": "TABREF10" |
| }, |
| { |
| "start": 2090, |
| "end": 2111, |
| "text": "Table 10 and Table 11", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 2524, |
| "end": 2532, |
| "text": "Table 12", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of the add-on lexicon", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "In the sentiment interpolation method, the \u21b5 parameter in Equation (5) plays an important role for controlling the influence of uni-gram and sentiment lexicon scores. To analyze the effect of the \u21b5 parameter, different values of the \u21b5 parameter were applied. Note that when \u21b5 is equal to 1, the vector weight becomes a fully uni-gram model (only term presence are used as feature weight) and when \u21b5 is equal to 0, the vector weight value becomes a fully sentiment weighting model (only lexicon score are used as feature weight). Figures 4 a) and b) show the change of the average accuracy and F1-measure of the sentiment interpolation plus method on two datasets in the 3-way and positive-negative classification, respectively. In the positive-negative classification, the result clearly shows that the integration of uni-gram and lexicon score outperformed either uni-gram or sentiment weighting. The sentiment interpolation plus method performed well with a large range of \u21b5 values (0.2 to 0.7). On the other hand, in the 3-way classification, it seems that the sentiment interpolation plus method only slightly increased the performance compared to uni-gram or sentiment weighting in most of the \u21b5 values. As discussed earlier, the sentiment interpolation plus method was more suitable for the positive-negative classification than the 3-way classification task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 529, |
| "end": 541, |
| "text": "Figures 4 a)", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The sensitivity of \u21b5 parameter", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "In this paper, we have shown an alternative hybrid method that incorporated sentiment lexicon information into the machine learning method to improve the performance of Twitter sentiment classification. There are two main contributions of this paper. First, we estimated the implicit polarity of objective and OOV words and used these words as additional information for the public sentiment lexicon. We described how we revised the polarity of objective and OOV words based on the assumption that the polarities of words are coincident with the polarity of their associated sentences, which seems reasonable due to the short length of tweets. Second, we proposed an alternative way to incorporate sentiment lexicon knowledge into a machine learning algorithm. We proposed the sentiment interpolation weighting method that interpolated lexicon score into uni-gram score in the feature vectors of SVM.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our results indicate that the add-on lexicon improved the classification accuracy on average compared to using only the original public lexicon. The proposed sentiment interpolation weighting method performed well and the combination of sentiment interpolation and sentiment augmentation, called sentiment interpolation plus, with SentiWordNet and the add-on lexicon achieved the best performance and significantly improved the classification accuracy compared to the uni-gram model. The experiments show that the add-on lexicon performed better over the domain-specific corpus than the general corpus. In addition, our results indicate that the proposed approach was more appropriate for positive-negative classification than positive-neutral-negative (3-way) classification. Therefore, we plan to apply the subjective classification as our future work in order to filter the objective tweets before the polarity classification. Since negation words such as \"not\" and \"less\" are simply treated as uni-gram features in this work, another interesting issue is investigation on how special treatments of negation affect the polarity classification. Furthermore, we plan to find a method to reestimate the word polarity from unlabeled data or noisy label data instead of labeled data that is time consuming to create.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "http://wordnet.princeton.edu/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.csie.ntu.edu.tw/~cjlin/liblinear/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Robust sentiment detection on twitter from biased and noisy data", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Barbosa", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of Coling", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L. Barbosa and J. Feng. 2010. Robust sentiment detec- tion on twitter from biased and noisy data. In Proceed- ings of Coling.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Combining strengths, emotions and polarities for boosting Twitter sentiment analysis", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Mendoza", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Poblete", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of WISDOM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F. Bravo-Marquez, M. Mendoza, and B. Poblete. 2013. Combining strengths, emotions and polarities for boosting Twitter sentiment analysis. In Proceedings of WISDOM.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Senti-WordNet 3.0: An Enhanced Lexical Resource for Sentiment Analysis and Opinion Mining", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Esuli", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Baccianella", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Sebastiani", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Esuli, S. Baccianella, and F. Sebastiani. 2010. Senti- WordNet 3.0: An Enhanced Lexical Resource for Sen- timent Analysis and Opinion Mining. In Proceedings of LREC.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "LIBLINEAR: A Library for Large Linear Classification", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "E" |
| ], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "W" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "J" |
| ], |
| "last": "Hsieh", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [ |
| "R" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "J" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "9", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. E. Fan, K. W. Chang, C. J. Hsieh, X. R. Wang, and C. J. Lin. 2008. LIBLINEAR: A Library for Large Lin- ear Classification. The Journal of Machine Learning Research, volume 9", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Incorporating Lexicon Knowledge into SVM Learning to Improve Sentiment Classification", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Fang and B. Chen. 2011. Incorporating Lexicon Knowledge into SVM Learning to Improve Sentiment Classification. In Proceedings of IJCNLP.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Fundamentals of Neural Networks", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Fausett", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G. Y. Fausett. 1994. Fundamentals of Neural Networks. Prentice Hall PTR.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Twitter sentiment classification using distant supervision", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Go", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bhayani", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Go, R. Bhayani, and L. Huang 2009. Twitter senti- ment classification using distant supervision. CS224N Project Report, Stanford.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Using Objective Words in SentiWordNet to Improve Word-of-Mouth Sentiment Classification", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Hung", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "Kai" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "IEEE Intelligent Systems", |
| "volume": "28", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C. Hung and H. Kai Lin. 2013. Using Objective Words in SentiWordNet to Improve Word-of-Mouth Sentiment Classification. IEEE Intelligent Systems, volume 28.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Twitter Sentiment Analysis: The Good the Bad and the OMG!", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Kouloumpis", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of ICWSM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Kouloumpis, T. Wilson, and J. Moore. 2011. Twitter Sentiment Analysis: The Good the Bad and the OMG!. In Proceedings of ICWSM.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Sentiment Analysis on Twitter IJCSI", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "M" |
| ], |
| "last": "Sebastian", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "9", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Kumar and T. M. Sebastian 2012. Sentiment Analysis on Twitter IJCSI, volume 9.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Sentiment analysis and subjectivity. Handbook of natural language processing", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Liu. 2010. Sentiment analysis and subjectivity. Handbook of natural language processing, volume 2.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Emoticon Smoothed Language Models for Twitter Sentiment Analysis", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [ |
| "L" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "J" |
| ], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. L. Liu , W. J. Li, and M. Guo 2012. Emoti- con Smoothed Language Models for Twitter Sentiment Analysis. In Proceedings of AAAI.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Combining lexicon and learning based approaches for conceptlevel sentiment analysis", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mudinas", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Levene", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of WISDOM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Mudinas, D. Zhang, and M. Levene 2012. Combin- ing lexicon and learning based approaches for concept- level sentiment analysis. In Proceedings of WISDOM.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Semeval-2013 task 2: Sentiment analysis in twitter", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Kozareva", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of SemEval", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. Nakov, Z. Kozareva, A. Ritter, S. Rosenthal, V. Stoy- anov, and T. Wilson. 2013. Semeval-2013 task 2: Sen- timent analysis in twitter. In Proceedings of SemEval.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Feature Selection and Weighting Methods in Sentiment Analysis", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "O'keefe", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Koprinska", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of ADCS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. O'Keefe and I. Koprinska 2009. Feature Selection and Weighting Methods in Sentiment Analysis. In Pro- ceedings of ADCS.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Twitter as a Corpus for Sentiment Analysis and Opinion Mining", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Pak", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Paroubek", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Pak and P. Paroubek 2010. Twitter as a Corpus for Sentiment Analysis and Opinion Mining. In Proceed- ings of LREC.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Semantic sentiment analysis of twitter", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Fernandez", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Alani", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of ISWC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Saif, M. Fernandez, Y. He, and H. Alani. 2012. Se- mantic sentiment analysis of twitter. In Proceedings of ISWC.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Evaluation Datasets for Twitter Sentiment Analysis", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Fernandez", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Alani", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of ESSEM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Saif, M. Fernandez, Y. He, and H. Alani. 2013. Evalu- ation Datasets for Twitter Sentiment Analysis. In Pro- ceedings of ESSEM.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Sentiment strength detection for the social web", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Thelwall", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Buckley", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Paltoglou", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal of the American Society for Information Science and Technology", |
| "volume": "9", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Thelwall, K. Buckley, and G. Paltoglou. 2012. Sen- timent strength detection for the social web. Journal of the American Society for Information Science and Technology, volume 9.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "System framework." |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Polarity estimation probabilities: P(positive|w_i) = (No. of w_i in positive tweets) / (No. of w_i in dataset); P(negative|w_i) = (No. of w_i in negative tweets) / (No. of w_i in dataset); P(positive) = (No. of positive tweets) / (No. of all tweets); P(negative) = (No. of negative tweets) / (No. of all tweets)" |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "The classification accuracy vs. number of revised polarity words on the development dataset." |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Average accuracy of SentiWordNet vs. Senti-WordNet plus the add-on lexicon" |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Effect of the \u21b5 parameter in the sentiment interpolation plus method" |
| }, |
| "TABREF0": { |
| "num": null, |
| "text": "Summary of feature and weighting methods.", |
| "content": "<table><tr><td>Methods</td><td>Feature weight value</td><td>Additional features</td></tr><tr><td>Uni-gram + POS</td><td>1</td><td>No</td></tr><tr><td>Sentiment Weighting</td><td>Lexicon score</td><td>No</td></tr><tr><td/><td/><td>percentage of</td></tr><tr><td>Sentiment Augmentation</td><td>1</td><td>positive, objective and negative word</td></tr><tr><td/><td/><td>in a tweet</td></tr><tr><td>Sentiment Interpolation</td><td>Equation (5)</td><td>No</td></tr><tr><td>Sentiment Interpolation Plus</td><td>Equation (5)</td><td>percentage of positive, objective and negative word in a tweet</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF2": { |
| "num": null, |
| "text": "Sanders corpus.", |
| "content": "<table><tr><td>Subset</td><td>Used for</td><td># Pos</td><td># Neu</td><td># Neg</td><td># Total</td></tr><tr><td>1</td><td>Add-on lexicon</td><td>319</td><td>1,319</td><td>345</td><td>1,983</td></tr><tr><td/><td>creation, Training</td><td/><td/><td/><td/></tr><tr><td>2</td><td>Testing</td><td>109</td><td>455</td><td>114</td><td>678</td></tr><tr><td colspan=\"2\">4.1 Data set</td><td/><td/><td/><td/></tr><tr><td colspan=\"3\">4.1.1 Sanders Dataset</td><td/><td/><td/></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF3": { |
| "num": null, |
| "text": "SemEval 2013 corpus.", |
| "content": "<table><tr><td>Subset</td><td>Used for</td><td># Pos</td><td># Neu</td><td># Neg</td><td># Total</td></tr><tr><td>0</td><td>Development</td><td>1,297</td><td>1,401</td><td>475</td><td>3,173</td></tr><tr><td>1</td><td>Add-on lexicon</td><td>2,272</td><td>3,083</td><td>884</td><td>6,239</td></tr><tr><td/><td>creation, Training</td><td/><td/><td/><td/></tr><tr><td>2</td><td>Testing</td><td>372</td><td>441</td><td>187</td><td>1,000</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF4": { |
| "num": null, |
| "text": "Percentages of objective and OOV words in the two corpora.", |
| "content": "<table><tr><td>Corpus</td><td>Objective words</td><td>OOV words</td></tr><tr><td>Sanders</td><td>26.61%</td><td>57.73%</td></tr><tr><td>SemEval 2013</td><td>24.01%</td><td>66.55%</td></tr><tr><td colspan=\"3\">important role to control the number of revised po-</td></tr><tr><td colspan=\"3\">larity words. The objective and OOV words should</td></tr><tr><td colspan=\"3\">not be revised if their estimated scores are not re-</td></tr><tr><td colspan=\"3\">liable enough. To investigate an optimal value for</td></tr><tr><td colspan=\"3\">the threshold T1, we conducted a sensitivity test</td></tr><tr><td colspan=\"3\">on the SemEval 2013 development dataset (subset</td></tr><tr><td>0 in</td><td/><td/></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF5": { |
| "num": null, |
| "text": "", |
| "content": "<table><tr><td>and 7 show the results of the 3-way and</td></tr><tr><td>positive-negative classification, respectively. They</td></tr><tr><td>reveal the average of precision, recall and F1-</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF6": { |
| "num": null, |
| "text": "Threshold parameter setting based on % of revised words.", |
| "content": "<table><tr><td>Corpus</td><td>Task</td><td>T1</td><td>T2</td><td>Vocab. size</td><td>*1</td><td>*2</td></tr><tr><td>Sanders</td><td>3-way pos-neg</td><td>45 25</td><td>0.20 0.20</td><td>5,145 5,145</td><td>24 60</td><td>0.46% 1.17%</td></tr><tr><td>SemEval</td><td>3-way</td><td>60</td><td>0.20</td><td>15,366</td><td>78</td><td>0.50%</td></tr><tr><td>2013</td><td>pos-neg</td><td>35</td><td>0.20</td><td colspan=\"2\">15,366 173</td><td>1.12%</td></tr><tr><td colspan=\"6\">*1 = No. of revised words, *2 = % of revised words</td><td/></tr><tr><td colspan=\"7\">measure over positive and negative classes as well as</td></tr><tr><td colspan=\"7\">accuracy (Acc) for both Sanders and SemEval 2013</td></tr><tr><td colspan=\"7\">datasets. Five methods (including the baseline) de-</td></tr><tr><td colspan=\"7\">scribed in Subsection 3.3 with and without the add-</td></tr><tr><td colspan=\"4\">on lexicon are compared.</td><td/><td/><td/></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF7": { |
| "num": null, |
| "text": "Average accuracy improvement when using SWN vs. SWN plus the add-on lexicon in 3-way and positive-negative classification.", |
| "content": "<table><tr><td>Classification</td><td>Sentiment Interpolation</td><td>Sentiment Interpolation Plus</td></tr><tr><td>3-Way</td><td>+0.27%</td><td>+0.25%</td></tr><tr><td>Positive-Negative</td><td>+2.42%</td><td>+2.06%</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF8": { |
| "num": null, |
| "text": "Results of 3-way classification task over the Sanders and SemEval 2013 corpora.", |
| "content": "<table><tr><td>Methods</td><td/><td/><td>Sanders</td><td/><td/><td/><td>SemEval 2013</td></tr><tr><td>Feature</td><td colspan=\"3\">Lexicon Precision Recall</td><td>F1</td><td>Acc</td><td colspan=\"2\">Precision Recall</td><td>F1</td><td>Acc</td></tr><tr><td>Uni-gram + POS</td><td>No</td><td>0.454</td><td colspan=\"3\">0.444 0.446 0.667</td><td>0.575</td><td>0.482 0.518 0.617</td></tr><tr><td>Sentiment Weighting</td><td>SWN +Addon</td><td>0.306 0.323</td><td colspan=\"3\">0.392 0.306 0.423 0.315 0.300 0.541</td><td>0.485 0.554</td><td>0.478 0.464 0.531 0.425 0.472 0.606</td></tr><tr><td>Sentiment Augmentation</td><td>SWN +Addon</td><td>0.496 0.485</td><td colspan=\"3\">0.452 0.471 0.690 0.452 0.466 0.684</td><td>0.611 0.620</td><td>0.487 0.536 0.628 0.491 0.542 0.635</td></tr><tr><td>Sentiment Interpolation Sentiment Interpolation Plus</td><td>SWN +Addon SWN +Addon</td><td>0.451 0.467 0.511 0.522</td><td colspan=\"3\">0.407 0.427 0.671 0.425 0.443 0.676 0.439 0.471 0.702 0.430 0.469 0.705</td><td>0.588 0.595 0.646 0.650</td><td>0.471 0.514 0.621 0.476 0.519 0.622 0.484 0.547 0.644 0.487 0.550 0.646</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF9": { |
| "num": null, |
| "text": "Results of positive-negative classification task over the Sanders and SemEval 2013 corpora.", |
| "content": "<table><tr><td>Methods</td><td/><td/><td>Sanders</td><td/><td/><td/><td>SemEval 2013</td></tr><tr><td>Feature</td><td colspan=\"3\">Lexicon Precision Recall</td><td>F1</td><td>Acc</td><td colspan=\"2\">Precision Recall</td><td>F1</td><td>Acc</td></tr><tr><td>Uni-gram + POS</td><td>No</td><td>0.767</td><td colspan=\"3\">0.764 0.762 0.762</td><td>0.699</td><td>0.688 0.692 0.733</td></tr><tr><td>Sentiment Weighting</td><td>SWN +Addon</td><td>0.741 0.723</td><td colspan=\"3\">0.734 0.733 0.735 0.722 0.722 0.722</td><td>0.642 0.697</td><td>0.642 0.642 0.682 0.661 0.670 0.730</td></tr><tr><td>Sentiment Augmentation</td><td>SWN +Addon</td><td>0.776 0.765</td><td colspan=\"3\">0.773 0.771 0.771 0.763 0.762 0.762</td><td>0.719 0.725</td><td>0.700 0.707 0.750 0.712 0.717 0.755</td></tr><tr><td>Sentiment Interpolation</td><td>SWN +Addon</td><td>0.772 0.800</td><td colspan=\"3\">0.772 0.771 0.771 0.799 0.798 0.798</td><td>0.712 0.740</td><td>0.695 0.701 0.744 0.715 0.724 0.766</td></tr><tr><td>Sentiment Interpolation Plus</td><td>SWN +Addon</td><td>0.785 0.813</td><td colspan=\"3\">0.785 0.785 0.785 0.812 0.812 0.812</td><td>0.740 0.759</td><td>0.715 0.724 0.766 0.728 0.739 0.780</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF10": { |
| "num": null, |
| "text": "Performance of the add-on lexicon on the Sanders vs. SemEval 2013 corpus.", |
| "content": "<table><tr><td>Corpus</td><td>SWN</td><td>+Add-on</td><td>Improvement</td></tr><tr><td>Sanders</td><td>74.34%</td><td>75.83%</td><td>1.49%</td></tr><tr><td>SemEval 2013</td><td>70.48%</td><td>71.30%</td><td>0.82%</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF11": { |
| "num": null, |
| "text": "Examples of revised positive / negative words in the Sanders corpus.", |
| "content": "<table><tr><td>Positive word</td><td>Revised score</td><td>Negative word</td><td>Revised score</td></tr><tr><td>#ics#OTHER</td><td>0.9223</td><td>battery#N</td><td>-0.9526</td></tr><tr><td>look#V</td><td>0.9211</td><td>customer#N</td><td>-0.9253</td></tr><tr><td>power#N</td><td>0.8926</td><td>update#N</td><td>-0.9109</td></tr><tr><td>:)#OTHER</td><td>0.8851</td><td>dear#OTHER</td><td>-0.9074</td></tr><tr><td>#android#N</td><td>0.8698</td><td>lot#N</td><td>-0.8931</td></tr><tr><td>help#V</td><td>0.8698</td><td>send#V</td><td>-0.8931</td></tr><tr><td>user#N</td><td>0.8664</td><td>#ios#OTHER</td><td>-0.8776</td></tr><tr><td>great#A</td><td>0.8252</td><td>service#N</td><td>-0.8049</td></tr><tr><td>game#N</td><td>0.8041</td><td>wait#V</td><td>-0.7434</td></tr><tr><td>thank#V</td><td>0.7994</td><td>ass#N</td><td>-0.7086</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF12": { |
| "num": null, |
| "text": "Examples of revised positive / negative words in the SemEval 2013 corpus.", |
| "content": "<table><tr><td>Positive word</td><td>Revised score</td><td>Negative word</td><td>Revised score</td></tr><tr><td>thank#V</td><td>0.8637</td><td>:(#OTHER</td><td>-0.9920</td></tr><tr><td>fun#A</td><td>0.8628</td><td>fuck#N</td><td>-0.9900</td></tr><tr><td>luck#N</td><td>0.8560</td><td>cancel#V</td><td>-0.9872</td></tr><tr><td>great#A</td><td>0.8442</td><td>damn#OTHER</td><td>-0.9864</td></tr><tr><td>:D#OTHER</td><td>0.8421</td><td>niggas#N</td><td>-0.9690</td></tr><tr><td>yay#OTHER</td><td>0.8341</td><td>die#V</td><td>-0.9554</td></tr><tr><td>pakistan#OTHER</td><td>0.8265</td><td>dont#V</td><td>-0.9329</td></tr><tr><td>:)#OTHER</td><td>0.8170</td><td>ass#N</td><td>-0.9272</td></tr><tr><td>yeah#OTHER</td><td>0.7999</td><td>cry#V</td><td>-0.9168</td></tr><tr><td>celebrate#V</td><td>0.7928</td><td>russia#OTHER</td><td>-0.9039</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF13": { |
| "num": null, |
| "text": "Average accuracy comparison among four feature weighting methods and baseline uni-gram.", |
| "content": "<table><tr><td>Methods</td><td>Avg. Acc</td><td>Improvement</td></tr><tr><td>Uni-gram + POS</td><td>69.49%</td><td>-</td></tr><tr><td>Sentiment Weighting</td><td>64.98%</td><td>-4.51%</td></tr><tr><td>Sentiment Augmentation Sentiment Interpolation Sentiment Interpolation Plus</td><td>70.92% 71.53% 73.57%</td><td>1.43% 2.05% 4.08%</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |