| { |
| "paper_id": "S16-1031", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:26:14.843051Z" |
| }, |
| "title": "DiegoLab16 at SemEval-2016 Task 4: Sentiment Analysis in Twitter using Centroids, Clusters, and Sentiment Lexicons", |
| "authors": [ |
| { |
| "first": "Abeed", |
| "middle": [], |
| "last": "Sarker", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "abeed.sarker@asu.edu" |
| }, |
| { |
| "first": "Graciela", |
| "middle": [], |
| "last": "Gonzalez", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "graciela.gonzalez@asu.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present our supervised sentiment classification system which competed in SemEval-2016 Task 4: Sentiment Analysis in Twitter. Our system employs a Support Vector Machine (SVM) classifier trained using a number of features including n-grams, synset expansions, various sentiment scores, word clusters, and term centroids. Using weighted SVMs, to address the issue of class imbalance, our system obtains positive class F-scores of 0.694 and 0.650, and negative class F-scores of 0.391 and 0.493 over the training and test sets, respectively.", |
| "pdf_parse": { |
| "paper_id": "S16-1031", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present our supervised sentiment classification system which competed in SemEval-2016 Task 4: Sentiment Analysis in Twitter. Our system employs a Support Vector Machine (SVM) classifier trained using a number of features including n-grams, synset expansions, various sentiment scores, word clusters, and term centroids. Using weighted SVMs, to address the issue of class imbalance, our system obtains positive class F-scores of 0.694 and 0.650, and negative class F-scores of 0.391 and 0.493 over the training and test sets, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Social media has evolved into a data source that is massive and growing rapidly. One of the most popular micro-blogging social networks, for example, is Twitter, which has over 645,750,000 users, and grows by an estimated 135,000 users every day, generating 9,100 tweets per second. 1 Users tend to use social networks to broadcast the latest events, and also to share personal opinions and experiences. Therefore, social media has become a focal point for data science research, and social media data is being actively used to perform a range of tasks from personalized advertising to public health monitoring and surveillance (Sarker et al., 2015a) . Because of its importance and promise, social media data has been the subject of recent large-scale annotation projects, and shared tasks have been designed around social media for solving problems in complex domains (e.g., Sarker et al. (2016a) ) While the benefits of using a resource such as Twitter include large volumes of data and direct access to enduser sentiments, there are several obstacles associated with the use of social media data. These include the use of non-standard terminologies, misspellings, short and ambiguous posts, and data imbalance, to name a few.", |
| "cite_spans": [ |
| { |
| "start": 628, |
| "end": 650, |
| "text": "(Sarker et al., 2015a)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 877, |
| "end": 898, |
| "text": "Sarker et al. (2016a)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we present a supervised learning approach, using Support Vector Machines (SVMs) for the task of automatic sentiment classification of Twitter posts. Our system participated in the SemEval-2016 task Sentiment Analysis in Twitter, and is an extension of our system for SemEval2015 (Sarker et al., 2015b) . The goal of the task was to automatically classify the polarity of a Twitter post into one of three predefined categories-positive, negative and neutral. In our approach, we apply a small set of carefully extracted lexical, semantic, and distributional features. The features are used to train a SVM learner, and the issue of data imbalance is addressed by using distinct weights for each of the three classes. The results of our system are promising, with positive class F-scores of 0.694 and 0.650, and negative class F-scores of 0.391 and 0.493 over the training and test sets, respectively.", |
| "cite_spans": [ |
| { |
| "start": 294, |
| "end": 316, |
| "text": "(Sarker et al., 2015b)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Following the pioneering work on sentiment analysis by Pang et. al. (2002) , similar research has been carried out under various umbrella terms such as: se-mantic orientation (Turney, 2002) , opinion mining (Pang and Lee, 2008) , polarity classification (Sarker et al., 2013) , and many more. Pang et al. (2002) utilized machine learning models to predict sentiments in text, and their approach showed that SVM classifiers trained using bag-of-words features produced promising results. Similar approaches have been applied to texts of various granularities-documents, sentences, and phrases.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 74, |
| "text": "Pang et. al. (2002)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 175, |
| "end": 189, |
| "text": "(Turney, 2002)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 207, |
| "end": 227, |
| "text": "(Pang and Lee, 2008)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 254, |
| "end": 275, |
| "text": "(Sarker et al., 2013)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 293, |
| "end": 311, |
| "text": "Pang et al. (2002)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Due to the availability of vast amounts of data, there has been growing interest in utilizing social media mining for obtaining information directly from users (Liu and Zhang, 2012) . However, social media sources, such as Twitter posts, present various natural language processing (NLP) and machine learning challenges. The NLP challenges arise from factors, such as, the use of informal language, frequent misspellings, creative phrases and words, abbreviations, short text lengths and others. From the perspective of machine learning, some of the key challenges include data imbalance, noise, and feature sparseness. In recent research, these challenges have received significant attention (Jansen et al., 2009; Barbosa and Feng, 2010; Davidov et al., 2010; Kouloumpis et al., 2011; Sarker et al., 2016b) .", |
| "cite_spans": [ |
| { |
| "start": 160, |
| "end": 181, |
| "text": "(Liu and Zhang, 2012)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 693, |
| "end": 714, |
| "text": "(Jansen et al., 2009;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 715, |
| "end": 738, |
| "text": "Barbosa and Feng, 2010;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 739, |
| "end": 760, |
| "text": "Davidov et al., 2010;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 761, |
| "end": 785, |
| "text": "Kouloumpis et al., 2011;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 786, |
| "end": 807, |
| "text": "Sarker et al., 2016b)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our training and test data consists of the data made available for SemEval 2016 task 4, and additional eligible training data from past Semeval sentiment analysis tasks. Each instance of the data set made available consisted of a tweet ID, a user ID, and a sentiment category for the tweet. For training, we downloaded all the annotated tweets that were publicly available at the time of development of the system. We obtained all the training and devtest set tweets, and also the training sets from past SemEval tasks. In total, we used over 19,000 unique tweets for training. The data is heavily imbalanced with particularly small number of negative instances.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We derive a set of lexical, semantic, and distributional features from the training data. Brief descriptions are provided below. Some of these features were used in our 2015 submission to the SemEval sentiment analysis task (Sarker et al., 2015b) . In short: we have removed uninformative features such as syntactic parses of tweets, and have added features learned using distributional semantics-oriented techniques.", |
| "cite_spans": [ |
| { |
| "start": 224, |
| "end": 246, |
| "text": "(Sarker et al., 2015b)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We perform standard preprocessing such as tokenization, lowercasing and stemming of all the terms using the Porter stemmer 2 (Porter, 1980) . Our preliminary investigations suggested that stop words can play a positive effect on classifier performances by their presence in word 2-grams and 3-grams; so, we do not remove stop words from the texts.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 139, |
| "text": "(Porter, 1980)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Our first feature set consists of word n-grams. A word n-gram is a sequence of contiguous n words in a text segment, and this feature enables us to represent a document using the union of its terms. We use 1-, 2-, and 3-grams as features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "N-grams", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "It has been shown in past research that certain terms, because of their prior polarities, play important roles in determining the polarities of sentences (Sarker et al., 2013) . Certain adjectives, and sometimes nouns and verbs, or their synonyms, are almost invariably associated with positive or non-positive polarities. For each adjective, noun or verb in a tweet, we use WordNet 3 to identify the synonyms of that term and add the synonymous terms as features.", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 175, |
| "text": "(Sarker et al., 2013)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synset", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "We assign three sets of scores to sentences based on three different measures of sentiment. For the first set of scores, we used the positive and negative terms list from Hu and Bing (2004) . For each tweet, the numbers of positive and negative terms are counted and divided by the total number of tokens in the tweet to generate two scores.", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 189, |
| "text": "Hu and Bing (2004)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Scores", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": "For the second sentiment feature, we incorporate a score that attempts to represent the general sentiment of a tweet using the prior polarities of its terms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Scores", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": "Each word-POS pair in a comment is assigned a score and the overall score assigned to the comment is equal to the sum of all the individual term-POS sentiment scores divided by the length of the sentence in words. For term-POS pairs with multiple senses, the score for the most common sense is chosen. To obtain a score for each term, we use the lexicon proposed by Guerini et al. (2013) . The lexicon contains approximately 155,000 English words associated with a sentiment score between -1 and 1. The overall score a sentence receives is therefore a floating point number with the range [-1:1].", |
| "cite_spans": [ |
| { |
| "start": 366, |
| "end": 387, |
| "text": "Guerini et al. (2013)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Scores", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": "For the last set of scores in this set, we used the Multi-Perspective Question Answering (MPQA) subjectivity lexicon (Wiebe et al., 2005) .", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 137, |
| "text": "(Wiebe et al., 2005)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Scores", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": "In the lexicon, tokens are assigned a polarity (positive/negative), and a strength for the subjectivity (weak/strong). We assign a score of -1 to a token for having negative subjectivity, and +1 for having positive subjectivity. Tokens having weak subjectivity are multiplied with 0.5, and the total subjectivity score of the tweet is divided by the number of tokens to generate the final score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Scores", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": "Our past research shows that incorporating word cluster features improve classification accuracy (Nikfarjam et al., 2014) . These clusters are generated from vector representations of words, which are learned from large, unlabeled data sets. For our word clusters, the vector representations were learned from over 56 million tweets, using a Hidden Markov Model-based algorithm that partitions words into a base set of 1000 clusters, and induces a hierarchy among those 1000 clusters (Owoputi et al., 2012) . To generate features from these clusters, for each tweet, we identify the cluster number of each token, and use all the cluster numbers in a bag-of-words manner. Thus, every tweet is represented with a set of cluster numbers, with semantically similar tokens having the same cluster number. More information about generating the embeddings can be found in the related papers (Bengio et al., 2003; Turian et al., 2010; Mikolov et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 121, |
| "text": "(Nikfarjam et al., 2014)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 484, |
| "end": 506, |
| "text": "(Owoputi et al., 2012)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 884, |
| "end": 905, |
| "text": "(Bengio et al., 2003;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 906, |
| "end": 926, |
| "text": "Turian et al., 2010;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 927, |
| "end": 948, |
| "text": "Mikolov et al., 2013)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Cluster Features", |
| "sec_num": "3.2.5" |
| }, |
| { |
| "text": "We collected a large set of automatically 'annotated' sentiment corpus (Go et al., 2009) . Using the negative and positive polarity tweets separately, we generated two distributional semantics models using the Word2Vec tool. 4 We then applied K-means clustering to the two distributional models to generate 100 clusters each. Finally, we compute the centroid vectors for each of the clusters in the two sets.", |
| "cite_spans": [ |
| { |
| "start": 71, |
| "end": 88, |
| "text": "(Go et al., 2009)", |
| "ref_id": null |
| }, |
| { |
| "start": 225, |
| "end": 226, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Centroid Features", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "Two feature vectors are generated from each tweet based on these centroid vectors. For each tweet, the centroid of the tweet is computed by averaging the individual word vectors in the tweet. The cosine similarities of the tweet centroid are then computed with each of the two sets of 100 centroid vectors. The vectors of similarities are then used as features. Our intuition is that these vectors will indicate similarities of tweets with posts of negative or positive sentiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Centroid Features", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "We use a set of features which represent simple structural properties of the tweets. These include: length, number of sentences, and average sentence length.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structural Features", |
| "sec_num": "3.2.7" |
| }, |
| { |
| "text": "Using the abovementioned features, we trained SVM classifiers for the classification task. The performance of SVMs can vary significantly based on the kernel and specific parameter values. For our work, based on past research on this type of data, we used the RBF kernel. We computed optimal values for the cost and \u03b3 parameters via grid-search and 10-fold cross validation over the training set. To address the problem of data imbalance, we utilized the weighted SVM feature of the LibSVM library (Chang and Lin, 2011) , and we attempted to find optimal values for the weights in the same way using 10-fold cross validation over the training set. We found that cost = 64.0, \u03b3 = 0.0, \u03c9 1 = 1.2, and \u03c9 2 = 2.6 to produce the best results, where \u03c9 1 and \u03c9 2 are the weights for the positive and negative classes, respectively. Table 1 presents the performance of our system on the training and test data sets. The table presents the positive and negative class F-scores for the system, and the average of the two scores-the metric that is used for ranking systems in the SemEval evaluations for this task. The training set results are obtained via training on the training set and evaluating on the devtest set. The test results are the final SemEval results. ", |
| "cite_spans": [ |
| { |
| "start": 498, |
| "end": 519, |
| "text": "(Chang and Lin, 2011)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 825, |
| "end": 832, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "To assess the contribution of each feature towards the final score, we performed leave-one-out feature and single feature experiments. Tables 2 and 3 show the P +N 2 values for the training and the test sets for the two set of experiments. The first row of the tables present the results when all the features are used, and the following rows show the results when a specific feature is removed or when a single feature is used. The tables suggest that almost all the features play important roles in classification. As shown in Table 3 , n-grams, word clusters, and centroids give the highest classification scores when employed individually. Table 2 illustrates similar information, by showing which features cause the largest drops in performance when removed. For all the other feature sets, the drops in the evaluation scores shown in Table 3 are very low, meaning that their contribution to the final evaluation score is quite limited. The experiments suggest that the classifier settings (i.e., the parameter values and the class weights) play a more important role in our final approach, as greater deviations from the scores presented can be achieved by fine tuning the parameter values than by adding, removing, or modifying the feature sets. Further experimentation is required to identify useful features and to configure existing features to be more effective. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 135, |
| "end": 168, |
| "text": "Tables 2 and 3 show the P +N 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 532, |
| "end": 539, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 647, |
| "end": 654, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 843, |
| "end": 850, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Analysis", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Our system achieved moderate performance on the SemEval sentiment analysis task utilizing very basic settings. The F-scores were particularly low for the negative class, which can be attributed to the class imbalance. Considering that the performance of our system was achieved by very basic settings, there is promise of better performance via the utilization of feature generation and engineering techniques.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We have several planned future tasks to improve the classification performance on this data set, and for social media based sentiment analysis in general. Following on from our past work on social media data Sarker et al., 2016b) , our primary goal to improve performance in the future is to employ preprocessing techniques that can normalize the texts and better prepare them for the feature generation stage. We will also attempt to optimize our distributional semantics models further.", |
| "cite_spans": [ |
| { |
| "start": 208, |
| "end": 229, |
| "text": "Sarker et al., 2016b)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "http://www.statisticbrain.com/ twitter-statistics/ Accessed on: 23rd December, 2015.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the implementation provided by the NLTK toolkit http://www.nltk.org/.3 http://wordnet.princeton.edu/. Accessed on December 13, 2015.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://code.google.com/archive/p/ word2vec/. Accessed Feb-22-2016.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by NIH National Library of Medicine under grant number NIH NLM 1R01LM011176. The content is solely the responsibility of the authors and does not necessarily represent the official views of the NLM or NIH.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Robust Sentiment Detection on Twitter from Biased and Noisy Data", |
| "authors": [ |
| { |
| "first": "Luciano", |
| "middle": [], |
| "last": "Barbosa", |
| "suffix": "" |
| }, |
| { |
| "first": "Junlan", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "36--44", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luciano Barbosa and Junlan Feng. 2010. Robust Sen- timent Detection on Twitter from Biased and Noisy Data. In Proceedings of COLING, pages 36-44.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A Neural Probabilistic Language Model", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9jean", |
| "middle": [], |
| "last": "Ducharme", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Jauvin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "1137--1155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Jauvin. 2003. A Neural Probabilistic Lan- guage Model. Journal of Machine Learning Research, 3:1137-1155.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "LIBSVM: A library for support vector machines", |
| "authors": [ |
| { |
| "first": "Chih-Chung", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chih-Jen", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "ACM Transactions on Intelligent Systems and Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chih-Chung Chang and Chih-Jen Lin. 2011. LIBSVM: A library for support vector machines. ACM Transac- tions on Intelligent Systems and Technology, 2:27:1- 27:27. Software available at http://www.csie. ntu.edu.tw/\u02dccjlin/libsvm.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Enhanced Sentiment Learning Using Twitter Hashtags and Smileys", |
| "authors": [ |
| { |
| "first": "Dmitry", |
| "middle": [], |
| "last": "Davidov", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Tsur", |
| "suffix": "" |
| }, |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Rappoport", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "241--249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dmitry Davidov, Oren Tsur, and Ari Rappoport. 2010. Enhanced Sentiment Learning Using Twitter Hashtags and Smileys. In Proceedings of COLING, pages 241- 249.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Twitter Sentiment Classification using Distant Supervision", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Twitter Sentiment Classification using Dis- tant Supervision. https://www-cs.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Sentiment Analysis: How to Derive Prior Polarities from SentiWordNet", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Guerini", |
| "suffix": "" |
| }, |
| { |
| "first": "Lorenzo", |
| "middle": [], |
| "last": "Gatti", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Turchi", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1259--1269", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Guerini, Lorenzo Gatti, and Marco Turchi. 2013. Sentiment Analysis: How to Derive Prior Polarities from SentiWordNet. In Proceedings of Empirical Methods in Natural Language Processing (EMNLP), pages 1259-1269.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "mining and summarizing customer reviews", |
| "authors": [ |
| { |
| "first": "Minqing", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minqing Hu and Bing. 2004. \"mining and sum- marizing customer reviews\". In Proceedings of the ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Twitter Power: Tweets as Electronic Word of Mouth", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Bernard", |
| "suffix": "" |
| }, |
| { |
| "first": "Mimi", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Kate", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdur", |
| "middle": [], |
| "last": "Sobel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chowdury", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Journal of the American Society for Information Science and Technology", |
| "volume": "60", |
| "issue": "11", |
| "pages": "2169--2188", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bernard J Jansen, Mimi Zhang, Kate Sobel, and Ab- dur Chowdury. 2009. Twitter Power: Tweets as Electronic Word of Mouth. Journal of the Ameri- can Society for Information Science and Technology, 60(11):2169-2188.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Twitter Sentiment Analysis: The Good the Bad and the OMG!", |
| "authors": [ |
| { |
| "first": "Efthymios", |
| "middle": [], |
| "last": "Kouloumpis", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Johanna", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Fifth International AAAI Conference on Weblogs and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Efthymios Kouloumpis, Theresa Wilson, and Johanna Moore. 2011. Twitter Sentiment Analysis: The Good the Bad and the OMG! In Proceedings of the Fifth In- ternational AAAI Conference on Weblogs and Social Media.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A survey of opinion mining and sentiment analysis", |
| "authors": [ |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Mining Text Data", |
| "volume": "", |
| "issue": "", |
| "pages": "415--463", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bing Liu and Lei Zhang. 2012. A survey of opinion mining and sentiment analysis. In Charu C. Aggar- wal and ChengXiang Zhai, editors, Mining Text Data, pages 415-463.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Efficient Estimation of Word Representations in Vector Space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1301.3781v3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient Estimation of Word Represen- tations in Vector Space. arXiv:1301.3781v3.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Pharmacovigilance from social media: mining adverse drug reaction mentions using sequence labeling with word embedding cluster features", |
| "authors": [ |
| { |
| "first": "Azadeh", |
| "middle": [], |
| "last": "Nikfarjam", |
| "suffix": "" |
| }, |
| { |
| "first": "Abeed", |
| "middle": [], |
| "last": "Sarker", |
| "suffix": "" |
| }, |
| {
| "first": "Karen",
| "middle": [],
| "last": "O'Connor",
| "suffix": ""
| },
| {
| "first": "Rachel",
| "middle": [],
| "last": "Ginn",
| "suffix": ""
| },
| {
| "first": "Graciela",
| "middle": [],
| "last": "Gonzalez",
| "suffix": ""
| }
| ], |
| "year": 2014, |
| "venue": "Journal of the American Medical Informatics Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Azadeh Nikfarjam, Abeed Sarker, Karen O'Connor, Rachel Ginn, and Graciela Gonzalez. 2014. Pharma- covigilance from social media: mining adverse drug reaction mentions using sequence labeling with word embedding cluster features. Journal of the American Medical Informatics Association (JAMIA).", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Part-of-Speech Tagging for Twitter: Word Clusters and Other Advances", |
| "authors": [ |
| { |
| "first": "Olutobi", |
| "middle": [], |
| "last": "Owoputi", |
| "suffix": "" |
| }, |
| {
| "first": "Brendan",
| "middle": [],
| "last": "O'Connor",
| "suffix": ""
| },
| {
| "first": "Chris",
| "middle": [],
| "last": "Dyer",
| "suffix": ""
| },
| {
| "first": "Kevin",
| "middle": [],
| "last": "Gimpel",
| "suffix": ""
| },
| {
| "first": "Nathan",
| "middle": [],
| "last": "Schneider",
| "suffix": ""
| }
| ], |
| "year": 2012, |
| "venue": "School of Computer Science", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Olutobi Owoputi, Brendan O'Connor, Chris Dyer Kevin Gimpel, and Nathan Schneider. 2012. Part-of-Speech Tagging for Twitter: Word Clusters and Other Ad- vances. Technical report, School of Computer Sci- ence, Carnegie Mellon University.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Opinion Mining and Sentiment Analysis",
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Foundations and Trends in Information Retrieval",
| "volume": "2", |
| "issue": "", |
| "pages": "1--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang and Lillian Lee. 2008. Opinion Mining and Sentiment Analysis. Foundations and Trends in Infor- mation Retrieval, 2(1):1-135.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Thumbs up? Sentiment Classification using Machine Learning Techniques", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Shivakumar", |
| "middle": [], |
| "last": "Vaithyanathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the ACL conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang, Lillian Lee, and Shivakumar Vaithyanathan. 2002. Thumbs up? Sentiment Classification using Machine Learning Techniques. In Proceedings of the ACL conference on Empirical Methods in Natural Language Processing (EMNLP), pages 79-86.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "An algorithm for suffix stripping", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [ |
| "F" |
| ], |
| "last": "Porter", |
| "suffix": "" |
| } |
| ], |
| "year": 1980, |
| "venue": "", |
| "volume": "14", |
| "issue": "", |
| "pages": "130--137", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin F. Porter. 1980. An algorithm for suffix stripping. Program, 14(3):130-137.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Portable Automatic Text Classification for Adverse Drug Reaction Detection via Multi-corpus Training", |
| "authors": [ |
| { |
| "first": "Abeed", |
| "middle": [], |
| "last": "Sarker", |
| "suffix": "" |
| }, |
| { |
| "first": "Graciela", |
| "middle": [], |
| "last": "Gonzalez", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Journal of Biomedical Informatics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abeed Sarker and Graciela Gonzalez. 2014. Portable Automatic Text Classification for Adverse Drug Reac- tion Detection via Multi-corpus Training. Journal of Biomedical Informatics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Automatic Prediction of Evidence-based Recommendations via Sentence-level Polarity Classification", |
| "authors": [ |
| { |
| "first": "Abeed", |
| "middle": [], |
| "last": "Sarker", |
| "suffix": "" |
| }, |
| { |
| "first": "Diego", |
| "middle": [], |
| "last": "Molla", |
| "suffix": "" |
| }, |
| { |
| "first": "Cecile", |
| "middle": [], |
| "last": "Paris", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the International Joint Conference on Natural Language Processing (IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "712--718", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abeed Sarker, Diego Molla, and Cecile Paris. 2013. Automatic Prediction of Evidence-based Recommen- dations via Sentence-level Polarity Classification. In Proceedings of the International Joint Conference on Natural Language Processing (IJCNLP), pages 712- 718.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Utilizing social media data for pharmacovigilance: A review", |
| "authors": [ |
| { |
| "first": "Abeed", |
| "middle": [], |
| "last": "Sarker", |
| "suffix": "" |
| }, |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Ginn", |
| "suffix": "" |
| }, |
| { |
| "first": "Azadeh", |
| "middle": [], |
| "last": "Nikfarjam", |
| "suffix": "" |
| }, |
| {
| "first": "Karen",
| "middle": [],
| "last": "O'Connor",
| "suffix": ""
| },
| {
| "first": "Karen",
| "middle": [],
| "last": "Smith",
| "suffix": ""
| },
| {
| "first": "Swetha",
| "middle": [],
| "last": "Jayaraman",
| "suffix": ""
| },
| {
| "first": "Tejaswi",
| "middle": [],
| "last": "Upadhaya",
| "suffix": ""
| },
| {
| "first": "Graciela",
| "middle": [],
| "last": "Gonzalez",
| "suffix": ""
| }
| ], |
| "year": 2015, |
| "venue": "Journal of Biomedical Informatics", |
| "volume": "54", |
| "issue": "", |
| "pages": "202--212", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abeed Sarker, Rachel Ginn, Azadeh Nikfarjam, Karen O'Connor, Karen Smith, Swetha Jayaraman, Tejaswi Upadhaya, and Graciela Gonzalez. 2015a. Utilizing social media data for pharmacovigilance: A review. Journal of Biomedical Informatics, 54:202-212.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Diegolab: An approach for message-level sentiment classification in twitter", |
| "authors": [ |
| { |
| "first": "Abeed", |
| "middle": [], |
| "last": "Sarker", |
| "suffix": "" |
| }, |
| { |
| "first": "Azadeh", |
| "middle": [], |
| "last": "Nikfarjam", |
| "suffix": "" |
| }, |
| { |
| "first": "Davy", |
| "middle": [], |
| "last": "Weissenbacher", |
| "suffix": "" |
| }, |
| { |
| "first": "Graciela", |
| "middle": [], |
| "last": "Gonzalez", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "510--514", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abeed Sarker, Azadeh Nikfarjam, Davy Weissenbacher, and Graciela Gonzalez. 2015b. Diegolab: An ap- proach for message-level sentiment classification in twitter. In Proceedings of the 9th International Work- shop on Semantic Evaluation (SemEval 2015), pages 510-514, Denver, Colorado, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Social Media Mining Shared Task Workshop", |
| "authors": [ |
| { |
| "first": "Abeed", |
| "middle": [], |
| "last": "Sarker", |
| "suffix": "" |
| }, |
| { |
| "first": "Azadeh", |
| "middle": [], |
| "last": "Nikfarjam", |
| "suffix": "" |
| }, |
| { |
| "first": "Graciela", |
| "middle": [], |
| "last": "Gonzalez", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Pacific Symposium on Biocomputing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abeed Sarker, Azadeh Nikfarjam, and Graciela Gonza- lez. 2016a. Social Media Mining Shared Task Work- shop. In Proceedings of the Pacific Symposium on Bio- computing.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Social media mining for toxicovigilance: Automatic monitoring of prescription medication abuse from twitter", |
| "authors": [ |
| { |
| "first": "Abeed", |
| "middle": [], |
| "last": "Sarker", |
| "suffix": "" |
| }, |
| {
| "first": "Karen",
| "middle": [],
| "last": "O'Connor",
| "suffix": ""
| },
| {
| "first": "Rachel",
| "middle": [],
| "last": "Ginn",
| "suffix": ""
| },
| {
| "first": "Matthew",
| "middle": [],
| "last": "Scotch",
| "suffix": ""
| },
| {
| "first": "Karen",
| "middle": [],
| "last": "Smith",
| "suffix": ""
| },
| {
| "first": "Dan",
| "middle": [],
| "last": "Malone",
| "suffix": ""
| },
| {
| "first": "Graciela",
| "middle": [],
| "last": "Gonzalez",
| "suffix": ""
| }
| ], |
| "year": 2016, |
| "venue": "Drug Safety", |
| "volume": "39", |
| "issue": "3", |
| "pages": "231--240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abeed Sarker, Karen O'Connor, Rachel Ginn, Matthew Scotch, Karen Smith, Dan Malone, and Graciela Gon- zalez. 2016b. Social media mining for toxicovigi- lance: Automatic monitoring of prescription medica- tion abuse from twitter. Drug Safety, 39(3):231-240.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Word representations: A simple and general method for semi-supervised learning", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Turian", |
| "suffix": "" |
| }, |
| { |
| "first": "Lev", |
| "middle": [], |
| "last": "Ratinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "384--394", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph Turian, Lev Ratinov, and Yoshua Bengio. 2010. Word representations: A simple and general method for semi-supervised learning. In Proceedings of the 48th Annual Meeting of the Association for Computa- tional Linguistics, pages 384-394.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Thumbs up or thumbs down? Semantic orientation applied to unsupervised classification of reviews", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of ACL-02, 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "417--424", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Turney. 2002. Thumbs up or thumbs down? Se- mantic orientation applied to unsupervised classifica- tion of reviews. In Proceedings of ACL-02, 40th An- nual Meeting of the Association for Computational Linguistics, pages 417-424.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Annotating expressions of opinions and emotions in language",
| "authors": [ |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Language Resources and Evaluation",
| "volume": "39", |
| "issue": "", |
| "pages": "165--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Janyce Wiebe, Theresa Wilson, and Claire Cardie. 2005. Annotating expressions opinion and emotion in lan- guage. Language Resources and Evaluation, 39:165- 210.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "Classification results for the DIEGOLab16 system over the training and test sets.", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>: Leave-one-out P +N 2 ing and test sets.</td><td>feature scores for the train-</td></tr><tr><td>Feature</td><td>P + N 2</td></tr><tr><td>All N-grams Synsets Sentiment Scores Word Clusters Centroids Other</td><td>0.542 0.515 0.494 0.472 0.531 0.535 0.254</td></tr></table>", |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "Single feature P +N 2 scores for the training and test sets.", |
| "type_str": "table" |
| } |
| } |
| } |
| } |