| { |
| "paper_id": "E17-1046", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:50:47.872075Z" |
| }, |
| "title": "TDParse: Multi-target-specific sentiment recognition on Twitter", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Warwick Coventry", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "bo.wang@warwick.ac.uk" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Liakata", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Warwick Coventry", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "m.liakata@warwick.ac.uk" |
| }, |
| { |
| "first": "Arkaitz", |
| "middle": [], |
| "last": "Zubiaga", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Warwick Coventry", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "a.zubiaga@warwick.ac.uk" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Procter", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Warwick Coventry", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Existing target-specific sentiment recognition methods consider only a single target per tweet, and have been shown to miss nearly half of the actual targets mentioned. We present a corpus of UK election tweets, with an average of 3.09 entities per tweet and more than one type of sentiment in half of the tweets. This requires a method for multi-target specific sentiment recognition, which we develop by using the context around a target as well as syntactic dependencies involving the target. We present results of our method on both a benchmark corpus of single targets and the multi-target election corpus, showing state-of-the-art performance in both corpora and outperforming previous approaches to multi-target sentiment task as well as deep learning models for single-target sentiment.", |
| "pdf_parse": { |
| "paper_id": "E17-1046", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Existing target-specific sentiment recognition methods consider only a single target per tweet, and have been shown to miss nearly half of the actual targets mentioned. We present a corpus of UK election tweets, with an average of 3.09 entities per tweet and more than one type of sentiment in half of the tweets. This requires a method for multi-target specific sentiment recognition, which we develop by using the context around a target as well as syntactic dependencies involving the target. We present results of our method on both a benchmark corpus of single targets and the multi-target election corpus, showing state-of-the-art performance in both corpora and outperforming previous approaches to multi-target sentiment task as well as deep learning models for single-target sentiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recent years have seen increasing interest in mining Twitter to assess public opinion on political affairs and controversial issues (Tumasjan et al., May 2010; Wang et al., 2012) as well as products and brands (Pak and Paroubek, 2010) . Opinion mining from Twitter is usually achieved by determining the overall sentiment expressed in an entire tweet. However, inferring the sentiment towards specific targets (e.g. people or organisations) is severely limited by such an approach since a tweet may contain different types of sentiment expressed towards each of the targets mentioned. An early study by Jiang et al. (2011) showed that 40% of classification errors are caused by using tweetlevel approaches that are independent of the target. Consider the tweet: \"I will b voting 4 Greens ... 1st reason: 2 remove 2 party alt. of labour or conservative every 5 years. 2nd: fracking\"", |
| "cite_spans": [ |
| { |
| "start": 132, |
| "end": 159, |
| "text": "(Tumasjan et al., May 2010;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 160, |
| "end": 178, |
| "text": "Wang et al., 2012)", |
| "ref_id": null |
| }, |
| { |
| "start": 210, |
| "end": 234, |
| "text": "(Pak and Paroubek, 2010)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 603, |
| "end": 622, |
| "text": "Jiang et al. (2011)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The overall sentiment is positive but there is a negative sentiment towards \"labour\", \"conservative\" and \"fracking\" and a positive sentiment towards \"Greens\". Examples like this are common in tweets discussing topics like politics. As has been demonstrated by the failure of election polls in both referenda and general elections (Burnap et al., 2016) , it is important to understand not only the overall mood of the electorate, but also to distinguish and identify sentiment towards different key issues and entities, many of which are discussed on social media on the run up to elections.", |
| "cite_spans": [ |
| { |
| "start": 330, |
| "end": 351, |
| "text": "(Burnap et al., 2016)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recent developments on target-specific Twitter sentiment classification have explored different ways of modelling the association between target entities and their contexts. Jiang et al. (2011) propose a rule-based approach that utilises dependency parsing and contextual tweets. Dong et al. (2014) , Tang et al. (2016a) and Zhang et al. (2016) have studied the use of different recurrent neural network models for such a task but the gain in performance from the complex neural architectures is rather unclear 1 In this work we introduce the multi-targetspecific sentiment recognition task, building a corpus of tweets from the 2015 UK general election campaign suited to the task. In this dataset, target entities have been semi-automatically selected, and sentiment expressed towards multiple target entities as well as high-level topics in a tweet have been manually annotated. Unlike all existing studies on target-specific Twitter sentiment analysis, we move away from the assumption that each tweet mentions a single target; we introduce a more realistic and challenging task of identifying sentiment towards multiple targets within a tweet. To tackle this task, we propose TDParse, a method that divides a tweet into different segments building on the approach introduced by Vo and Zhang (2015) . TDParse exploits a syntactic dependency parser designed explicitly for tweets (Kong et al., 2014) , and combines syntactic information for each target with its left-right context.", |
| "cite_spans": [ |
| { |
| "start": 174, |
| "end": 193, |
| "text": "Jiang et al. (2011)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 280, |
| "end": 298, |
| "text": "Dong et al. (2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 301, |
| "end": 320, |
| "text": "Tang et al. (2016a)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 325, |
| "end": 344, |
| "text": "Zhang et al. (2016)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 511, |
| "end": 512, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 1283, |
| "end": 1302, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1383, |
| "end": 1402, |
| "text": "(Kong et al., 2014)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We evaluate and compare our proposed system both on our new multi-target UK election dataset, as well as on the benchmarking dataset for single-target dependent sentiment (Dong et al., 2014) . We show a clear state-of-the-art performance of TDParse over existing approaches for tweets with multiple targets, which encourages further research on the multi-target-specific sentiment recognition task. 2", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 190, |
| "text": "(Dong et al., 2014)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The 2015 Semeval challenge introduced a task on target-specific Twitter sentiment (Rosenthal et al., 2015) which most systems (Boag et al., 2015; Plotnikova et al., 2015) treated in the same way as tweet level sentiment. The best performing system in the 2016 Semeval Twitter challenge substask B (Nakov et al., 2016) , named Tweester, also performs on tweet level sentiment classification. This is unsurprising since tweets in both tasks only contain a single predefined target entity and as a result often a tweet-level approach is sufficient. An exception to tweet level approaches for this task, showing promise, is Townsend et al. (2015) , who trained a SVM classifier for tweet segmentation, then used a phrase-based sentiment classifier for assigning sentiment around the target. The Semeval aspect-based sentiment analysis task (Pontiki et al., 2015; Pateria and Choubey, 2016) aims to identify sentiment towards entityattribute pairs in customer reviews. This differs from our goal in the following way: both the entities and attributes are limited to a predefined inventory of limited size; they are aspect categories reflected in the reviews rather than specific targets, while each review only has one target entity, e.g. a laptop or a restaurant. Also sentiment classification in formal text such as product reviews 2 The data and code can be found at https://goo.gl/ S2T1GO is very different from that in tweets. Recently Vargas et al. (2016) analysed the differences between the overall and target-dependent sentiment of tweets for three events containing 30 targets, showing many significant differences between the corresponding overall and target-dependent sentiment labels, thus confirming that these are distinct tasks.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 106, |
| "text": "(Rosenthal et al., 2015)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 126, |
| "end": 145, |
| "text": "(Boag et al., 2015;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 146, |
| "end": 170, |
| "text": "Plotnikova et al., 2015)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 297, |
| "end": 317, |
| "text": "(Nakov et al., 2016)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 620, |
| "end": 642, |
| "text": "Townsend et al. (2015)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 836, |
| "end": 858, |
| "text": "(Pontiki et al., 2015;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 859, |
| "end": 885, |
| "text": "Pateria and Choubey, 2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1329, |
| "end": 1330, |
| "text": "2", |
| "ref_id": null |
| }, |
| { |
| "start": 1436, |
| "end": 1456, |
| "text": "Vargas et al. (2016)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work: Target-dependent Sentiment Classification on Twitter", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Early work tackling target-dependent sentiment in tweets (Jiang et al., 2011) designed targetdependent features manually, relying on the syntactic parse tree and a set of grammar-based rules, and incorporating the sentiment labels of related tweets to improve the classification performance. Recent work (Dong et al., 2014) used recursive neural networks and adaptively chose composition functions to combine child feature vectors according to their dependency type, to reflect sentiment signal propagation to the target. Their datadriven composition selection approach replies on the dependency types as features and a small set of rules for constructing target-dependent trees. Their manually annotated dataset contains only one target per tweet and has since been used for benchmarking by several subsequent studies (Vo and Zhang, 2015; Tang et al., 2016a; Zhang et al., 2016) . Vo and Zhang (2015) exploit the left and right context around a target in a tweet and combine low-dimensional embedding features from both contexts and the full tweet using a number of different pooling functions. Despite not fully capturing semantic and syntactic information given the target entity, they show a much better performance than Dong et al. (2014) , indicating useful signals in relation to the target can be drawn from such context representation. Both Tang et al. (2016a) and Zhang et al. (2016) adopt and integrate left-right target-dependent context into their recurrent neural network (RNN) respectively. While Tang et al (2016a) propose two long shortterm memory (LSTM) models showing competitive performance to Vo and Zhang (2015) , Zhang et al (2016) design a gated neural network layer between the left and right context in a deep neural network structure but require a combination of three corpora for training and evaluation. Results show that conventional neural network models like LSTM are incapable of explicitly capturing important context information of a target (Tang et al., 2016b) . Tang et al. (2016a) also experiment with adding attention layers for LSTM but fail to achieve competitive results possibly due to the small training corpus.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 77, |
| "text": "(Jiang et al., 2011)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 304, |
| "end": 323, |
| "text": "(Dong et al., 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 819, |
| "end": 839, |
| "text": "(Vo and Zhang, 2015;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 840, |
| "end": 859, |
| "text": "Tang et al., 2016a;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 860, |
| "end": 879, |
| "text": "Zhang et al., 2016)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 882, |
| "end": 901, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1225, |
| "end": 1243, |
| "text": "Dong et al. (2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1350, |
| "end": 1369, |
| "text": "Tang et al. (2016a)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1374, |
| "end": 1393, |
| "text": "Zhang et al. (2016)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1512, |
| "end": 1530, |
| "text": "Tang et al (2016a)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1614, |
| "end": 1633, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1636, |
| "end": 1654, |
| "text": "Zhang et al (2016)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1976, |
| "end": 1996, |
| "text": "(Tang et al., 2016b)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1999, |
| "end": 2018, |
| "text": "Tang et al. (2016a)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work: Target-dependent Sentiment Classification on Twitter", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Going beyond the existing work we study the more challenging task of classifying sentiment towards multiple target entities within a tweet. Using the syntactic information drawn from tweetspecific parsing, in conjunction with the left-right contexts, we show the state-of-the-art performance in both single and multi-target classification tasks. We also show that the tweet level approach that many sentiment systems adopted in both Semeval challenges, fail to capture all target-sentiments in a multi-target scenario (Section 5.1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work: Target-dependent Sentiment Classification on Twitter", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We describe the design, collection and annotation of a corpus of tweets about the 2015 UK election.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Creating a Corpus for Target Specific Sentiment in Twitter", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We collected a corpus of tweets about the UK elections, as we wanted to select a political event that would trigger discussions on multiple entities and topics. Collection was performed through Twitter's streaming API and tracking 14 hashtags 3 . Data harvesting was performed between 7th February and 30th March 2015. This led to the collection of 712k tweets, from which a subset was sampled for manual annotation of targetspecific sentiment. We also created a list of 438 topic keywords relevant to 9 popular election issues 4 for data sampling. The initial list of 438 seed words provided by a team of journalists was augmented by searching for similar words within a vector space on the basis of cosine similarity. Keywords are used both in order to identify thematically relevant tweets and also targets. We also consider named entities as targets. Sampling of tweets was performed by removing retweets and making sure each tweet contained at least one topic keyword from one of the 9 election issues, leading to 52,190 highly relevant tweets. For the latter we ranked tweets based on a \"similarity\" relation, where \"similarity\" is measured as a function of content overlap (Mihalcea, 2004) . Formally, given a tweet S i being represented by the set of N words that appear in the tweet:", |
| "cite_spans": [ |
| { |
| "start": 1180, |
| "end": 1196, |
| "text": "(Mihalcea, 2004)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Harvesting and Entity Recognition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "S i = W 1 i , W 2 i , ..", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Harvesting and Entity Recognition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "., W N i and our list of curated topic keywords T , the ranking function is defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Harvesting and Entity Recognition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "log(|S i |) * |W i \u2208 S i \u2229 W i \u2208 T | (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Harvesting and Entity Recognition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where |S i | is the total number of words in the tweet; unlike Mihalcea (2004) we prefer longer tweets. We used exact matching with flexibility on the special characters at either end. TF-IDF normalisation and cosine similarity were then applied to the dataset to remove very similar tweets (empirically we set the cosine similarity threshold to 0.6). We also collected all external URLs mentioned in our dataset and their web content throughout the data harvesting period, filtering out tweets that only contain an external link or snippets of a web page. Finally we sampled 4,500 top-ranked tweets keeping the representation of tweets mentioning each election issue proportionate to the original dataset. For annotation we considered sentiment towards two types of targets: entities and topic keywords. Entities were processed in two ways: firstly, named entities (people, locations, and organisations) were automatically annotated by combining the output of Stanford Named Entity Recognition (NER) (Finkel et al., 2005) , NLTK NER (Bird, 2006 ) and a Twitter-specific NER (Ritter et al., 2011) . All three were combined for a more complete coverage of entities mentioned in tweets and subsequently corrected by removing wrongly marked entities through manual annotation. Secondly, to make sure we covered all key entities in the tweets, we also matched tweets against a manually curated list of 7 political-party names and added users mentioned therein as entities. The second type of targets matched the topic keywords from our curated list.", |
| "cite_spans": [ |
| { |
| "start": 1001, |
| "end": 1022, |
| "text": "(Finkel et al., 2005)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1034, |
| "end": 1045, |
| "text": "(Bird, 2006", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1075, |
| "end": 1096, |
| "text": "(Ritter et al., 2011)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Harvesting and Entity Recognition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We developed a tool for manual annotation of sentiment towards the targets (i.e. entities and topic keywords) mentioned in each tweet. The annotation was performed by nine PhD-level journalism students, each of them annotating approximately a ninth of the dataset, i.e. 500 tweets. Additionally, they annotated a common subset of 500 tweets consisting of 2,197 target entities, which was used to measure inter-annotator agreement (IAA). An- Figure 1 : Annotation tool for human annotation of target specific sentiment analysis notators were shown detailed guidelines 5 before taking up the task, after which they were redirected to the annotation tool itself (see Figure 1 ). Tweets were shown to annotators one by one, and they had to complete the annotation of all targets in a tweet to proceed. The tool shows a tweet with the targets highlighted in bold. Possible annotation actions consisted in: (1) marking the sentiment for a target as being positive, negative, or neutral, (2) marking a target as being mistakenly highlighted (i.e. 'doesnotapply') and hence removing it, and (3) highlighting new targets that our preprocessing step had missed, and associating a sentiment value with them. In this way we obtained a corrected list of targets for each tweet, each with an associated sentiment value.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 441, |
| "end": 449, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 664, |
| "end": 672, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We measure inter-annotator agreement in two different ways. On the one hand, annotators achieved \u03ba = 0.345 (z = 92.2, p < 0.0001) (fair agreement) 6 when choosing targets to be added or removed. On the other hand, they achieved a similar score of \u03ba = 0.341 (z = 77.7, p < 0.0001) (fair agreement) when annotating the sentiment of the resulting targets. It is worth noting that the sentiment annotation for each target also involves choosing among not only positive/negative/neutral but also a fourth category 'doesnotapply'. The resulting dataset contains 4,077 tweets, with an average of 3.09 entity mentions (targets) per tweet. As many as 3,713 tweets have more than a single entity mention (target) per tweet, which makes the task different from 2015 Semeval 10 subtask C (Rosenthal et al., 2015 ) and a target-dependent benchmarking dataset of Dong et al. (2014) where each tweet has only one target annotated and thus one sentiment label assigned. The number of targets in the 4,077 tweets to be annotated originally amounted to 12,874. However, the annotators unhighlighted 975 of them, and added 688 new ones, so that the final number of targets in the dataset is 12,587. These are distributed as follows: 1,865 are positive, 4,707 are neutral, and 6,015 are negative. This distribution shows the tendency of a theme like politics, where users tend to have more negative opinions. This is different from the Semeval dataset, which has a majority of neutral sentiment. Looking at the annotations provided for different targets within each tweet, we observe that 2,051 tweets (50.3%) have all their targets consistently annotated with a single sentiment value, 1,753 tweets (43.0%) have two different sentiments, and 273 tweets (6.7%) have three different sentiment values. These statistics suggest that providing a single sentiment for the entire tweet would not be appropriate in nearly half of the cases confirming earlier observations (Jiang et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 776, |
| "end": 799, |
| "text": "(Rosenthal et al., 2015", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 849, |
| "end": 867, |
| "text": "Dong et al. (2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1945, |
| "end": 1965, |
| "text": "(Jiang et al., 2011)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We also labelled each tweet containing one or more topics from the 9 election issues, and asked the annotators to mark the author's sentiment towards the topic. Unlike entities, topics may not be directly present in tweets. We compare topic sentiment with target/entity sentiment for 3963 tweets from our dataset adopting the approach by Vargas et al. (2016) . Table 1 reports ) shows the discrepancies between target and topic sentiments. We observe marked differences between the two sentiment labels. For example it shows the topic sentiment is more neutral (1438.7 vs. 1104.1) and less negative (1930.7 vs. 2285.5) than the target sen-timent. There is also a number of tweets expressing neutrality towards the topics mentioned but polarised sentiment towards targets (i.e. we observe c(s topic = neu \u2229 s targets = neg) = 258.6 also c(s topic = neu \u2229 s targets = pos) = 101.4), and vice versa. This emphasises the importance of distinguishing target entity sentiment not only on the basis of overall tweet sentiment but also in terms of sentiment towards a topic. Firstly we adopt the context-based approach by Vo and Zhang (2015) , which divides each tweet into three parts (left context, target and right context), and where the sentiment towards a target entity results from the interaction between its left and right contexts. Such sentiment signal is drawn by mapping all the words in each context into lowdimensional vectors (i.e. word embeddings), using pre-trained embedding resources, and applying neural pooling functions to extract useful features. Such context set-up does not fully capture the syntactic information of the tweet and the given target entity, and by adding features from the full tweet (as done by Vo and Zhang (2015) ) interactions between the left and right context are only implicitly modeled. Here we use a syntactic dependency parser designed explicitly for tweets (Kong et al., 2014) to find the syntactically connected parts of the tweet to each target. We then extract word embedding features from these syntactically dependent tokens [D 1 , ..., D n ] along its dependency path in the parsing tree to the target 7 , as well as from the left-target-right contexts (i.e. L \u2212 T \u2212 R). Feature vectors generated from different contexts are concatenated into a final feature vector as shown in (2), where P (X) presents a list of k different pooling functions on an embedding matrix X. Not only does this proposed framework make the learning process efficient without labor intensive manual feature engineering and heavy architecture engineering for neural models, it has also shown that complex syntactic and semantic information can be effectively drawn by simply concatenating different types of context together without the use of deep learning (other than pretrained word embeddings).", |
| "cite_spans": [ |
| { |
| "start": 338, |
| "end": 358, |
| "text": "Vargas et al. (2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1114, |
| "end": 1133, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1729, |
| "end": 1748, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1901, |
| "end": 1920, |
| "text": "(Kong et al., 2014)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 361, |
| "end": 376, |
| "text": "Table 1 reports", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 377, |
| "end": 378, |
| "text": ")", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "F = [P (D), P (L), P (T ), P (R)];", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "with", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (X) = [f 1 (X), ..., f k (X)]", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Data set: We evaluate and compare our proposed system to the state-of-the-art baselines on a benchmarking corpus (Dong et al., 2014) that has been used by several previous studies (Vo and Zhang, 2015; Tang et al., 2016a; Zhang et al., 2016) . This corpus contains 6248 training tweets and 692 testing tweets with a sentiment class balance of 25% negative, 50% neutral and 25% positive. Although the original corpus has only annotated one target per tweet, without specifying the location of the target, we expand this notion to consider cases where the target entity may appear more than once at different locations in the tweet, e.g.:", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 132, |
| "text": "(Dong et al., 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 180, |
| "end": 200, |
| "text": "(Vo and Zhang, 2015;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 201, |
| "end": 220, |
| "text": "Tang et al., 2016a;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 221, |
| "end": 240, |
| "text": "Zhang et al., 2016)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\"Nicki Minaj has brought back the female rapper. -really? Nicki Minaj is the biggest parody in popular music since the Lonely Island.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Semantically it is more appropriate and meaningful to consider both target appearances when determining the sentiment polarity of \"Nicki Minaj\" expressed in this tweet. While it isn't clear if Dong et al. (2014) and Tang et al. (2016a) have considered this realistic same-target-multiappearance scenario, Vo et al. (2015) and Zhang et al. (2016) do not take it into account when extracting target-dependent contexts. Contrary to these studies we extend our system to fully incorporate the situation where a target appears multiple times at different locations in the tweet. We add another pooling layer in (2) where we apply a medium pooling function to combine extracted feature vectors from each target appearance together into the final feature vector for the sentiment classification of such targets. Now the feature extraction function P (X) in (2) becomes:", |
| "cite_spans": [ |
| { |
| "start": 193, |
| "end": 211, |
| "text": "Dong et al. (2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 216, |
| "end": 235, |
| "text": "Tang et al. (2016a)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 305, |
| "end": 321, |
| "text": "Vo et al. (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 326, |
| "end": 345, |
| "text": "Zhang et al. (2016)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "P (X) = [P medium ([f 1 (X 1 ), ..., f 1 (X m )]), ... ... , P medium ([f k (X 1 ), ..., f k (X m )])] (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where m is the number of appearances of the target and P medium represents the dimension-wise medium pooling function.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Models: To investigate different ways of modelling target-specific context and evaluate the benefit of incorporating the same-target-multiappearance scenario, we build these models:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Semeval-best: is a tweet-level model using various types of features, namely ngrams, lexica and word embeddings with extensive data pre-processing and feature engineering. We use this model as a target-independent baseline as it approximates and beats the best performing system (Boag et al., 2015) in Semeval 2015 task 10. It also outperforms the highest ranking system, Tweester, on the Semeval 2016 corpus (by +4.0% in macro-averaged recall) and therefore constitutes a state-of-the art tweet level baseline.", |
| "cite_spans": [ |
| { |
| "start": 281, |
| "end": 300, |
| "text": "(Boag et al., 2015)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Naive-seg models: Naive-seg-slices each tweet into a sequence of sub-sentences by using punctuation (i.e. ',' '.' '?' '!'). Embedding features are extracted from each subsentence and pooling functions are applied to combine word vectors. Naive-seg extends it by adding features extracted from the lefttarget-right contexts, while Naive-seg+ extends Naive-seg by adding lexicon filtered sentiment features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 TDParse models: as described in Section 4.1. TDParse-uses a dependency parser to extract a syntactic parse tree to the target and map all child nodes to low-dimensional vectors. Final feature vectors for each target are generated using neural pooling functions. While TDParse extends it by adding features extracted from the left-target-right contexts, TD-Parse+ uses three sentiment lexica for filtering words. TDParse+ (m) differs from TDParse+ by taking into account the 'sametarget-multi-appearance' scenario. Both TD-Parse+ and TDParse+ (m) outperform stateof-the-art target-specific models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 TDPWindow-N: the same as TDParse+ with a window to constrain the left-right context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For example if N = 3 then we only consider 3 tokens on each side of the target when extracting features from the left-right context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Annotation of Target Specific Sentiment", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To compare our proposed models with Vo & Zhang (2015) , we have used the same pre-trained embedding resources and pooling functions (i.e. max, min, mean, standard deviation and product).", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 53, |
| "text": "Vo & Zhang (2015)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For classification we have used LIBLINEAR (Fan et al., 2008) , which approximates a linear SVM. In tuning the cost factor C we perform five-fold cross validation on the training data over the same set of parameter values for both Vo and Zhang (2015) 's implementation and our system. This makes sure our proposed models are comparable with those of Vo and Zhang (2015) . Evaluation metrics: We follow previous work on target-dependent Twitter sentiment classification, and report our performance in accuracy, 3-class macro-averaged (i.e. negative, neutral and positive) F 1 score as well as 2-class macroaveraged (i.e. negative and positive) F 1 score 8 , as used by the Semeval competitions (Rosenthal et al., 2015) for measuring Twitter sentiment classification performance.", |
| "cite_spans": [ |
| { |
| "start": 32, |
| "end": 60, |
| "text": "LIBLINEAR (Fan et al., 2008)", |
| "ref_id": null |
| }, |
| { |
| "start": 230, |
| "end": 249, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 349, |
| "end": 368, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 692, |
| "end": 716, |
| "text": "(Rosenthal et al., 2015)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We report our experimental results in Table 2 on the single-target benchmarking corpus (Dong et al., 2014) , with three model categories: 1) tweet-level target-independent models, 2) targetdependent models without considering the 'sametarget-multi-appearance' scenario and 3) targetdependent models incorporating the 'same-targetmulti-appearance' scenario. We include the models presented in the previous section as well as models for target specific sentiment from the literature where possible. Among the target-independent baseline models Target-ind (Vo and Zhang, 2015) and Semevalbest have shown strong performance compared with SSWE and SVM-ind (Jiang et al., 2011) as they use more features, especially rich automatic features using the embeddings of Mikolov et al. (2013) . Interestingly they also perform better than some of the targetdependent baseline systems, namely SVM-dep (Jiang et al., 2011) , Recursive NN and AdaRNN (Dong et al., 2014) , showing the difficulty of fully extracting and incorporating target information in tweets. Basic LSTM models (Tang et al., 2016a) completely ignore such target information and as a result do not perform as well.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 106, |
| "text": "(Dong et al., 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 553, |
| "end": 573, |
| "text": "(Vo and Zhang, 2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 651, |
| "end": 671, |
| "text": "(Jiang et al., 2011)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 758, |
| "end": 779, |
| "text": "Mikolov et al. (2013)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 887, |
| "end": 907, |
| "text": "(Jiang et al., 2011)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 934, |
| "end": 953, |
| "text": "(Dong et al., 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1065, |
| "end": 1085, |
| "text": "(Tang et al., 2016a)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 38, |
| "end": 45, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental results and comparison with other baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Among the target-dependent systems neural network baselines have shown varying results. The adaptive recursive neural network, namely AdaRNN (Dong et al., 2014) , adaptively selects composition functions based on the input data and thus performs better than a standard recursive neural network model (Recursive NN (Dong et al., 2014) ). TD-LSTM and TC-LSTM from Tang et al. (2016a) model left-target-right contexts using two LSTM neural networks and by doing so incorporate target-dependent information. TD-LSTM uses two LSTM neural networks for modeling the left and right contexts respectively. TC-LSTM differs from (and outperforms) TD-LSTM in that it concatenates target word vectors with embedding vectors of each context word. We also test the Gated recurrent neural network models proposed by Zhang et al. (2016) on the same dataset. The gated models include: GRNN, that includes gates in its recurrent hidden layers, G3 that connects left-right context using a gated NN structure, and a combination of the two -GRNN+G3. Results show these gated neural network models do not achieve state-of-theart performance. When we compare our targetdependent model TDParse+, which incorporates target-dependent features from syntactic parses, against the target-dependent models proposed by Vo and Zhang (2015) , namely Target-dep which combines full tweet (pooled) word embedding features with features extracted from left-targetright contexts and Target-dep+ that adds targetdependent sentiment features on top of Targetdep, we see that our method beats both of these, without using full tweet features 9 . TDParse+ also outperforms the state-of-the-art TC-LSTM.", |
| "cite_spans": [ |
| { |
| "start": 141, |
| "end": 160, |
| "text": "(Dong et al., 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 314, |
| "end": 333, |
| "text": "(Dong et al., 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 362, |
| "end": 381, |
| "text": "Tang et al. (2016a)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 800, |
| "end": 819, |
| "text": "Zhang et al. (2016)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1287, |
| "end": 1306, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental results and comparison with other baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "When considering the 'same-target-multiappearance' scenario, our best model -TDParse+ improves its performance further (shown as TD-Parse+ (m) in Table 2 ). Even though TDParse doesn't use lexica, it shows competitive results to Target-dep+ which uses lexicon filtered sen- Table 2 : Performance comparison on the benchmarking data (Dong et al., 2014) timent features. In the case of TDParse-, which uses exclusively features from syntactic parses, while it performs significantly worse than Targetind, that uses only full tweet features, when the former is used in conjunction with features from left-target-right contexts it achieves better results than the equivalent Target-dep and Target-dep+. This indicates that syntactic target information derived from parses complements well with the left-target-right context representation. Clausal segmentation of tweets or sentences can provide a simple approximation to parse-tree based models (Li et al., 2015) . In Table 2 we can see our naive tweet segmentation models Naive-seg and Naive-seg+ also achieve competitive performance suggesting to some extent that such simple parse-tree approximation preserves the semantic structure of text and that useful target-specific information can be drawn from each segment or clause rather than the entire tweet. and applying our models described in Section 4.1. We compare the results with our other developed baseline models in Section 4.1, including a tweet-level model Semeval-best and clausalsegmentation models that provide simple parsetree approximation, as well as state-of-the-art target-dependent models by Vo and Zhang (2015) and Zhang et al. (2016) . The experimentation setup is the same as described in Section 4.2 10 . Data set: Our election data has a training/testing ratio of 3.70, containing 3210 training tweets with 9912 target entities and 867 testing tweets with 2675 target entities.", |
| "cite_spans": [ |
| { |
| "start": 332, |
| "end": 351, |
| "text": "(Dong et al., 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 942, |
| "end": 959, |
| "text": "(Li et al., 2015)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1610, |
| "end": 1629, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1634, |
| "end": 1653, |
| "text": "Zhang et al. (2016)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 146, |
| "end": 153, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 274, |
| "end": 281, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 965, |
| "end": 972, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental results and comparison with other baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Models: In order to limit our use of external resources we do not include Naive-seg+ and TD-Parse+ for evaluation as they both use lexica for feature generation. Since most of our tweets here contain N > 1 targets and the target-independent classifiers produce a single output per tweet, we evaluate its result N times against the ground truth labels, to make different models comparable.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental results and comparison with other baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Results: Overall the models perform much poorer than for the single-target benchmarking corpus, especially in 2-class F 1 score, indicating the challenge of the multi-target-specific sentiment recognition. As seen in Table 3 though the feature-rich tweet-level model Semeval-best gives a reasonably strong baseline performance (same as in Table 2 ), both it and Target-ind perform worse than the target-dependent baseline models Target-dep/Target-dep+ (Vo and Zhang, 2015) , indicating the need to capture and utilise target-dependent signals in the sentiment classification model. The Gated neural network models -G3/GRNN/GRNN+G3 (Zhang et al., 2016 ) also perform worse than Target-dep+ while the combined model -GRNN+G3 fails to boost performance, presumably due to the small corpus size.", |
| "cite_spans": [ |
| { |
| "start": 452, |
| "end": 472, |
| "text": "(Vo and Zhang, 2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 631, |
| "end": 650, |
| "text": "(Zhang et al., 2016", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 217, |
| "end": 224, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 339, |
| "end": 346, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental results and comparison with other baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Our final model TDParse achieves the best performance especially in 3-class F 1 and 2-class F 1 scores in comparison with other target-dependent and target-independent models. This indicates that our proposed models can provide better and more balanced performance between precision and recall. It also shows the target-dependent syntactic information acquired from parse-trees is beneficial to determine the target's sentiment particularly when used in conjunction with the left- Table 4 : Performance analysis in S1, S2 and S3", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 481, |
| "end": 488, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental results and comparison with other baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "target-right contexts originally proposed by Vo and Zhang (2015) and in a scenario of multiple targets per tweet. Our clausal-segmentation baseline -Naive-seg models approximate such parse-trees by identifying segments of the tweet relevant to the target, and as a result Naive-seg achieves competitive performance compared to other baselines.", |
| "cite_spans": [ |
| { |
| "start": 45, |
| "end": 64, |
| "text": "Vo and Zhang (2015)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental results and comparison with other baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "5.1 State-of-the-art tweet level sentiment vs target-specific sentiment in a multi-target setting", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental results and comparison with other baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "To fully compare our multi-target-specific models against other target-dependent and targetindependent baseline methods, we conduct an additional experiment by dividing our election data test set into three disjoint subsets, on the basis of number of distinct target sentiment values per tweet: (S1) contains tweets having only one target sentiment, where the sentiment towards each target is the same; (S2) and (S3) contain two and three different types of targeted sentiment respec-tively (i.e. in S3, positive, neutral and negative sentiment are all expressed in each tweet). As described in Section 3.2, there are 2,051, 1,753 and 273 tweets in S1, S2 and S3 respectively. Table 4 shows results achieved by the tweetlevel target-independent model -Semeval-best, the state-of-the-art target-dependent baseline model -Target-dep+, and our proposed final model -TDParse, in each of the three subsets. We observe Semeval-best performs the best in S1 compared to the two other models but its performance gets worse when different types of target sentiment are mentioned in the tweet. It has the worst performance in S2 and S3, which again emphasises the need for multi-target-specific sentiment classification. Finally, our proposed final model TDParse achieves better performance than Target-dep+ consistently over all subsets indicating its effectiveness even in the most difficult scenario S3.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 677, |
| "end": 684, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental results and comparison with other baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In this work we introduce the challenging task of multi-target-specific sentiment classification for tweets. To help the study we have generated a multi-target Twitter corpus on UK elections which will be made publicly available. We develop a state-of-the-art approach which utilises the syntactic information from parse-tree in conjunction with the left-right context of the target. Our method outperforms previous approaches on a benchmarking single-target corpus as well as our new multi-target election data. Future work could investigate sentiment connections among all targets appearing in the same tweet as a multi-target learning task, as well as a hybrid approach that applies either Semeval-best or TDParse depending on the number of targets detected in the tweet.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "They have yet to show a clear out-performance on a benchmarking dataset and our multi-target corpus, possibly because they usually require large amount of training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "#ukelection2015, #ge2015, #ukge2015, #ukgeneralelec-tion2015, #bbcqt, #bbcsp, #bbcdp, #marrshow, #generalelec-tion2015, #ge15, #generalelection, #electionuk, #ukelection and #electionuk2015 4 EU and immigration, economy, NHS, education, crime, housing, defense, public spending, environment and energy", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "This guidelines can be found along with our released corpus: https://goo.gl/CjuHzd6 We report the strength of agreement using the benchmarks byLandis and Koch (1977) for interpreting Fleiss' kappa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Empirically the proximity/location of such syntactic relations have not made much difference when used in feature weighting and is thus ignored.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that this isn't a binary classification task; the F1 score is still effected by the neutral tweets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that the results reported in Vo and Zhang (2015) (71.1 in accuracy and 69.9 in F1) were not possible to reproduce by running their code with very fine parameter tuning, as suggested by the authors", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Evaluating Baselines for target-specific sentiment in a multi-target settingWe perform multi-target-specific sentiment classification on our election dataset by extending", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Class weight parameter is not optimised for all experiments, though better performances can be achieved here by tuning the class weight due to the class imbalance nature of this dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Any further results will be shared on our Github page: https://goo.gl/S2T1GO", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank Duy-Tin Vo, Meishan Zhang and Duyu Tang for sharing their implementation code respectively, which we have used for system performance comparison. We would also like to thank Li Dong for sharing their data, and City University London for recruiting PhD students for the annotation of our election corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Nltk: the natural language toolkit", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the COLING/ACL on Interactive presentation sessions, COLING-ACL '06", |
| "volume": "", |
| "issue": "", |
| "pages": "69--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bird. 2006. Nltk: the natural language toolkit. In Proceedings of the COLING/ACL on In- teractive presentation sessions, COLING-ACL '06, pages 69-72, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Twitterhawk: A feature bucket based approach to sentiment analysis", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Boag", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Potash", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "640--646", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Boag, Peter Potash, and Anna Rumshisky. 2015. Twitterhawk: A feature bucket based ap- proach to sentiment analysis. In Proceedings of the 9th International Workshop on Semantic Evaluation (SemEval 2015), pages 640-646, Denver, Colorado, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "140 characters to victory?: Using twitter to predict the uk 2015 general election", |
| "authors": [ |
| { |
| "first": "Pete", |
| "middle": [], |
| "last": "Burnap", |
| "suffix": "" |
| }, |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Sloan", |
| "suffix": "" |
| }, |
| { |
| "first": "Rosalynd", |
| "middle": [], |
| "last": "Southern", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Electoral Studies", |
| "volume": "41", |
| "issue": "", |
| "pages": "230--233", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pete Burnap, Rachel Gibson, Luke Sloan, Rosalynd Southern, and Matthew Williams. 2016. 140 char- acters to victory?: Using twitter to predict the uk 2015 general election. Electoral Studies, 41:230- 233.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Adaptive recursive neural network for target-dependent twitter sentiment classification", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Chuanqi", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Duyu", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "49--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Dong, Furu Wei, Chuanqi Tan, Duyu Tang, Ming Zhou, and Ke Xu. 2014. Adaptive recursive neural network for target-dependent twitter sentiment clas- sification. In Proceedings of the 52nd Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), pages 49-54, Baltimore, Maryland, June. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Liblinear: A library for large linear classification", |
| "authors": [ |
| { |
| "first": "Kai-Wei", |
| "middle": [], |
| "last": "Rong-En Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Cho-Jui", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiang-Rui", |
| "middle": [], |
| "last": "Hsieh", |
| "suffix": "" |
| }, |
| { |
| "first": "Chih-Jen", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Journal of machine learning research", |
| "volume": "9", |
| "issue": "", |
| "pages": "1871--1874", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rong-En Fan, Kai-Wei Chang, Cho-Jui Hsieh, Xiang- Rui Wang, and Chih-Jen Lin. 2008. Liblinear: A library for large linear classification. Journal of ma- chine learning research, 9(Aug):1871-1874.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Incorporating non-local information into information extraction systems by gibbs sampling", |
| "authors": [ |
| { |
| "first": "Jenny", |
| "middle": [ |
| "Rose" |
| ], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Trond", |
| "middle": [], |
| "last": "Grenager", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL'05)", |
| "volume": "", |
| "issue": "", |
| "pages": "363--370", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jenny Rose. Finkel, Trond Grenager, and Christopher Manning. 2005. Incorporating non-local informa- tion into information extraction systems by gibbs sampling. In Proceedings of the 43rd Annual Meet- ing of the Association for Computational Linguis- tics (ACL'05), pages 363-370, Ann Arbor, Michi- gan, June. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Target-dependent twitter sentiment classification", |
| "authors": [ |
| { |
| "first": "Long", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaohua", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiejun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "151--160", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Long Jiang, Mo Yu, Ming Zhou, Xiaohua Liu, and Tiejun Zhao. 2011. Target-dependent twitter sen- timent classification. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 151-160, Portland, Oregon, USA, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A dependency parser for tweets", |
| "authors": [ |
| { |
| "first": "Lingpeng", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Schneider", |
| "suffix": "" |
| }, |
| { |
| "first": "Swabha", |
| "middle": [], |
| "last": "Swayamdipta", |
| "suffix": "" |
| }, |
| { |
| "first": "Archna", |
| "middle": [], |
| "last": "Bhatia", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1001--1012", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lingpeng Kong, Nathan Schneider, Swabha Swayamdipta, Archna Bhatia, Chris Dyer, and Noah A. Smith. 2014. A dependency parser for tweets. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1001-1012, Doha, Qatar, October. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "The measurement of observer agreement for categorical data", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Landis", |
| "suffix": "" |
| }, |
| { |
| "first": "Gary", |
| "middle": [ |
| "G" |
| ], |
| "last": "Koch", |
| "suffix": "" |
| } |
| ], |
| "year": 1977, |
| "venue": "Biometrics", |
| "volume": "", |
| "issue": "", |
| "pages": "159--174", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J Richard Landis and Gary G. Koch. 1977. The mea- surement of observer agreement for categorical data. Biometrics, pages 159-174.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "When are tree structures necessary for deep learning of representations?", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2304--2314", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Thang Luong, Dan Jurafsky, and Eduard Hovy. 2015. When are tree structures necessary for deep learning of representations? In Proceed- ings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2304-2314, Lisbon, Portugal, September. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Graph-based ranking algorithms for sentence extraction, applied to text summarization", |
| "authors": [ |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "The Companion Volume to the Proceedings of 42st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "170--173", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rada Mihalcea. 2004. Graph-based ranking algo- rithms for sentence extraction, applied to text sum- marization. In The Companion Volume to the Pro- ceedings of 42st Annual Meeting of the Associa- tion for Computational Linguistics, pages 170-173, Barcelona, Spain, July. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "CoRR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word represen- tations in vector space. CoRR, abs/1301.3781.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Semeval-2016 task 4: Sentiment analysis in twitter", |
| "authors": [ |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabrizio", |
| "middle": [], |
| "last": "Sebastiani", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Preslav Nakov, Alan Ritter, Sara Rosenthal, Fabrizio Sebastiani, and Veselin Stoyanov. 2016. Semeval- 2016 task 4: Sentiment analysis in twitter. In Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016), pages 1-18, San Diego, California, June. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Twitter as a corpus for sentiment analysis and opinion mining", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Pak", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Paroubek", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Pak and Patrick Paroubek. 2010. Twit- ter as a corpus for sentiment analysis and opinion mining. In Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10), Valletta, Malta, may. European Lan- guage Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "AK-TSKI at semeval-2016 task 5: Aspect based sentiment analysis for consumer reviews", |
| "authors": [ |
| { |
| "first": "Shubham", |
| "middle": [], |
| "last": "Pateria", |
| "suffix": "" |
| }, |
| { |
| "first": "Prafulla", |
| "middle": [], |
| "last": "Choubey", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Workshop on Semantic Evaluation, SemEval@NAACL-HLT 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "318--324", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shubham Pateria and Prafulla Choubey. 2016. AK- TSKI at semeval-2016 task 5: Aspect based senti- ment analysis for consumer reviews. In Proceed- ings of the 10th International Workshop on Seman- tic Evaluation, SemEval@NAACL-HLT 2016, San Diego, CA, USA, June 16-17, 2016, pages 318-324.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Klueless: Polarity classification and association", |
| "authors": [ |
| { |
| "first": "Nataliia", |
| "middle": [], |
| "last": "Plotnikova", |
| "suffix": "" |
| }, |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Kohl", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Volkert", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Evert", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Lerner", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "619--625", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nataliia Plotnikova, Micha Kohl, Kevin Volkert, Stefan Evert, Andreas Lerner, Natalie Dykes, and Heiko Er- mer. 2015. Klueless: Polarity classification and as- sociation. In Proceedings of the 9th International Workshop on Semantic Evaluation (SemEval 2015), pages 619-625, Denver, Colorado, June. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Semeval-2015 task 12: Aspect based sentiment analysis", |
| "authors": [ |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Pontiki", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimitris", |
| "middle": [], |
| "last": "Galanis", |
| "suffix": "" |
| }, |
| { |
| "first": "Haris", |
| "middle": [], |
| "last": "Papageorgiou", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "486--495", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maria Pontiki, Dimitris Galanis, Haris Papageorgiou, Suresh Manandhar, and Ion Androutsopoulos. 2015. Semeval-2015 task 12: Aspect based sentiment anal- ysis. In Proceedings of the 9th International Work- shop on Semantic Evaluation (SemEval 2015), pages 486-495, Denver, Colorado, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Named entity recognition in tweets: An experimental study", |
| "authors": [ |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Mausam", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing, EMNLP '11", |
| "volume": "", |
| "issue": "", |
| "pages": "1524--1534", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan Ritter, Sam Clark, Mausam, and Oren Etzioni. 2011. Named entity recognition in tweets: An ex- perimental study. In Proceedings of the Conference on Empirical Methods in Natural Language Pro- cessing, EMNLP '11, pages 1524-1534, Strouds- burg, PA, USA. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Semeval-2015 task 10: Sentiment analysis in twitter", |
| "authors": [ |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Saif", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "451--463", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sara Rosenthal, Preslav Nakov, Svetlana Kiritchenko, Saif Mohammad, Alan Ritter, and Veselin Stoyanov. 2015. Semeval-2015 task 10: Sentiment analysis in twitter. In Proceedings of the 9th International Workshop on Semantic Evaluation (SemEval 2015), pages 451-463, Denver, Colorado, June. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Learning sentimentspecific word embedding for twitter sentiment classification", |
| "authors": [ |
| { |
| "first": "Duyu", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1555--1565", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duyu Tang, Furu Wei, Nan Yang, Ming Zhou, Ting Liu, and Bing Qin. 2014. Learning sentiment- specific word embedding for twitter sentiment clas- sification. In Proceedings of the 52nd Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1555-1565, Bal- timore, Maryland, June. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Effective lstms for target-dependent sentiment classification", |
| "authors": [ |
| { |
| "first": "Duyu", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaocheng", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "3298--3307", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duyu Tang, Bing Qin, Xiaocheng Feng, and Ting Liu. 2016a. Effective lstms for target-dependent sen- timent classification. In Proceedings of COLING 2016, the 26th International Conference on Compu- tational Linguistics: Technical Papers, pages 3298- 3307, Osaka, Japan, December. The COLING 2016 Organizing Committee.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Aspect level sentiment classification with deep memory network", |
| "authors": [ |
| { |
| "first": "Duyu", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "214--224", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duyu Tang, Bing Qin, and Ting Liu. 2016b. Aspect level sentiment classification with deep memory net- work. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Process- ing, pages 214-224, Austin, Texas, November. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Warwickdcs: From phrase-based to target-specific sentiment recognition", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Townsend", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Tsakalidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiwei", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Liakata", |
| "suffix": "" |
| }, |
| { |
| "first": "Arkaitz", |
| "middle": [], |
| "last": "Zubiaga", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "657--663", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Townsend, Adam Tsakalidis, Yiwei Zhou, Bo Wang, Maria Liakata, Arkaitz Zubiaga, Alexan- dra Cristea, and Rob Procter. 2015. Warwick- dcs: From phrase-based to target-specific sentiment recognition. In Proceedings of the 9th International Workshop on Semantic Evaluation (SemEval 2015), pages 657-663, Denver, Colorado, June. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Predicting elections with twitter: What 140 characters reveal about political sentiment", |
| "authors": [ |
| { |
| "first": "Andranik", |
| "middle": [], |
| "last": "Tumasjan", |
| "suffix": "" |
| }, |
| { |
| "first": "Timm", |
| "middle": [], |
| "last": "Oliver", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [ |
| "G" |
| ], |
| "last": "Sprenger", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabell", |
| "middle": [ |
| "M" |
| ], |
| "last": "Sandner", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Welpe", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "ICWSM", |
| "volume": "10", |
| "issue": "", |
| "pages": "178--185", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andranik Tumasjan, Timm Oliver. Sprenger, Philipp G. Sandner, and Isabell M. Welpe. May 2010. Pre- dicting elections with twitter: What 140 characters reveal about political sentiment. ICWSM, 10:178- 185.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Comparing overall and targeted sentiments in social media during crises", |
| "authors": [ |
| { |
| "first": "Sa\u00fal", |
| "middle": [], |
| "last": "Vargas", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Mccreadie", |
| "suffix": "" |
| }, |
| { |
| "first": "Craig", |
| "middle": [], |
| "last": "Macdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Iadh", |
| "middle": [], |
| "last": "Ounis", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Tenth International Conference on Web and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "695--698", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sa\u00fal Vargas, Richard McCreadie, Craig Macdonald, and Iadh Ounis. 2016. Comparing overall and tar- geted sentiments in social media during crises. In Proceedings of the Tenth International Conference on Web and Social Media, Cologne, Germany, May 17-20, 2016., pages 695-698.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Target-dependent twitter sentiment classification with rich automatic features", |
| "authors": [ |
| { |
| "first": "Duy-Tin", |
| "middle": [], |
| "last": "Vo", |
| "suffix": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 24th International Conference on Artificial Intelligence, IJCAI'15", |
| "volume": "", |
| "issue": "", |
| "pages": "1347--1353", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duy-Tin Vo and Yue Zhang. 2015. Target-dependent twitter sentiment classification with rich automatic features. In Proceedings of the 24th International Conference on Artificial Intelligence, IJCAI'15, pages 1347-1353. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
"title": "A system for real-time twitter sentiment analysis of 2012 u.s. presidential election cycle",
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dogan", |
| "middle": [], |
| "last": "Can", |
| "suffix": "" |
| }, |
| { |
| "first": "Abe", |
| "middle": [], |
| "last": "Kazemzadeh", |
| "suffix": "" |
| } |
| ], |
"year": 2012,
| "venue": "Proceedings of the ACL 2012 System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "115--120", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Wang, Dogan Can, Abe Kazemzadeh, Fran\u00e7ois Bar, and Shrikanth Narayanan. 2012. A system for real-time twitter sentiment analysis of 2012 u.s. presidential election cycle. In Proceedings of the ACL 2012 System Demonstrations, pages 115-120,", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Association for Computational Linguistics", |
| "authors": [ |
| { |
| "first": "Jeju", |
| "middle": [], |
| "last": "Island", |
| "suffix": "" |
| }, |
| { |
| "first": "July", |
| "middle": [], |
| "last": "Korea", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeju Island, Korea, July. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Gated neural networks for targeted sentiment analysis", |
| "authors": [ |
| { |
| "first": "Meishan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Duy-Tin", |
| "middle": [], |
| "last": "Vo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence, AAAI'16", |
| "volume": "", |
| "issue": "", |
| "pages": "3087--3093", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meishan Zhang, Yue Zhang, and Duy-Tin Vo. 2016. Gated neural networks for targeted sentiment anal- ysis. In Proceedings of the Thirtieth AAAI Con- ference on Artificial Intelligence, AAAI'16, pages 3087-3093. AAAI Press.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "the individual c(s target ), c(s topic ) and joint c(s target , s topic ) distributions of the target/entity s target and topic s topic sentiment. While s target and s topic report how often each sentiment category occurs in the dataset, the joint distribution c(s target , s topic ) (the inner portions of the table", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td colspan=\"4\">: Performance comparison on the election</td></tr><tr><td>dataset 11</td><td/><td/><td/></tr><tr><td>S1</td><td colspan=\"3\">Semeval-best Target-dep+ TDParse</td></tr><tr><td>Macro 3-class-F1</td><td>50.11</td><td>46.24</td><td>47.08</td></tr><tr><td>Micro 3-class-F1</td><td>59.72</td><td>55.82</td><td>57.47</td></tr><tr><td>Macro 2-class-F1</td><td>46.59</td><td>43.42</td><td>42.95</td></tr><tr><td>S2</td><td colspan=\"3\">Semeval-best Target-dep+ TDParse</td></tr><tr><td>Macro 3-class-F1</td><td>37.15</td><td>41.81</td><td>43.07</td></tr><tr><td>Micro 3-class-F1</td><td>45.17</td><td>51.66</td><td>52.05</td></tr><tr><td>Macro 2-class-F1</td><td>37.05</td><td>39.75</td><td>40.92</td></tr><tr><td>S3</td><td colspan=\"3\">Semeval-best Target-dep+ TDParse</td></tr><tr><td>Macro 3-class-F1</td><td>35.08</td><td>42.83</td><td>51.26</td></tr><tr><td>Micro 3-class-F1</td><td>38.16</td><td>46.05</td><td>53.07</td></tr><tr><td>Macro 2-class-F1</td><td>35.17</td><td>40.53</td><td>50.14</td></tr></table>" |
| } |
| } |
| } |
| } |