| { |
| "paper_id": "E17-1026", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:51:56.688174Z" |
| }, |
| "title": "A Multi-View Sentiment Corpus", |
| "authors": [ |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Milano-Bicocca", |
| "location": { |
| "addrLine": "Viale Sarca 336", |
| "postCode": "20126", |
| "region": "Milan", |
| "country": "Italy" |
| } |
| }, |
| "email": "debora.nozza@disco.unimib.it" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Milano-Bicocca", |
| "location": { |
| "addrLine": "Viale Sarca 336", |
| "postCode": "20126", |
| "region": "Milan", |
| "country": "Italy" |
| } |
| }, |
| "email": "fersini@disco.unimib.it" |
| }, |
| { |
| "first": "Enza", |
| "middle": [], |
| "last": "Messina", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Milano-Bicocca", |
| "location": { |
| "addrLine": "Viale Sarca 336", |
| "postCode": "20126", |
| "region": "Milan", |
| "country": "Italy" |
| } |
| }, |
| "email": "messina@disco.unimib.it" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Sentiment Analysis is a broad task that involves the analysis of various aspects of the natural language text. However, most of the approaches in the state of the art usually investigate independently each aspect, i.e. Subjectivity Classification, Sentiment Polarity Classification, Emotion Recognition, Irony Detection. In this paper we present a Multi-View Sentiment Corpus (MVSC), which comprises 3000 English microblog posts related to the movie domain. Three independent annotators manually labelled MVSC, following a broad annotation schema about different aspects that can be grasped from natural language text coming from social networks. The contribution is therefore a corpus that comprises five different views for each message, i.e. subjective/objective, sentiment polarity, implicit/explicit, irony, emotion. In order to allow a more detailed investigation on the human labelling behaviour, we provide the annotations of each human annotator involved.", |
| "pdf_parse": { |
| "paper_id": "E17-1026", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Sentiment Analysis is a broad task that involves the analysis of various aspects of the natural language text. However, most of the approaches in the state of the art usually investigate independently each aspect, i.e. Subjectivity Classification, Sentiment Polarity Classification, Emotion Recognition, Irony Detection. In this paper we present a Multi-View Sentiment Corpus (MVSC), which comprises 3000 English microblog posts related to the movie domain. Three independent annotators manually labelled MVSC, following a broad annotation schema about different aspects that can be grasped from natural language text coming from social networks. The contribution is therefore a corpus that comprises five different views for each message, i.e. subjective/objective, sentiment polarity, implicit/explicit, irony, emotion. In order to allow a more detailed investigation on the human labelling behaviour, we provide the annotations of each human annotator involved.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The exploitation of user-generated content on the Web, and in particular on the social media platforms, has brought to a huge interest on Opinion Mining and Sentiment Analysis. Both Natural Language Processing (NLP) communities and corporations are continuously investigating on more accurate automatic approaches that can manage large quantity of noisy natural language texts, in order to extract opinions and emotions towards a topic. The data are usually collected from Twitter, the most popular microblogging platform. In this particular environment, the posts, called tweets, are constrained to a maximum number of characters. This constraint, in addition to the social media context, leads to a specific language rich of synthetic expressions that allow the users to express their ideas or what happens to them in a short but intense way.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "However, the application of automatic sentiment classification approaches, in particular when dealing with noisy texts, is subjected to the presence of sufficiently manually annotated dataset to perform the training. The majority of the corpora available in the literature are focused on only one (or at most two) aspects related to Sentiment Analysis, i.e. Subjectivity, Polarity, Emotion, Irony.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper we propose a Multi-View Sentiment Corpus 1 , manually labelled by three independent annotators, that makes it possible to study Sentiment Analysis by considering several aspects of the natural language text: subjective/objective, sentiment polarity, implicit/explicit, irony and emotion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The work of Go et al. (2009) was the first attempt to address the creation of a sentiment corpus in a microblog environment. Their approach, introduced in (Read, 2005) , consisted to filter all the posts containing emoticons and subsequently label each post with the polarity class provided by them. For example, :) in a tweet indicates that the tweet contains positive sentiment and :( indicates that the tweet contains negative sentiment. The same procedure was also applied in (Pak and Paroubek, 2010) , differently from the aforementioned works they introduced the class of objective posts, retrieved from Twitter accounts of popular newspapers and magazines. Davidov et 1 The proposed corpus is available at www.mind.disco.unimib.it al. (2010) maintained the idea of distant supervision by combining 15 common emoticons and 50 sentiment-driven hashtags for automatic labelling. However, an intervention of human experts was needed to annotate the sentiment of frequent tags. Kouloumpis et al. (2011) extended their work in order to perform a 3-way polarity classification (positive, negative and neutral) on the Edinburgh Twitter corpus (Petrovi\u0107 et al., 2010) .", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 28, |
| "text": "Go et al. (2009)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 155, |
| "end": 167, |
| "text": "(Read, 2005)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 480, |
| "end": 504, |
| "text": "(Pak and Paroubek, 2010)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 664, |
| "end": 676, |
| "text": "Davidov et 1", |
| "ref_id": null |
| }, |
| { |
| "start": 980, |
| "end": 1004, |
| "text": "Kouloumpis et al. (2011)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1142, |
| "end": 1165, |
| "text": "(Petrovi\u0107 et al., 2010)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Mohammad 2012and Wang et al. (2012) applied the same distant supervision approach for the construction of a large corpus for emotion classification. They collect the data retrieving tweets by considering as keywords a predefined list of emotion hashtags. In (Mohammad, 2012) , the authors used the Ekman's six basic emotions (#anger, #disgust, #fear, #joy, #sadness, and #surprise), while in (Wang et al., 2012) the authors expanded this list by including both basic and secondary emotions and their lexical variants, for a total of 131 keywords.", |
| "cite_spans": [ |
| { |
| "start": 17, |
| "end": 35, |
| "text": "Wang et al. (2012)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 258, |
| "end": 274, |
| "text": "(Mohammad, 2012)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 392, |
| "end": 411, |
| "text": "(Wang et al., 2012)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Hashtags have also been used to create datasets for irony detection purposes. The work of Reyes et al. (2013) proposed a corpus of 40000 tweets, 10000 ironic and 30000 non ironic tweets respectively retrieved with the hashtags #irony for the former and #education, #humor, #politics for the latter.", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 109, |
| "text": "Reyes et al. (2013)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "However, each of these resources have been created either fully automatically or in a semisupervised way based on the assumption that single words and symbols are representative of the whole document. Moreover, the use of hashtags and emoticons for exploiting distant-supervision approaches can definitely create a bias towards posts that do not use these forms of expression to communicate opinions and emotions. Adopting a manual annotation approach is crucial for dealing with these issues and obtaining high quality labelling. In this direction the SemEval corpora (Nakov et al., 2013; Rosenthal et al., 2014; Nakov et al., 2016) have provided a fundamental contribution. These datasets have been labelled by taking advantage of crowdsourcing platforms, such as Amazon Mechanical Turk and Crowd-Flower. Although the size of these corpora is very high (around 15-20K posts), Mozeti\u010d et al. (2016) overly exceeded these dimensions proposing a set of over 1.6 million sentiment labelled tweets. This corpus, that is the largest manually-labelled dataset reported in the literature, was annotated in 13 European languages.", |
| "cite_spans": [ |
| { |
| "start": 569, |
| "end": 589, |
| "text": "(Nakov et al., 2013;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 590, |
| "end": 613, |
| "text": "Rosenthal et al., 2014;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 614, |
| "end": 633, |
| "text": "Nakov et al., 2016)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Regarding emotion classification, Roberts et al. (2012) introduced a corpus of tweets manually labelled with the Ekman's six basic emotions and love. In (Liew et al., 2016) , the authors extended their work by considering a fine-grained set of emotion categories to better capture the richness of expressed emotions.", |
| "cite_spans": [ |
| { |
| "start": 34, |
| "end": 55, |
| "text": "Roberts et al. (2012)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 153, |
| "end": 172, |
| "text": "(Liew et al., 2016)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The only manually-annotated corpus on irony detection was proposed by (Gianti et al., 2012) . They studied the use of this particular device on Italian tweets, focusing on the political domain.", |
| "cite_spans": [ |
| { |
| "start": 70, |
| "end": 91, |
| "text": "(Gianti et al., 2012)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this paper, we present a Multi-View Sentiment Corpus (MSVC) on English microblog posts that differs from the state of the art corpora for several reasons:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 The proposed corpus is the first benchmark that collects implicit or explicit opinions. This contribution will allow researchers to develop sentiment analysis approaches able to model opinions not directly expressed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 The corpus provides different annotations simultaneously: subjectivity/objectivity, polarity, implicitness/explicitness, emotion, irony. This characteristic allows researchers to perform wide-ranging studies on the users' opinions, instead of considering each of this view as independent from the others.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 The corpus will show the label provided by each annotator, instead of producing a final label obtained by a majority voting rule. Given the different expertise of the annotators involved, a detailed investigation on single behaviours can be performed to improve the knowledge about the annotation procedures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 This is the first corpus that explicitly labels emojis. We aim to prove that the role of the emojis is strictly related to the context where they appear: their contribution in terms of conveyed sentiment (or conveyed topic) strictly depends on the domain where they are used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State of the Art", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The corpus has been annotated by considering different views related to the same post: subjectivity/objectivity, polarity, implicitness/explicitness, presence of irony and emotion. In this section, we provide a definition and examples for each of these views. Moreover, we present the characteristics of the annotators in order to have more insights on their behaviour.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Procedure", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Given a post p about a given topic t, its subjectivity or objectivity can be defined as follows (Liu, 2012) :", |
| "cite_spans": [ |
| { |
| "start": 96, |
| "end": 107, |
| "text": "(Liu, 2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Subjectivity/Objectivity", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Definition 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Subjectivity/Objectivity", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "An objective post p o presents some factual information about the world, while a subjective post p s expresses some personal feelings, views, or beliefs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Subjectivity/Objectivity", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In microblogs contexts the recognition of objective posts can be easily misled by the presence of hashtags and other linguistic artefacts that aim to show the post as more appealing. The reported examples are very similar, despite they belong to different classes:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Subjectivity/Objectivity", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "[Objective] \"Tonight @CinemaX #SuicideSquad!! Come to see #HarleyQuinn :)\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Subjectivity/Objectivity", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "[Subjective] \"-1 to #Deadpool...that's tomorrow!!!! I can't waiit!\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Subjectivity/Objectivity", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Given a subjective post p s that expresses an opinion about a topic t, we want to determine its polarity between positive, negative and neutral classes. While the definition of positive and negative classes is commonly clear, the neutral label is differently treated in the state of the art. As in Pang and Lee (2008) , we use neutral only in the sense of a sentiment that lies between positive and negative.", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 317, |
| "text": "Pang and Lee (2008)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Posts that express a sentiment about specific aspects of a given topic t, such as actors, scenes, commercials for a film are considered part of the topic. Moreover, it is important to understand what is the target of the opinion, because it can lead to completely different interpretations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "[Positive] \"Best Joker EVER!! #suicidesquad\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "[Negative] \"Deadpool is so childish! I slept during the movie\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "[Neutral] \"Good movie, @VancityReynolds worst actor ever #deadpool\" (neutral -mixed sentiment)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "[Neutral] \"I love my boyfriend! We are watching deadpool tonight\" (positive about the boyfriend -neutral about the film)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "3.3 Annotation of explicit/implicit opinion Given a subjective post p s that expresses an opinion about a topic t, we can define its implicitness or explicitness as follows (Liu, 2012) :", |
| "cite_spans": [ |
| { |
| "start": 173, |
| "end": 184, |
| "text": "(Liu, 2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Definition 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "An explicit opinion is a subjective statement that gives an opinion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Definition 3. An implicit (or implied) opinion is an objective statement that implies an opinion. Such an objective statement usually expresses a desirable or undesirable fact.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The detection of an implicit opinion can be complex because it does not rely on specific words (e.g. amazing, awful), as in the following examples:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "[Explicit -Positive] \"Suicide Squad is a great movie and an awesome cast\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "[Implicit -Positive] \"I've already watched Deadpool three times this month\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "[Implicit -Negative] \"I went out the cinema after 15 minutes #suicidesquad\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of polarity", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Given a subjective post p s that expresses an opinion about a topic t, the presence of irony can be detected focusing on the definition given by Wilson and Sperber (2007) :", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 170, |
| "text": "Wilson and Sperber (2007)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Irony", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Definition 4. Irony is basically a communicative act that expresses the opposite of what is literally said.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Irony", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Irony is one of the most difficult figurative language to comprehend, and a person can perceive it differently depending on several factors (e.g. culture, language).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Irony", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "[Ironic] \"Hey @20thcenturyfox remember when you didn't want anything to do with #Deadpool and now it's your biggest opening weekend ever?\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Irony", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "A post p about the topic t can be associated to an emotion e corresponding to the eight Plutchik primary emotions (shown in Figure 1 ): anger, anticipation, joy, trust, fear, surprise, sadness and disgust. We provide an example for each emotion.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 124, |
| "end": 132, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "[Anger] \" #Deadpool I wasted time and money grrrrrrrr\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "[Anticipation] \"Can't wait to see Deadpool\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "[Joy] \"Deadpool was A-M-A-Z-I-N-G\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "[Trust] \"Best movie ever #Deadpool! Trust me!\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "[Fear] \"Saw #Deadpool last night. I was frightened during some crude scenes!\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "[Surprise] \"Much to my surprise, I actually liked Deadpool.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "[Sadness] \"i finally got to watch deadpool and im so sad this is so boring\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "[Disgust] \"Deadpool is everything I hate about our century combined in the trashiest movie possible.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Figure 1: Plutchik's wheel of emotions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emotion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Given a post p related to a specific topic t, each emoji (if present) has been labelled as positive, negative, neutral or topic-related according to the context where it has been used. We provide an example for each label.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation of Emojis", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "The complete set of posts has been labelled by three different annotators. Each annotator is a very proficient English speaker and he/she has a different level of NLP background and topic knowledge from the others. We distinguish these two types of knowledge because they are equally important and necessary for annotating a dataset, especially in a movie domain. A topic expert can be very confident on understanding the meaning of the text, but without any NLP knowledge he/she would not be able to perform a confident annotation, especially when dealing with the implicitness/explicitness and subjectivity/objectivity labels. On the other hand, being only a NLP expert is not sufficient when in the text subtle and sophisticated references to the topic are present, resulting in an incorrect annotation because of an improper understanding. The first annotator A 1 is a NLP expert while he/she is not very confident on the topic selected, the second annotator A 2 has a good expertise in NLP and a good knowledge about the topic, the third annotator A 3 is a beginner in the field of NLP but he/she is competent on the topic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotators", |
| "sec_num": "3.7" |
| }, |
| { |
| "text": "The data has been retrieved by monitoring different keywords on the Twitter microblogging platform related to two popular movies: Deadpool and Suicide Squad. This choice was motivated by the intention to increase the number of opinionated posts and therefore to have a variety of aspects to be analysed. Also, both the movies were massive blockbuster successes with popular actors and this led to a very wide and diverse audience.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "This case study is experimentally convenient for our purposes because it represents a domain where people are more willing to express opinions, so that the final corpus will have a variety of opinionated tweets expressed in diverse ways. The collection of the data has been performed in the narrow days of the release date, Deadpool 18 th February 2016 and Suicide Squad 1 th August 2016.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "After the streaming collection phase, we filter out the non-English tweets, duplicates and retweets resulting in a dataset of millions of posts. Then, we randomly sampled 3000 tweets equally distributed between the topics, maintaining the original daily distribution. This sample has been manually annotated, obtaining a final corpus composed of 1500 posts about Deadpool and 1500 posts about Suicide Squad.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "On average, a tweet is composed of about 14 words of which one is a hashtag. Although this number can lead to conclude that hashtags are an important language expression and therefore they can be used for automatically collecting opinions and emotions, we found that most of them are strongly related only to the topic, e.g. #Show-TimeAtGC, #Joker, #HarleyQuinn, #DC. A preliminary analysis of the user mentions has shown that users are inclined to directly mention the actors or the entertainment companies for complaining or complimenting, and this, together with hashtags, can be particularly helpful when per-forming aspect-based sentiment analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Regarding emojis, the most frequent ones are (as expected) sentiment-driven, i.e. joy, heart eyes, sob.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The annotation of emotions, sentiment and other emotional aspects in a microblog message is not an easy task, and strongly depends on subjective judgement of human annotators. Annotators can disagree between themselves, and sometimes an individual cannot be consistent with her/himself. The disagreement depends on the complexity of the annotation task, the use of complex language (e.g., slang), or simply on the poor annotator work. In Table 1 , we report some statistics that summarize behaviours of the involved annotators. By analysing the distributions, we can observe different attitudes: A 1 is inclined to label more posts as positive against the neutral ones; A 2 shows a predisposition to identify a high number of explicit expressions; A 3 has a low sensibility to capture the emotions behind the text. Moreover, we can highlight a balanced distribution for implicit/explicit opinions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 438, |
| "end": 445, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Evaluations", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For those tweets encoding one of the eight emotions, there is a predominance of the joy label. Concerning the remaining classes the distributions are skewed towards a specific label, i.e. Subjective, Positive and Not Ironic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Evaluations", |
| "sec_num": "5" |
| }, |
| { |
| "text": "An analogous consideration can be drawn for the emojii distribution (see Table 2 ). It turns out that most of the emojis are positive, especially the most popular ones and their presence provide an insight of the human emotional perceptions. By a detailed analysis of the emoji annotations, it emerges that the role of the emojis is closely related to the context where they appear: their contribution in terms of conveyed sentiment (or conveyed topic) strictly depends on the domain where they are used. In Table 3 , we report a comparison between the label distribution of two emojis in our corpus and the corresponding distribution in a state of the art emoji sentiment lexicon (Novak et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 681, |
| "end": 701, |
| "text": "(Novak et al., 2015)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 73, |
| "end": 80, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 508, |
| "end": 515, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Evaluations", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In the proposed corpus, the fire emoji has been mainly labelled as positive because it represents the word \"hot\", whose meaning is intended as something beautiful and trendy. However, in the emoji sentiment lexicon the same emoji primarily corresponds to a neutral sentiment. Similar considerations can be drawn for the pistol emoji: in our corpus it represents the topic underlying the two movies, while in the state of the art lexicon it is frequently used to denote a negative sentiment orientation. In conclusion, any emoji should not be considered as independent of the context and therefore evaluated according to its semantics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Evaluations", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The kappa coefficient (Cohen, 1960) is the most used statistic for measuring the degree of reliability between annotators. The need for consistency among annotators immediately arises due to the However, considering only this statistic is not appropriate when the prevalence of a given response is very high or very low in a specific class. In this case, the value of kappa may indicate a low level of reliability even with a high observed proportion of agreement. In order to address these imbalances caused by differences in prevalence and bias, Byrt et al. (1993) introduced a different version of the kappa coefficient called prevalenceadjusted bias-adjusted kappa (PABAK). The estimation of PABAK depends solely on the observed proportion of agreement between annotators:", |
| "cite_spans": [ |
| { |
| "start": 22, |
| "end": 35, |
| "text": "(Cohen, 1960)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 548, |
| "end": 566, |
| "text": "Byrt et al. (1993)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Agreement Measures", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "P ABAK = 2 \u2022 observed agreement \u2212 1 (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Agreement Measures", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "A more reliable measure for estimating the agreement among annotators is PABAK-OS (Parker et al., 2011) , which controls for chance agreement. PABAK-OS aims to avoid the peculiar, unintuitive results sometimes obtained from Cohen's Kappa, especially related to skewed annotations (prevalence of a given label).", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 103, |
| "text": "(Parker et al., 2011)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Agreement Measures", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We report in Table 4 , the inter-agreement between couples of annotators distinguished for each label. We can easily note that the highest agreement is related to the irony/not-irony labelling. This is due to the predominance of non-ironic messages identified by all the annotators. Thus, we perform a detailed analysis on the disagreement between each couple of annotators regarding only the ironic messages. From the results, reported in Table 6 , we can confirm that A 1 and A 2 annotators are more willing to interpret irony similarly (as already stated in Table 4 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 13, |
| "end": 20, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 440, |
| "end": 447, |
| "text": "Table 6", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 561, |
| "end": 568, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Agreement Measures", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Concerning the implicit/explicit labels, the inter-agreement measure highlights the difficulties encountered by the annotators to distinguish \"objective statements\" (see Definition 1) from \"objective statements that imply an opinion\" (see Definition 3). Regarding the remaining labels, we can assert that there is a moderate agreement between the labellers. An analogous conclusion can be derived for the consensus about the emoji annotation, where the inter-agreement is 0.731 for A 1 vs A 2 , 0.771 for A 2 vs A 3 , and 0.647 for A 1 vs A 3 . When dealing with complex annotations, the perception of the same annotator on the same post can change over time, resulting in inconsistent labelling. In order to estimate the uncertainty of the annotation of each labeller, we sampled a portion of tweets to be annotated twice by the same annotator. We report in Table 5 the self-agreement measure, that is a valid index to quantify the quality of the labelling procedure. The resulting statistics show that there is a high self-agreement for almost all the labels. The annotators can be considered moderately reliable for implicit/explicit annotations and very accurate for the remaining labels.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 859, |
| "end": 866, |
| "text": "Table 5", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Agreement Measures", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In this paper we presented a Multi-View Sentiment Corpus (MVSC), which simultaneously considers different aspects related to sentiment analysis, i.e. subjectivity, polarity, implicitness, irony, emotion. We described the construction of the corpus, together with annotation schema, statistics and some interesting remarks. The proposed corpus is aimed at providing a benchmark to develop sentiment analysis approaches able to model opinions not directly expressed. Researchers can also take advantage of the complete label set given by the annotators to investigate their behaviours and the underlying annotation procedures. We finally provided some interesting conclusions related to the use of emojis, highlighting that their role is strictly related to the context where they appear. As future work, we aim at defining novel machine learning models able to simultaneously take advantage of the multiple views available. Moreover, an annotation scheme at a fine-grained level will be investigated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Bias, prevalence and kappa", |
| "authors": [ |
| { |
| "first": "Ted", |
| "middle": [], |
| "last": "Byrt", |
| "suffix": "" |
| }, |
| { |
| "first": "Janet", |
| "middle": [], |
| "last": "Bishop", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "B" |
| ], |
| "last": "Carlin", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Journal of Clinical Epidemiology", |
| "volume": "46", |
| "issue": "5", |
| "pages": "423--429", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ted Byrt, Janet Bishop, and John B. Carlin. 1993. Bias, prevalence and kappa. Journal of Clinical Epi- demiology, 46(5):423-429.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A Coefficient of Agreement for Nominal Scales", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| } |
| ], |
| "year": 1960, |
| "venue": "Educational and Psychological Measurement", |
| "volume": "20", |
| "issue": "1", |
| "pages": "37--46", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Cohen. 1960. A Coefficient of Agreement for Nominal Scales. Educational and Psychological Measurement, 20(1):37-46.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Enhanced sentiment learning using twitter hashtags and smileys", |
| "authors": [ |
| { |
| "first": "Dmitry", |
| "middle": [], |
| "last": "Davidov", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Tsur", |
| "suffix": "" |
| }, |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Rappoport", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics: Posters", |
| "volume": "", |
| "issue": "", |
| "pages": "241--249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dmitry Davidov, Oren Tsur, and Ari Rappoport. 2010. Enhanced sentiment learning using twitter hashtags and smileys. In Proceedings of the 23rd Inter- national Conference on Computational Linguistics: Posters, pages 241-249. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Annotating irony in a novel italian corpus for sentiment analysis", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Gianti", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Bolioli", |
| "suffix": "" |
| }, |
| { |
| "first": "Luigi", |
| "middle": [ |
| "Di" |
| ], |
| "last": "Caro", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 4th Workshop on Corpora for Research on Emotion Sentiment and Social Signals", |
| "volume": "", |
| "issue": "", |
| "pages": "1--7", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Gianti, Cristina Bosco, Viviana Patti, Andrea Bolioli, and Luigi Di Caro. 2012. Annotating irony in a novel italian corpus for sentiment analysis. In Proceedings of the 4th Workshop on Corpora for Research on Emotion Sentiment and Social Signals, pages 1-7.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Twitter sentiment analysis. Final Projects from CS224N for Spring", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Go", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Richa", |
| "middle": [], |
| "last": "Bhayani", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Go, Lei Huang, and Richa Bhayani. 2009. Twit- ter sentiment analysis. Final Projects from CS224N for Spring 2008/2009 at The Stanford Natural Lan- guage Processing Group.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Twitter sentiment analysis: The good the bad and the omg!", |
| "authors": [ |
| { |
| "first": "Efthymios", |
| "middle": [], |
| "last": "Kouloumpis", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Johanna", |
| "middle": [ |
| "D" |
| ], |
| "last": "Moore", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 5th International AAAI Conference on Weblogs and Social Media", |
| "volume": "11", |
| "issue": "", |
| "pages": "538--541", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Efthymios Kouloumpis, Theresa Wilson, and Johanna D. Moore. 2011. Twitter sentiment analysis: The good the bad and the omg!. Proceedings of the 5th International AAAI Conference on Weblogs and So- cial Media, 11:538-541.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Emotweet-28: A fine-grained emotion corpus for sentiment analysis", |
| "authors": [ |
| { |
| "first": "Jasy", |
| "middle": [], |
| "last": "Suet", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Liew", |
| "suffix": "" |
| }, |
| { |
| "first": "Howard", |
| "middle": [ |
| "R" |
| ], |
| "last": "Turtle", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [ |
| "D" |
| ], |
| "last": "Liddy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Conference on Language Resources and Evaluation. European Language Resources Association (ELRA)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jasy Suet Yan Liew, Howard R. Turtle, and Eliza- beth D. Liddy. 2016. Emotweet-28: A fine-grained emotion corpus for sentiment analysis. In Proceed- ings of the 10th International Conference on Lan- guage Resources and Evaluation. European Lan- guage Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Sentiment analysis and opinion mining", |
| "authors": [ |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Synthesis Lectures on Human Language Technologies", |
| "volume": "5", |
| "issue": "1", |
| "pages": "1--167", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bing Liu. 2012. Sentiment analysis and opinion min- ing. Synthesis Lectures on Human Language Tech- nologies, 5(1):1-167.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "#emotional tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 1st Joint Conference on Lexical and Computational Semantics", |
| "volume": "1", |
| "issue": "", |
| "pages": "246--255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad. 2012. #emotional tweets. In Proceedings of the 1st Joint Conference on Lexical and Computational Semantics -Volume 1: Proceed- ings of the Main Conference and the Shared Task, and Volume 2: Proceedings of the 6th International Workshop on Semantic Evaluation, pages 246-255. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Multilingual twitter sentiment classification: The role of human annotators", |
| "authors": [ |
| { |
| "first": "Igor", |
| "middle": [], |
| "last": "Mozeti", |
| "suffix": "" |
| }, |
| { |
| "first": "Miha", |
| "middle": [], |
| "last": "Grar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jasmina", |
| "middle": [], |
| "last": "Smailovi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "PLOS ONE", |
| "volume": "11", |
| "issue": "5", |
| "pages": "1--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Igor Mozeti, Miha Grar, and Jasmina Smailovi. 2016. Multilingual twitter sentiment classification: The role of human annotators. PLOS ONE, 11(5):1-26.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Semeval-2013 task 2: Sentiment analysis in twitter. Proceedings of the 7th International Workshop on Semantic Evaluation", |
| "authors": [ |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Zornitsa", |
| "middle": [], |
| "last": "Kozareva", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "312--320", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Preslav Nakov, Sara Rosenthal, Zornitsa Kozareva, Alan Ritter, and Theresa Wilson. 2013. Semeval- 2013 task 2: Sentiment analysis in twitter. Proceed- ings of the 7th International Workshop on Semantic Evaluation, pages 312-320.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Semeval-2016 task 4: Sentiment analysis in twitter", |
| "authors": [ |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabrizio", |
| "middle": [], |
| "last": "Sebastiani", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Preslav Nakov, Alan Ritter, Sara Rosenthal, Fabrizio Sebastiani, and Veselin Stoyanov. 2016. Semeval- 2016 task 4: Sentiment analysis in twitter. In Proceedings of the 10th International Workshop on Semantic Evaluation, pages 1-18. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Sentiment of emojis", |
| "authors": [ |
| { |
| "first": "Petra", |
| "middle": [ |
| "Kralj" |
| ], |
| "last": "Novak", |
| "suffix": "" |
| }, |
| { |
| "first": "Jasmina", |
| "middle": [], |
| "last": "Smailovi\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "PLOS ONE", |
| "volume": "10", |
| "issue": "12", |
| "pages": "1--22", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Petra Kralj Novak, Jasmina Smailovi\u0107, Borut Sluban, and Igor Mozeti\u010d. 2015. Sentiment of emojis. PLOS ONE, 10(12):1-22.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Twitter as a corpus for sentiment analysis and opinion mining", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Pak", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Paroubek", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 7th International Conference on Language Resources and Evaluation. European Language Resources Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Pak and Patrick Paroubek. 2010. Twitter as a corpus for sentiment analysis and opinion mining. In Proceedings of the 7th International Conference on Language Resources and Evaluation. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Opinion mining and sentiment analysis. Foundations and Trends in Information Retrieval", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "1--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang and Lillian Lee. 2008. Opinion mining and sentiment analysis. Foundations and Trends in In- formation Retrieval, 2(1-2):1-135.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Effect size in single-case research: A review of nine nonoverlap techniques", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [ |
| "I" |
| ], |
| "last": "Parker", |
| "suffix": "" |
| }, |
| { |
| "first": "Kimberly", |
| "middle": [ |
| "J" |
| ], |
| "last": "Vannest", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "L" |
| ], |
| "last": "Davis", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Behavior Modification", |
| "volume": "35", |
| "issue": "4", |
| "pages": "303--322", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard I. Parker, Kimberly J. Vannest, and John L. Davis. 2011. Effect size in single-case research: A review of nine nonoverlap techniques. Behavior Modification, 35(4):303-322.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The edinburgh twitter corpus", |
| "authors": [ |
| { |
| "first": "Sa\u0161a", |
| "middle": [], |
| "last": "Petrovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Miles", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Lavrenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL HLT 2010 Workshop on Computational Linguistics in a World of Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "25--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sa\u0161a Petrovi\u0107, Miles Osborne, and Victor Lavrenko. 2010. The edinburgh twitter corpus. In Proceedings of the NAACL HLT 2010 Workshop on Computa- tional Linguistics in a World of Social Media, pages 25-26. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Using emoticons to reduce dependency in machine learning techniques for sentiment classification", |
| "authors": [ |
| { |
| "first": "Jonathon", |
| "middle": [], |
| "last": "Read", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the ACL Student Research Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "43--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathon Read. 2005. Using emoticons to reduce de- pendency in machine learning techniques for senti- ment classification. In Proceedings of the ACL Stu- dent Research Workshop, pages 43-48. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A multidimensional approach for detecting irony in twitter. Language Resources and Evaluation", |
| "authors": [ |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Reyes", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "Tony", |
| "middle": [], |
| "last": "Veale", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "47", |
| "issue": "", |
| "pages": "239--268", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antonio Reyes, Paolo Rosso, and Tony Veale. 2013. A multidimensional approach for detecting irony in twitter. Language Resources and Evaluation, 47(1):239-268.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Empatweet: Annotating and detecting emotions on twitter", |
| "authors": [ |
| { |
| "first": "Kirk", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "A" |
| ], |
| "last": "Roach", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Josh", |
| "middle": [], |
| "last": "Guthrie", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanda", |
| "middle": [ |
| "M" |
| ], |
| "last": "Harabagiu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 8th International Conference on Language Resources and Evaluation. European Language Resources Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kirk Roberts, Michael A. Roach, Joseph Johnson, Josh Guthrie, and Sanda M. Harabagiu. 2012. Em- patweet: Annotating and detecting emotions on twit- ter. In Proceedings of the 8th International Confer- ence on Language Resources and Evaluation. Euro- pean Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Semeval-2014 task 9: Sentiment analysis in twitter", |
| "authors": [ |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 8th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "73--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sara Rosenthal, Alan Ritter, Preslav Nakov, and Veselin Stoyanov. 2014. Semeval-2014 task 9: Sen- timent analysis in twitter. In Proceedings of the 8th International Workshop on Semantic Evaluation, pages 73-80. Association for Computational Lin- guistics and Dublin City University.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Harnessing twitter \"big data\" for automatic emotion identification", |
| "authors": [ |
| { |
| "first": "Wenbo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Krishnaprasad", |
| "middle": [], |
| "last": "Thirunarayan", |
| "suffix": "" |
| }, |
| { |
| "first": "Amit", |
| "middle": [ |
| "P" |
| ], |
| "last": "Sheth", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 ASE/IEEE International Conference on Social Computing and 2012 ASE/IEEE International Conference on Privacy, Security, Risk and Trust", |
| "volume": "", |
| "issue": "", |
| "pages": "587--592", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenbo Wang, Lu Chen, Krishnaprasad Thirunarayan, and Amit P. Sheth. 2012. Harnessing twitter \"big data\" for automatic emotion identification. In Pro- ceedings of the 2012 ASE/IEEE International Con- ference on Social Computing and 2012 ASE/IEEE International Conference on Privacy, Security, Risk and Trust, pages 587-592. IEEE Computer Society.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "On verbal irony. Irony in language and thought", |
| "authors": [ |
| { |
| "first": "Deirdre", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Sperber", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "35--56", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deirdre Wilson and Dan Sperber. 2007. On verbal irony. Irony in language and thought, pages 35-56.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "content": "<table><tr><td/><td>A1</td><td>A2</td><td>A3</td></tr><tr><td>Subjective</td><td colspan=\"3\">0.671 0.748 0.657</td></tr><tr><td>Objective</td><td colspan=\"3\">0.330 0.252 0.343</td></tr><tr><td>None</td><td colspan=\"3\">0.330 0.252 0.343</td></tr><tr><td>Positive</td><td colspan=\"3\">0.509 0.476 0.426</td></tr><tr><td>Neutral</td><td colspan=\"3\">0.038 0.146 0.131</td></tr><tr><td>Negative</td><td colspan=\"3\">0.123 0.126 0.100</td></tr><tr><td>None</td><td colspan=\"3\">0.330 0.252 0.343</td></tr><tr><td>Explicit</td><td colspan=\"3\">0.254 0.512 0.416</td></tr><tr><td>Implicit</td><td colspan=\"3\">0.416 0.236 0.242</td></tr><tr><td>Not Ironic</td><td colspan=\"3\">0.980 0.988 0.971</td></tr><tr><td>Ironic</td><td colspan=\"3\">0.020 0.012 0.029</td></tr><tr><td>None</td><td colspan=\"3\">0.374 0.355 0.507</td></tr><tr><td>Joy</td><td colspan=\"3\">0.317 0.328 0.243</td></tr><tr><td colspan=\"4\">Anticipation 0.144 0.108 0.078</td></tr><tr><td>Disgust</td><td colspan=\"3\">0.070 0.071 0.047</td></tr><tr><td>Surprise</td><td colspan=\"3\">0.038 0.024 0.038</td></tr><tr><td>Sadness</td><td colspan=\"3\">0.035 0.044 0.036</td></tr><tr><td>Anger</td><td colspan=\"3\">0.014 0.034 0.028</td></tr><tr><td>Trust</td><td colspan=\"3\">0.008 0.034 0.022</td></tr><tr><td>Fear</td><td colspan=\"3\">0.001 0.001 0.002</td></tr></table>", |
| "text": "Label distribution per annotator", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td/><td>A1</td><td>A2</td><td>A3</td></tr><tr><td>Topic</td><td colspan=\"3\">0.141 0.129 0.095</td></tr><tr><td>Positive</td><td colspan=\"3\">0.559 0.601 0.593</td></tr><tr><td>Negative</td><td colspan=\"3\">0.141 0.187 0.173</td></tr><tr><td>Neutral</td><td colspan=\"3\">0.160 0.083 0.139</td></tr></table>", |
| "text": "Emoji distribution", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td/><td>Topic</td><td>0.127 0.476</td></tr><tr><td>Multi-View</td><td colspan=\"2\">Negative 0.079 0.143</td></tr><tr><td>Sentiment Corpus</td><td>Neutral</td><td>0.111 0.190</td></tr><tr><td/><td>Positive</td><td>0.683 0.190</td></tr><tr><td>Emoji Sentiment Ranking (Novak et al., 2015)</td><td colspan=\"2\">Negative 0.124 0.493 Neutral 0.613 0.209 Positive 0.263 0.298</td></tr></table>", |
| "text": "Emoji label distribution", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td/><td colspan=\"5\">Subjective/Objective Sentiment Polarity Implicit/Explicit Irony Emotion</td></tr><tr><td>A1 vs A2</td><td>0.606</td><td>0.598</td><td>0.354</td><td>0.949</td><td>0.590</td></tr><tr><td>A2 vs A3</td><td>0.670</td><td>0.596</td><td>0.476</td><td>0.923</td><td>0.601</td></tr><tr><td>A1 vs A3</td><td>0.592</td><td>0.585</td><td>0.416</td><td>0.912</td><td>0.551</td></tr></table>", |
| "text": "Inter-agreement (PABAK-OS)", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td/><td/><td colspan=\"3\">: Self-agreement (PABAK-OS)</td><td/></tr><tr><td/><td/><td colspan=\"5\">Subjective/Objective Sentiment Polarity Implicit/Explicit Irony Emotion</td></tr><tr><td/><td>A1</td><td>0.920</td><td>0.920</td><td>0.640</td><td>1.000</td><td>0.820</td></tr><tr><td/><td>A2</td><td>0.878</td><td>0.867</td><td>0.670</td><td>0.960</td><td>0.865</td></tr><tr><td/><td>A3</td><td>1.000</td><td>0.920</td><td>0.850</td><td>0.878</td><td>0.820</td></tr><tr><td colspan=\"4\">variability among human perceptions. This inter-</td><td/><td/></tr><tr><td colspan=\"3\">agreement measure can be summarized as:</td><td/><td/></tr><tr><td>k =</td><td colspan=\"2\">observed agreement \u2212 chance agreement 1 \u2212 chance agreement</td><td>(1)</td><td/><td/></tr></table>", |
| "text": "", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td/><td>Disagreement (irony)</td></tr><tr><td>A1 vs A2</td><td>76</td></tr><tr><td>A2 vs A3</td><td>114</td></tr><tr><td>A1 vs A3</td><td>132</td></tr></table>", |
| "text": "Count of disagreement on the irony label", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |