| { |
| "paper_id": "S14-2004", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:32:18.387427Z" |
| }, |
| "title": "SemEval-2014 Task 4: Aspect Based Sentiment Analysis", |
| "authors": [ |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Pontiki", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Institute for Language and Speech Processing, \"Athena\" Research Center", |
| "location": {} |
| }, |
| "email": "mpontiki@ilsp.gr" |
| }, |
| { |
| "first": "Haris", |
| "middle": [], |
| "last": "Papageorgiou", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Dimitrios", |
| "middle": [], |
| "last": "Galanis", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "galanisd@ilsp.gr" |
| }, |
| { |
| "first": "Ion", |
| "middle": [], |
| "last": "Androutsopoulos", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Economics", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Pavlopoulos", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Athens University of Economics", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Suresh", |
| "middle": [], |
| "last": "Manandhar", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of York", |
| "location": {} |
| }, |
| "email": "suresh@cs.york.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Sentiment analysis is increasingly viewed as a vital task both from an academic and a commercial standpoint. The majority of current approaches, however, attempt to detect the overall polarity of a sentence, paragraph, or text span, irrespective of the entities mentioned (e.g., laptops) and their aspects (e.g., battery, screen). SemEval-2014 Task 4 aimed to foster research in the field of aspect-based sentiment analysis, where the goal is to identify the aspects of given target entities and the sentiment expressed for each aspect. The task provided datasets containing manually annotated reviews of restaurants and laptops, as well as a common evaluation procedure. It attracted 163 submissions from 32 teams.", |
| "pdf_parse": { |
| "paper_id": "S14-2004", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Sentiment analysis is increasingly viewed as a vital task both from an academic and a commercial standpoint. The majority of current approaches, however, attempt to detect the overall polarity of a sentence, paragraph, or text span, irrespective of the entities mentioned (e.g., laptops) and their aspects (e.g., battery, screen). SemEval-2014 Task 4 aimed to foster research in the field of aspect-based sentiment analysis, where the goal is to identify the aspects of given target entities and the sentiment expressed for each aspect. The task provided datasets containing manually annotated reviews of restaurants and laptops, as well as a common evaluation procedure. It attracted 163 submissions from 32 teams.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "With the proliferation of user-generated content on the web, interest in mining sentiment and opinions in text has grown rapidly, both in academia and business. Early work in sentiment analysis mainly aimed to detect the overall polarity (e.g., positive or negative) of a given text or text span (Pang et al., 2002; Turney, 2002) . However, the need for a more fine-grained approach, such as aspect-based (or 'feature-based') sentiment analysis (ABSA), soon became apparent (Liu, 2012) . For example, laptop reviews not only express the overall sentiment about a specific model (e.g., \"This is a great This work is licensed under a Creative Commons Attribution 4.0 International Licence. Page numbers and proceedings footer are added by the organisers. Licence details: http://creativecommons.org/licenses/by/4.0/ laptop\"), but also sentiments relating to its specific aspects, such as the hardware, software, price, etc. Subsequently, a review may convey opposing sentiments (e.g., \"Its performance is ideal, I wish I could say the same about the price\") or objective information (e.g., \"This one still has the CD slot\") for different aspects of an entity.", |
| "cite_spans": [ |
| { |
| "start": 296, |
| "end": 315, |
| "text": "(Pang et al., 2002;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 316, |
| "end": 329, |
| "text": "Turney, 2002)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 474, |
| "end": 485, |
| "text": "(Liu, 2012)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "ABSA is critical in mining and summarizing opinions from on-line reviews (Gamon et al., 2005; Titov and McDonald, 2008; Hu and Liu, 2004a; Popescu and Etzioni, 2005) . In this setting, ABSA aims to identify the aspects of the entities being reviewed and to determine the sentiment the reviewers express for each aspect. Within the last decade, several ABSA systems of this kind have been developed for movie reviews (Thet et al., 2010) , customer reviews of electronic products like digital cameras (Hu and Liu, 2004a) or netbook computers (Brody and Elhadad, 2010) , services (Long et al., 2010) , and restaurants (Ganu et al., 2009; Brody and Elhadad, 2010) .", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 93, |
| "text": "(Gamon et al., 2005;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 94, |
| "end": 119, |
| "text": "Titov and McDonald, 2008;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 120, |
| "end": 138, |
| "text": "Hu and Liu, 2004a;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 139, |
| "end": 165, |
| "text": "Popescu and Etzioni, 2005)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 416, |
| "end": 435, |
| "text": "(Thet et al., 2010)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 499, |
| "end": 518, |
| "text": "(Hu and Liu, 2004a)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 540, |
| "end": 565, |
| "text": "(Brody and Elhadad, 2010)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 577, |
| "end": 596, |
| "text": "(Long et al., 2010)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 615, |
| "end": 634, |
| "text": "(Ganu et al., 2009;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 635, |
| "end": 659, |
| "text": "Brody and Elhadad, 2010)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Previous publicly available ABSA benchmark datasets adopt different annotation schemes within different tasks. The restaurant reviews dataset of Ganu et al. (2009) uses six coarse-grained aspects (e.g., FOOD, PRICE, SERVICE) and four overall sentence polarity labels (positive, negative, conflict, neutral). Each sentence is assigned one or more aspects together with a polarity label for each aspect; for example, \"The restaurant was expensive, but the menu was great.\" would be assigned the aspect PRICE with negative polarity and FOOD with positive polarity. In the product reviews dataset of Hu and Liu (2004a; 2004b) , aspect terms, i.e., terms naming aspects (e.g., 'radio', 'voice dialing') together with strength scores (e.g., 'radio': +2, 'voice dialing': \u22123) are pro-vided. No predefined inventory of aspects is provided, unlike the dataset of Ganu et al.", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 163, |
| "text": "Ganu et al. (2009)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 203, |
| "end": 224, |
| "text": "FOOD, PRICE, SERVICE)", |
| "ref_id": null |
| }, |
| { |
| "start": 596, |
| "end": 614, |
| "text": "Hu and Liu (2004a;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 615, |
| "end": 621, |
| "text": "2004b)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The SemEval-2014 ABSA Task is based on laptop and restaurant reviews and consists of four subtasks (see Section 2). Participants were free to participate in a subset of subtasks and the domains (laptops or restaurants) of their choice.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "For the first two subtasks (SB1, SB2), datasets on both domains (restaurants, laptops) were provided. For the last two subtasks (SB3, SB4), datasets only for the restaurant reviews were provided.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Aspect term extraction (SB1): Given a set of review sentences, the task is to identify all aspect terms present in each sentence (e.g., 'wine', 'waiter', 'appetizer', 'price', 'food') . We require all the aspect terms to be identified, including aspect terms for which no sentiment is expressed (neutral polarity). These will be useful for constructing an ontology of aspect terms and to identify frequently discussed aspects.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 183, |
| "text": "'wine', 'waiter', 'appetizer', 'price', 'food')", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Aspect term polarity (SB2): In this subtask, we assume that the aspect terms are given (as described in SB1) and the task is to determine the polarity of each aspect term (positive, negative, conflict, or neutral). The conflict label applies when both positive and negative sentiment is expressed about an aspect term (e.g., \"Certainly not the best sushi in New York, however, it is always fresh\"). An alternative would have been to tag the aspect term in these cases with the dominant polarity, but this in turn would be difficult to agree on.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Aspect category detection (SB3): Given a predefined set of aspect categories (e.g., PRICE, FOOD) and a set of review sentences (but without any annotations of aspect terms and their polarities), the task is to identify the aspect categories discussed in each sentence. Aspect categories are typically coarser than the aspect terms as defined in SB1, and they do not necessarily occur as terms in the sentences. For example, in \"Delicious but expensive\", the aspect categories FOOD and PRICE are not instantiated through specific aspect terms, but are only inferred through the adjectives 'delicious' and 'expensive'. SB1 and SB3 were treated as separate subtasks, thus no information linking aspect terms to aspect categories was provided.", |
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 96, |
| "text": "PRICE, FOOD)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Aspect category polarity (SB4): For this subtask, aspect categories for each review sentence are provided. The goal is to determine the polar-ity (positive, negative, conflict, or neutral) of each aspect category discussed in each sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Subtasks SB1 and SB2 are useful in cases where no predefined inventory of aspect categories is available. In these cases, frequently discussed aspect terms of the entity can be identified together with their overall sentiment polarities. We hope to include an additional aspect term aggregation subtask in future (Pavlopoulos and Androutsopoulos, 2014b) to cluster near-synonymous (e.g., 'money', 'price', 'cost') or related aspect terms (e.g., 'design', 'color', 'feeling') together with their averaged sentiment scores as shown in Subtasks SB3 and SB4 are useful when a predefined inventory of (coarse) aspect categories is available. A table like the one of Fig. 1 can then also be generated, but this time using the most frequent aspect categories to label the rows, with stars showing the proportion of reviews expressing positive vs. negative opinions for each aspect category.", |
| "cite_spans": [ |
| { |
| "start": 313, |
| "end": 353, |
| "text": "(Pavlopoulos and Androutsopoulos, 2014b)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 661, |
| "end": 667, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The training and test data sizes are provided in Table 1. The restaurants training data, consisting of 3041 English sentences, is a subset of the dataset from Ganu et al. (2009) , which included annotations for coarse aspect categories (as in SB3) and overall sentence polarities. We added annotations for aspect terms occurring in the sentences (SB1), aspect term polarities (SB2), and aspect category polarities (SB4). Additional restaurant reviews were collected and annotated (from scratch) in the same manner and used as test data (800 sentences). The laptops dataset contains 3845 English sentences extracted from laptop custumer reviews. Human annotators tagged the aspect terms (SB1) and their polarities (SB2); 3045 sentences were used for training and 800 for testing (evaluation).", |
| "cite_spans": [ |
| { |
| "start": 159, |
| "end": 177, |
| "text": "Ganu et al. (2009)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Train Test Total Restaurants 3041 800 3841 Laptops 3045 800 3845 Total 6086 1600 7686 Table 1 : Sizes (sentences) of the datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 86, |
| "end": 93, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Domain", |
| "sec_num": null |
| }, |
| { |
| "text": "For a given target entity (a restaurant or a laptop) being reviewed, the annotators were asked to provide two types of information: aspect terms (SB1) and aspect term polarities (SB2). For the restaurants dataset, two additional annotation layers were added: aspect category (SB3) and aspect category polarity (SB4). The annotators used BRAT (Stenetorp et al., 2012) , a web-based annotation tool, which was configured appropriately for the needs of the ABSA task. 1 Figure 2 shows an annotated sentence in BRAT, as viewed by the annotators.", |
| "cite_spans": [ |
| { |
| "start": 342, |
| "end": 366, |
| "text": "(Stenetorp et al., 2012)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 467, |
| "end": 475, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Process", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Stage 1: Aspect terms and polarities. During a first annotation stage, the annotators tagged all the single or multiword terms that named particular aspects of the target entity (e.g., \"I liked the service and the staff, but not the food\" \u2192 {'service', 'staff', 'food'}, \"The hard disk is very noisy\" \u2192 {'hard disk'}). They were asked to tag only aspect terms explicitly naming particular aspects (e.g., \"everything about it\" or \"it's expensive\" do not name particular aspects). The aspect terms were annotated as they appeared, even if misspelled (e.g., 'warrenty' instead of 'warranty'). Each identified aspect term also had to be assigned a polarity label (positive, negative, neutral, conflict). For example, \"I hated their fajitas, but their salads were great\" \u2192 {'fajitas': negative, 'salads': positive}, \"The hard disk is very noisy\" \u2192 {'hard disk': negative}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Process", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Each sentence of the two datasets was annotated by two annotators, a graduate student (annotator A) and an expert linguist (annotator B). Initially, two subsets of sentences (300 from each dataset) were tagged by annotator A and the annotations were inspected and validated by annotator B. The disagreements between the two annotators were confined to borderline cases. Taking into account the types of these disagreements (discussed below), annotator A was provided with additional guidelines and tagged the remainder of the sentences in both datasets. 2 When A was not confident, a decision was made collaboratively with B. When A and B disagreed, a decision was made collaboratively by them and a third expert annotator. Most disagreements fall into one of the following three types: Polarity ambiguity: In several sentences, it was unclear if the reviewer expressed positive or negative opinion, or no opinion at all (just reporting a fact), due to lack of context. For example, in \"12.44 seconds boot time\" it is unclear if the reviewer expresses a positive, negative, or no opinion about the aspect term 'boot time'. In future challenges, it would be better to allow the annotators (and the participating systems) to consider the entire review instead of each sentence in isolation. Multi-word aspect term boundaries: In several cases, the annotators disagreed on the exact boundaries of multi-word aspect terms when they appeared in conjunctions or disjunctions (e.g., \"selection of meats and seafoods\", \"noodle and rices dishes\", \"school or office use\"). In such cases, we asked the annotators to tag as a single aspect term the maximal noun phrase (the entire conjunction or disjunction). Other disagreements concerned the extent of the aspect terms when adjectives that may or may not have a subjective meaning were also present. 
For example, if 'large' in \"large whole shrimp\" is part of the dish name, then the guidelines require the adjective to be included in the aspect term; otherwise (e.g., in \"large portions\") 'large' is a subjectivity indicator not to be included in the aspect term. Despite the guidelines, in some cases it was difficult to isolate and tag the exact aspect term, because of intervening words, punctuation, or long-term dependencies. Aspect term vs. reference to target entity: In some cases, it was unclear if a noun or noun phrase was used as the aspect term or if it referred to the entity being reviewed as whole. In \"This place is awesome\", for example, 'place' most probably refers to the restaurant as a whole (hence, it should not be tagged as an aspect term), but in \"Cozy Figure 2 : A sentence in the BRAT tool, annotated with four aspect terms ('appetizers', 'salads', 'steak', 'pasta') and one aspect category (FOOD). For aspect categories, the whole sentence is tagged. place and good pizza\" it probably refers to the ambience of the restaurant. A broader context would again help in some of these cases.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 2619, |
| "end": 2627, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Process", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We note that laptop reviews often evaluate each laptop as a whole, rather than expressing opinions about particular aspects. Furthermore, when they express opinions about particular aspects, they often do so by using adjectives that refer implicitly to aspects (e.g., 'expensive', 'heavy'), rather than using explicit aspect terms (e.g., 'cost', 'weight'); the annotators were instructed to tag only explicit aspect terms, not adjectives implicitly referring to aspects. By contrast, restaurant reviews contain many more aspect terms ( Another difference between the two datasets is that the neutral class is much more frequent in (the aspect terms of) laptops, since laptop reviews often mention features without expressing any (clear) sentiment (e.g., \"the latest version does not have a disc drive\"). Nevertheless, the positive class is the majority in both datasets, but it is much more frequent in restaurants ( Table 2 ). The majority of the aspect terms are single-words in both datasets (2148 in laptops, 4827 in restaurants, out of 3012 and 4827 total aspect terms, respectively).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 917, |
| "end": 924, |
| "text": "Table 2", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Process", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Stage 2: Aspect categories and polarities. In this task, each sentence needs to be tagged with the aspect categories discussed in the sentence. The aspect categories are FOOD, SERVICE, PRICE, AMBIENCE (the atmosphere and environment of 3 We count aspect term occurrences, not distinct terms. a restaurant), and ANECDOTES/MISCELLANEOUS (sentences not belonging in any of the previous aspect categories). 4 For example, \"The restaurant was expensive, but the menu was great\" is assigned the aspect categories PRICE and FOOD. Additionally, a polarity (positive, negative, conflict, neutral) for each aspect category should be provided (e.g., \"The restaurant was expensive, but the menu was great\" \u2192 {PRICE: negative, FOOD: positive}.", |
| "cite_spans": [ |
| { |
| "start": 236, |
| "end": 237, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Process", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "One annotator validated the existing aspect category annotations of the corpus of Ganu et al. (2009) . The agreement with the existing annotations was 92% measured as average F 1 . Most disagreements concerned additions of missing aspect category annotations. Furthermore, the same annotator validated and corrected (if needed) the existing polarity labels per aspect category annotation. The agreement for the polarity labels was 87% in terms of accuracy and it was measured only on the common aspect category annotations. The additional 800 sentences (not present in Ganu et al.'s dataset) were used for testing and were annotated from scratch in the same manner. The distribution of the polarity classes per category is presented in Table 3 . Again, 'positive' is the majority polarity class while the dominant aspect category is FOOD in both the training and test restaurant sentences.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 100, |
| "text": "Ganu et al. (2009)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 736, |
| "end": 743, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Process", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Determining the aspect categories of the sentences and their polarities (Stage 2) was an easier task compared to detecting aspect terms and their polarities (Stage 1). The annotators needed less time in Stage 2 and it was easier to reach agreement. Exceptions were some sentences where it was difficult to decide if the categories AMBIENCE or ANECDOTES/MISCELLANEOUS applied (e.g., \"One of my Fav spots in the city\"). We instructed the annotators to classify those sentences only in ANECDOTES/MISCELLANEOUS, if they conveyed Test FOOD 867 302 209 69 66 16 90 31 1232 418 PRICE 179 51 115 28 17 3 10 1 321 83 SERVICE 324 101 218 63 35 5 20 3 597 172 AMBIENCE 263 76 98 21 47 13 23 8 431 general views about a restaurant, without explicitly referring to its atmosphere or environment.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 525, |
| "end": 722, |
| "text": "Test FOOD 867 302 209 69 66 16 90 31 1232 418 PRICE 179 51 115 28 17 3 10 1 321 83 SERVICE 324 101 218 63 35 5 20 3 597 172 AMBIENCE 263 76 98 21 47 13 23 8 431", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Process", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The datasets of the ABSA task were provided in an XML format (see Fig. 3 ). They are available with a non commercial, no redistribution license through META-SHARE, a repository devoted to the sharing and dissemination of language resources (Piperidis, 2012). 5", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 66, |
| "end": 72, |
| "text": "Fig. 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Format and Availability of the Datasets", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The evaluation of the ABSA task ran in two phases. In Phase A, the participants were asked to return the aspect terms (SB1) and aspect categories (SB3) for the provided test datasets. Subsequently, in Phase B, the participants were given the gold aspect terms and aspect categories (as in Fig. 3 ) for the sentences of Phase A and they were asked to return the polarities of the aspect terms (SB2) and the polarities of the aspect categories of each sentence (SB4). 6 Each participating team was allowed to submit up to two runs per subtask and domain (restaurants, laptops) in each phase; one constrained (C), where only the provided training data and other resources (e.g., publicly available lexica) excluding additional annotated sentences could be used, and one unconstrained (U), where additional data of any kind could be used for training. In the latter case, the teams had to report the resources they used. To evaluate aspect term extraction (SB1) and aspect category detection (SB3) in Phase A, we used 5 The datasets can be downloaded from http:// metashare.ilsp.gr:8080/. META-SHARE (http: //www.meta-share.org/) was implemented in the framework of the META-NET Network of Excellence (http://www.meta-net.eu/).", |
| "cite_spans": [ |
| { |
| "start": 1014, |
| "end": 1015, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 289, |
| "end": 295, |
| "text": "Fig. 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation Measures and Baselines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "6 Phase A ran from 9:00 GMT, March 24 to 21:00 GMT, March 25, 2014. Phase B ran from 9:00 GMT, March 27 to 17:00 GMT, March 29, 2014. the F 1 measure, defined as usually:", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 133, |
| "text": "GMT, March 29, 2014.", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Measures and Baselines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "F 1 = 2 \u2022 P \u2022 R P + R", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Evaluation Measures and Baselines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where precision (P ) and recall (R) are defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Measures and Baselines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P = |S \u2229 G| |S| , R = |S \u2229 G| |G|", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Evaluation Measures and Baselines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Here S is the set of aspect term or aspect category annotations (in SB1 and SB3, respectively) that a system returned for all the test sentences (of a domain), and G is the set of the gold (correct) aspect term or aspect category annotations. To evaluate aspect term polarity (SB2) and aspect category polarity (SB4) detection in Phase B, we calculated the accuracy of each system, defined as the number of correctly predicted aspect term or aspect category polarity labels, respectively, divided by the total number of aspect term or aspect category annotations. Recall that we used the gold aspect term and category annotations in Phase B.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Measures and Baselines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We provided four baselines, one per subtask: 7 Aspect term extraction (SB1) baseline: A sequence of tokens is tagged as an aspect term in a test sentence (of a domain), if it is listed in a dictionary that contains all the aspect terms of the training sentences (of the same domain). Aspect term polarity (SB2) baseline: For each aspect term t in a test sentence s (of a particular domain), this baseline checks if t had been encountered in the training sentences (of the domain). If so, it retrieves the k most similar to s training sentences (of the domain), and assigns to the aspect term t the most frequent polarity it had in the k sentences. Otherwise, if t had not been encountered in the training sentences, it is assigned the most frequent aspect term polarity label of the <sentence id=\"11351725#582163#9\"> <text>Our waiter was friendly and it is a shame that he didnt have a supportive staff to work with.</text> <aspectTerms> <aspectTerm term=\"waiter\" polarity=\"positive\" from=\"4\" to=\"10\"/> <aspectTerm term=\"staff\" polarity=\"negative\" from=\"74\" to=\"79\"/> </aspectTerms> <aspectCategories> <aspectCategory category=\"service\" polarity=\"conflict\"/> </aspectCategories> </sentence> Figure 3 : An XML snippet that corresponds to the annotated sentence of Fig. 2. training set. The similarity between two sentences is measured as the Dice coefficient of the sets of (distinct) words of the two sentences. For example, the similarity between \"this is a demo\" and \"that is yet another demo\" is 2\u20222 4+5 = 0.44. Aspect category extraction (SB3) baseline: For every test sentence s, the k most similar to s training sentences are retrieved (as in the SB2 baseline). Then, s is assigned the m most frequent aspect category labels of the k retrieved sentences; m is the most frequent number of aspect category labels per sentence among the k sentences. 
Aspect category polarity (SB4): This baseline assigns to each aspect category c of a test sentence s the most frequent polarity label that c had in the k most similar to s training sentences (of the same domain), considering only training sentences that have the aspect category label c. Sentence similarity is computed as in the SB2 baseline.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1191, |
| "end": 1199, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 1263, |
| "end": 1270, |
| "text": "Fig. 2.", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation Measures and Baselines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For subtasks SB2 and SB4, we also use a majority baseline that assigns the most frequent polarity (in the training data) to all the aspect terms and aspect categories. The scores of all the baselines and systems are presented in Tables 4-6.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Measures and Baselines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The ABSA task attracted 32 teams in total and 165 submissions (systems), 76 for phase A and 89 for phase B. Based on the human-annotation experience, the expectations were that systems would perform better in Phase B (SB3, SB4, involving aspect categories) than in Phase A (SB1, SB2, involving aspect terms). The evaluation results confirmed our expectations (Tables 4-6).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The aspect term extraction subtask (SB1) attracted 24 teams for the laptops dataset and 24 teams for the restaurants dataset; consult Table 4 . Overall, the systems achieved significantly higher scores (+10%) in the restaurants domain, compared to laptops. The best F 1 score (74.55%) for laptops was achieved by the IHS RD. team, which relied on Conditional Random Fields (CRF) with features extracted using named entity recognition, POS tagging, parsing, and semantic analysis. The IHS RD. team used additional reviews from Amazon and Epinions (without annotated terms) to learn the sentiment orientation of words and they trained their CRF on the union of the restaurant and laptop training data that we provided; the same trained CRF classifier was then used in both domains.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results of Phase A", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The second system, the unconstrained system of DLIREC, also uses a CRF, along with POS and dependency tree based features. It also uses features derived from the aspect terms of the training data and clusters created from additional reviews from YELP and Amazon. In the restaurants domain, the unconstrained system of DLIREC ranked first with an F 1 of 84.01%, but the best unconstrained system, that of XRCE, was very close (83.98%). The XRCE system relies on a parser to extract syntactic/semantic dependencies (e.g., 'dissapointed'-'food'). For aspect term extraction, the parser's vocabulary was enriched with the aspect terms of the training data and a term list extracted from Wikipedia and Wordnet. A set of grammar rules was also added to detect multiword terms and associate them with the corresponding aspect category (e.g., FOOD, PRICE) .", |
| "cite_spans": [ |
| { |
| "start": 835, |
| "end": 847, |
| "text": "FOOD, PRICE)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results of Phase A", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The aspect category extraction subtask (SB3) attracted 18 teams. As shown in Table 5 , the best score was achieved by the system of NRC-Canada (88.57%), which relied on five binary (one-vs-all) SVMs, one for each aspect category. The SVMs used features based on various types of n-grams (e.g., stemmed) and information from a lexicon learnt from YELP data, which associates aspect terms with aspect categories. The latter lexicon significantly improved F 1 . The constrained UNITOR system uses five SVMs with bag-of-words (BoW) features, which in the unconstrained submission are generalized using distributional vectors learnt from Opinosis and TripAdvisor data. Similarly, UWB uses a binary MaxEnt classifier for each aspect category with BoW and TF-IDF features. The unconstrained submission of UWB also uses word clusters learnt using various methods (e.g., LDA); additional features indicate which clusters the words of the sentence being classified come from. XRCE uses information identified by its syntactic parser as well as BoW features to train a logistic regression model that assigns to the sentence probabilities of belonging to each aspect category. A probability threshold, tuned on the training data, is then used to determine which categories will be assigned to the sentence.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 77, |
| "end": 84, |
| "text": "Table 5", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results of Phase A", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The aspect term polarity detection subtask (SB2) attracted 26 teams for the laptops dataset and 26 teams for the restaurants dataset. DCU and NRC-Canada had the best systems in both domains (Table 6). Their scores on the laptops dataset were identical (70.48%). On the restaurants dataset, the DCU system performed slightly better (80.95% vs. 80.15%). For SB2, both NRC-Canada and DCU relied on an SVM classifier with features mainly based on n-grams, parse trees, and several out-of-domain, publicly available sentiment lexica (e.g., MPQA, SentiWordnet and Bing Liu's Opinion Lexicon). NRC-Canada also used two automatically compiled polarity lexica for restaurants and laptops, obtained from YELP and Amazon data, respectively. Furthermore, NRC-Canada showed by ablation experiments that the most useful features are those derived from the sentiment lexica. On the other hand, DCU used only publicly available lexica, which were manually adapted by filtering words that do not express sentiment in laptop and restaurant reviews (e.g., 'really') and by adding others that were missing and do express sentiment (e.g., 'mouthwatering').", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results of Phase B", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The aspect category polarity detection subtask (SB4) attracted 20 teams. NRC-Canada again had the best score (82.92%) using an SVM classifier. The same feature set as in SB2 was used, but it was further enriched to capture information related to each specific aspect category. The second team, XRCE, used information from its syntactic parser, BoW features, and an out-of-domain sentiment lexicon to train an SVM model that predicts the polarity of each given aspect category.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results of Phase B", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We provided an overview of Task 4 of SemEval-2014. The task aimed to foster research in aspect-based sentiment analysis (ABSA). We constructed and released ABSA benchmark datasets containing manually annotated reviews from two domains (restaurants, laptops). The task attracted 163 submissions from 32 teams that were evaluated in four subtasks centered around aspect terms (detecting aspect terms and their polarities) and coarser aspect categories (assigning aspect categories and aspect category polarities to sentences). The task will be repeated in SemEval-2015 with additional datasets and a domain-adaptation subtask. 8 In the future, we hope to add an aspect term aggregation subtask (Pavlopoulos and Androutsopoulos, 2014a) . Table 6 : Results for the aspect term polarity subtask (SB2). Stars indicate unconstrained systems. The \u2020 indicates a constrained system that was not trained on the in-domain training dataset (unlike the rest of the constrained systems), but on the union of the two training datasets. IITP's original submission files were corrupted; they were resent and scored after the end of the evaluation period.", |
| "cite_spans": [ |
| { |
| "start": 625, |
| "end": 626, |
| "text": "8", |
| "ref_id": null |
| }, |
| { |
| "start": 692, |
| "end": 732, |
| "text": "(Pavlopoulos and Androutsopoulos, 2014a)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 735, |
| "end": 742, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "annotation process, and Juli Bakagianni, who supported our use of the META-SHARE platform. We are also very grateful to the participants for their feedback. Maria Pontiki and Haris Papageorgiou were supported by the IS-HELLEANA (09-72-922) and the POLYTROPON (KRIPIS-GSRT, MIS: 448306) projects.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Consult http://brat.nlplab.org/ for more information about BRAT.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The guidelines are available at: http://alt.qcri.org/semeval2014/task4/data/uploads/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In the original dataset of Ganu et al. (2009), ANECDOTES and MISCELLANEOUS were separate categories, but in practice they were difficult to distinguish and we merged them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Implementations of the baselines and further information about the baselines are available at: http://alt.qcri.org/semeval2014/task4/data/uploads/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Consult http://alt.qcri.org/semeval2015/task12/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Ioanna Lazari, who provided an initial version of the laptops dataset, Konstantina Papanikolaou, who carried out a critical part of the", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "An unsupervised aspect-sentiment model for online reviews", |
| "authors": [ |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Brody", |
| "suffix": "" |
| }, |
| { |
| "first": "Noemie", |
| "middle": [], |
| "last": "Elhadad", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "804--812", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel Brody and Noemie Elhadad. 2010. An unsu- pervised aspect-sentiment model for online reviews. In Proceedings of NAACL, pages 804-812, Los An- geles, California.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Pulse: Mining customer opinions from free text", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Aue", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Corston-Oliver", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "K" |
| ], |
| "last": "Ringger", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "IDA", |
| "volume": "", |
| "issue": "", |
| "pages": "121--132", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Gamon, Anthony Aue, Simon Corston-Oliver, and Eric K. Ringger. 2005. Pulse: Mining customer opinions from free text. In IDA, pages 121-132, Madrid, Spain.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Beyond the stars: Improving rating predictions using review text content", |
| "authors": [ |
| { |
| "first": "Gayatree", |
| "middle": [], |
| "last": "Ganu", |
| "suffix": "" |
| }, |
| { |
| "first": "Noemie", |
| "middle": [], |
| "last": "Elhadad", |
| "suffix": "" |
| }, |
| { |
| "first": "Am\u00e9lie", |
| "middle": [], |
| "last": "Marian", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of WebDB", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gayatree Ganu, Noemie Elhadad, and Am\u00e9lie Marian. 2009. Beyond the stars: Improving rating predic- tions using review text content. In Proceedings of WebDB, Providence, Rhode Island, USA.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Mining and summarizing customer reviews", |
| "authors": [ |
| { |
| "first": "Minqing", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of KDD", |
| "volume": "", |
| "issue": "", |
| "pages": "168--177", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minqing Hu and Bing Liu. 2004a. Mining and sum- marizing customer reviews. In Proceedings of KDD, pages 168-177, Seattle, WA, USA.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Mining opinion features in customer reviews", |
| "authors": [ |
| { |
| "first": "Minqing", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "755--760", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minqing Hu and Bing Liu. 2004b. Mining opinion fea- tures in customer reviews. In Proceedings of AAAI, pages 755-760, San Jose, California.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Sentiment Analysis and Opinion Mining", |
| "authors": [ |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Synthesis Lectures on Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bing Liu. 2012. Sentiment Analysis and Opinion Min- ing. Synthesis Lectures on Human Language Tech- nologies. Morgan & Claypool Publishers.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A review selection approach for accurate feature rating estimation", |
| "authors": [ |
| { |
| "first": "Chong", |
| "middle": [], |
| "last": "Long", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of COLING (Posters)", |
| "volume": "", |
| "issue": "", |
| "pages": "766--774", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chong Long, Jie Zhang, and Xiaoyan Zhu. 2010. A review selection approach for accurate feature rating estimation. In Proceedings of COLING (Posters), pages 766-774, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Thumbs up? sentiment classification using machine learning techniques", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Shivakumar", |
| "middle": [], |
| "last": "Vaithyanathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang, Lillian Lee, and Shivakumar Vaithyanathan. 2002. Thumbs up? sentiment classification us- ing machine learning techniques. In Proceedings of EMNLP, pages 79-86, Philadelphia, Pennsylvania, USA.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Aspect term extraction for sentiment analysis: New datasets, new evaluation measures and an improved unsupervised method", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Pavlopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Ion", |
| "middle": [], |
| "last": "Androutsopoulos", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of LASM-EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "44--52", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Pavlopoulos and Ion Androutsopoulos. 2014a. Aspect term extraction for sentiment analysis: New datasets, new evaluation measures and an improved unsupervised method. In Proceedings of LASM- EACL, pages 44-52, Gothenburg, Sweden.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Multi-granular aspect aggregation in aspect-based sentiment analysis", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Pavlopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Ion", |
| "middle": [], |
| "last": "Androutsopoulos", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "78--87", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Pavlopoulos and Ion Androutsopoulos. 2014b. Multi-granular aspect aggregation in aspect-based sentiment analysis. In Proceedings of EACL, pages 78-87, Gothenburg, Sweden.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The META-SHARE language resources sharing infrastructure: Principles, challenges, solutions", |
| "authors": [ |
| { |
| "first": "Stelios", |
| "middle": [], |
| "last": "Piperidis", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of LREC-2012", |
| "volume": "", |
| "issue": "", |
| "pages": "36--42", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stelios Piperidis. 2012. The META-SHARE language resources sharing infrastructure: Principles, chal- lenges, solutions. In Proceedings of LREC-2012, pages 36-42, Istanbul, Turkey.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Extracting product features and opinions from reviews", |
| "authors": [ |
| { |
| "first": "Ana-Maria", |
| "middle": [], |
| "last": "Popescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of HLT/EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "339--346", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ana-Maria Popescu and Oren Etzioni. 2005. Extract- ing product features and opinions from reviews. In Proceedings of HLT/EMNLP, pages 339-346, Van- couver, British Columbia, Canada.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "BRAT: a web-based tool for NLP-assisted text annotation", |
| "authors": [ |
| { |
| "first": "Pontus", |
| "middle": [], |
| "last": "Stenetorp", |
| "suffix": "" |
| }, |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Topi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomoko", |
| "middle": [], |
| "last": "Ohta", |
| "suffix": "" |
| }, |
| { |
| "first": "Sophia", |
| "middle": [], |
| "last": "Ananiadou", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "102--107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pontus Stenetorp, Sampo Pyysalo, Goran Topi\u0107, Tomoko Ohta, Sophia Ananiadou, and Jun'ichi Tsu- jii. 2012. BRAT: a web-based tool for NLP-assisted text annotation. In Proceedings of EACL, pages 102-107, Avignon, France.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Aspect-based sentiment analysis of movie reviews on discussion boards", |
| "authors": [ |
| { |
| "first": "Jin-Cheon", |
| "middle": [], |
| "last": "Tun Thura Thet", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "S G" |
| ], |
| "last": "Na", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Khoo", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "J. Information Science", |
| "volume": "36", |
| "issue": "6", |
| "pages": "823--848", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tun Thura Thet, Jin-Cheon Na, and Christopher S. G. Khoo. 2010. Aspect-based sentiment analysis of movie reviews on discussion boards. J. Information Science, 36(6):823-848.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A joint model of text and aspect ratings for sentiment summarization", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [ |
| "T" |
| ], |
| "last": "Mcdonald", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "308--316", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Titov and Ryan T. McDonald. 2008. A joint model of text and aspect ratings for sentiment sum- marization. In Proceedings of ACL, pages 308-316, Columbus, Ohio, USA.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Thumbs up or thumbs down? semantic orientation applied to unsupervised classification of reviews", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "417--424", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Turney. 2002. Thumbs up or thumbs down? se- mantic orientation applied to unsupervised classifi- cation of reviews. In Proceedings of ACL, pages 417-424, Philadelphia, Pennsylvania, USA.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Fig. 1.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": "Aggregated aspect terms and average sentiment polarities for a target entity.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "text": "", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td/><td/><td/><td/><td colspan=\"2\">, last column). 3</td></tr><tr><td>Dataset</td><td colspan=\"5\">Pos. Neg. Con. Neu. Tot.</td></tr><tr><td colspan=\"2\">LPT-TR 987</td><td>866</td><td>45</td><td colspan=\"2\">460 2358</td></tr><tr><td colspan=\"2\">LPT-TE 341</td><td>128</td><td>16</td><td>169</td><td>654</td></tr><tr><td colspan=\"3\">RST-TR 2164 805</td><td>91</td><td colspan=\"2\">633 3693</td></tr><tr><td colspan=\"2\">RST-TE 728</td><td>196</td><td>14</td><td colspan=\"2\">196 1134</td></tr></table>" |
| }, |
| "TABREF1": { |
| "text": "Aspect terms and their polarities per domain. LPT and RST indicate laptop and restaurant reviews, respectively. TR and TE indicate the training and test set.", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "text": "Aspect categories distribution per sentiment class.", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "text": "", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">Laptops</td><td colspan=\"2\">Restaurants</td></tr><tr><td>Team</td><td>F 1</td><td>Team</td><td>F 1</td></tr><tr><td>IHS RD.</td><td colspan=\"2\">74.55 \u2020 DLIREC</td><td>84.01*</td></tr><tr><td>DLIREC</td><td colspan=\"2\">73.78* XRCE</td><td>83.98</td></tr><tr><td>DLIREC</td><td>70.4</td><td colspan=\"2\">NRC-Can. 80.18</td></tr><tr><td colspan=\"2\">NRC-Can. 68.56</td><td>UNITOR</td><td>80.09</td></tr><tr><td>UNITOR</td><td colspan=\"2\">67.95* UNITOR</td><td>79.96*</td></tr><tr><td>XRCE</td><td>67.24</td><td>IHS RD.</td><td>79.62 \u2020</td></tr><tr><td>SAP RI</td><td>66.6</td><td>UWB</td><td>79.35*</td></tr><tr><td>IITP</td><td>66.55</td><td>SeemGo</td><td>78.61</td></tr><tr><td>UNITOR</td><td>66.08</td><td>DLIREC</td><td>78.34</td></tr><tr><td>SeemGo</td><td>65.99</td><td>ECNU</td><td>78.24</td></tr><tr><td>ECNU</td><td>65.88</td><td>SAP RI</td><td>77.88</td></tr><tr><td>SNAP</td><td>62.4</td><td>UWB</td><td>76.23</td></tr><tr><td>DMIS</td><td>60.59</td><td>IITP</td><td>74.94</td></tr><tr><td>UWB</td><td>60.39</td><td>DMIS</td><td>72.73</td></tr><tr><td>JU CSE.</td><td>59.37</td><td>JU CSE.</td><td>72.34</td></tr><tr><td>lsis lif</td><td>56.97</td><td>Blinov</td><td>71.21*</td></tr><tr><td>USF</td><td>52.58</td><td>lsis lif</td><td>71.09</td></tr><tr><td>Blinov</td><td colspan=\"2\">52.07* USF</td><td>70.69</td></tr><tr><td>UFAL</td><td>48.98</td><td>EBDG</td><td>69.28*</td></tr><tr><td>UBham</td><td>47.49</td><td>UBham</td><td>68.63*</td></tr><tr><td>UBham</td><td colspan=\"2\">47.26* UBham</td><td>68.51</td></tr><tr><td>SINAI</td><td>45.28</td><td>SINAI</td><td>65.41</td></tr><tr><td>EBDG</td><td colspan=\"2\">41.52* V3</td><td>60.43*</td></tr><tr><td>V3</td><td colspan=\"2\">36.62* UFAL</td><td>58.88</td></tr><tr><td colspan=\"2\">COMMIT. 25.19</td><td colspan=\"2\">COMMIT. 
54.38</td></tr><tr><td colspan=\"2\">NILCUSP 25.19</td><td colspan=\"2\">NILCUSP 49.04</td></tr><tr><td>iTac</td><td>23.92</td><td>SNAP</td><td>46.46</td></tr><tr><td/><td/><td>iTac</td><td>38.29</td></tr><tr><td>Baseline</td><td>35.64</td><td>Baseline</td><td>47.15</td></tr></table>" |
| }, |
| "TABREF6": { |
| "text": "Results for aspect term extraction (SB1).", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Stars indicate unconstrained systems. The \u2020 indi-</td></tr><tr><td>cates a constrained system that was not trained on</td></tr><tr><td>the in-domain training dataset (unlike the rest of</td></tr><tr><td>the constrained systems), but on the union of the</td></tr><tr><td>two training datasets (laptops, restaurants).</td></tr></table>" |
| }, |
| "TABREF7": { |
| "text": "", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>: Results for aspect category detection</td></tr><tr><td>(SB3) and aspect category polarity (SB4). Stars</td></tr><tr><td>indicate unconstrained systems.</td></tr></table>" |
| } |
| } |
| } |
| } |