| { |
| "paper_id": "Y13-1036", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:32:28.450756Z" |
| }, |
| "title": "Collective Sentiment Classification based on User Leniency and Product Popularity", |
| "authors": [ |
| { |
| "first": "Wenliang", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Tokyo \u2021 National Institute of Informatics", |
| "location": {} |
| }, |
| "email": "wl-gao@tkl.iis.u-tokyo.ac.jp" |
| }, |
| { |
| "first": "Naoki", |
| "middle": [], |
| "last": "Yoshinaga", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Tokyo \u2021 National Institute of Informatics", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Nobuhiro", |
| "middle": [], |
| "last": "Kaji", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Tokyo \u2021 National Institute of Informatics", |
| "location": {} |
| }, |
| "email": "kaji@tkl.iis.u-tokyo.ac.jp" |
| }, |
| { |
| "first": "Masaru", |
| "middle": [], |
| "last": "Kitsuregawa", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Tokyo \u2021 National Institute of Informatics", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We propose a method of collective sentiment classification that assumes dependencies among labels of an input set of reviews. The key observation behind our method is that the distribution of polarity labels over reviews written by each user or written on each product is often skewed in the real world; intolerant users tend to report complaints while popular products are likely to receive praise. We encode these characteristics of users and products (referred to as user leniency and product popularity) by introducing global features in supervised learning. To resolve dependencies among labels of a given set of reviews, we explore two approximated decoding algorithms, \"easiest-first decoding\" and \"two-stage decoding\". Experimental results on two real-world datasets with product and user/product information confirmed that our method contributed greatly to the classification accuracy.", |
| "pdf_parse": { |
| "paper_id": "Y13-1036", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We propose a method of collective sentiment classification that assumes dependencies among labels of an input set of reviews. The key observation behind our method is that the distribution of polarity labels over reviews written by each user or written on each product is often skewed in the real world; intolerant users tend to report complaints while popular products are likely to receive praise. We encode these characteristics of users and products (referred to as user leniency and product popularity) by introducing global features in supervised learning. To resolve dependencies among labels of a given set of reviews, we explore two approximated decoding algorithms, \"easiest-first decoding\" and \"two-stage decoding\". Experimental results on two real-world datasets with product and user/product information confirmed that our method contributed greatly to the classification accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In document-level sentiment classification, early studies have exploited language-based clues (e.g., n-grams) extracted from the textual content (Turney, 2002; Pang et al., 2002) , followed by recent studies which adapt the classifier to the reviews written by a specific user or written on a specific product (Tan et al., 2011; Seroussi et al., 2010; Speriosu et al., 2011; . Although the user-and product-aware methods exhibited better performance over the methods based on purely textual clues, most of them use only the user information (Tan et al., 2011; Seroussi et al., 2010; Speriosu et al., 2011) , or they assume that the user and the product of a test review is known in advance . These assumptions heavily limit their applicability in a real-world scenario where new users and new products are ceaselessly emerging.", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 159, |
| "text": "(Turney, 2002;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 160, |
| "end": 178, |
| "text": "Pang et al., 2002)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 310, |
| "end": 328, |
| "text": "(Tan et al., 2011;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 329, |
| "end": 351, |
| "text": "Seroussi et al., 2010;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 352, |
| "end": 374, |
| "text": "Speriosu et al., 2011;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 541, |
| "end": 559, |
| "text": "(Tan et al., 2011;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 560, |
| "end": 582, |
| "text": "Seroussi et al., 2010;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 583, |
| "end": 605, |
| "text": "Speriosu et al., 2011)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper proposes a method of collective sentiment classification that is aware of the user and the product of the target review, which benefits from the skewed distributions of polarity labels: intolerant users tend to report complaints while popular products are likely to receive praise. We introduce global features to encode these characteristics of a user and a product (referred to as user leniency and product popularity), and then compute the values of global features along with testing. Our method is therefore applicable to reviews written by users and on products that are not observed in the training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Because global features depend on labels of test reviews while the labels reversely depend on the global features, we need to globally optimize a label configuration for a given set of reviews. In this study, we resort to approximate algorithms, easiest-first (Tsuruoka and Tsujii, 2005) and twostage strategies (Krishnan and Manning, 2006) , in decoding labels, and empirically compare their speed and accuracy.", |
| "cite_spans": [ |
| { |
| "start": 260, |
| "end": 287, |
| "text": "(Tsuruoka and Tsujii, 2005)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 312, |
| "end": 340, |
| "text": "(Krishnan and Manning, 2006)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We evaluated our method on two real-world datasets with product (Maas et al., 2011) and user/product information (Blitzer et al., 2007) . Experimental results demonstrated that the collective sentiment classification significantly improved the classification accuracy against the state-of-the-art methods, regardless of the choice of decoding strategy.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 83, |
| "text": "(Maas et al., 2011)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 113, |
| "end": 135, |
| "text": "(Blitzer et al., 2007)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The remainder of this paper is organized as follows. Section 2 discusses related work that exploits user and product information in a sentiment classification task. Then, Section 3 proposes a method that collectively classifies polarity of a given set of reviews. Section 4 reports experimental results. Finally, Section 5 concludes this study and addresses future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Early studies on sentiment analysis considers only textual content for classifying the sentiment of a given review (Pang and Lee, 2008) . Pang et al. (2002) developed a supervised sentiment classifier which only takes n-gram features. Nakagawa et al. (2010) and Socher et al. (2011) considered structural interaction among words to capture complex intra-sentential phenomena such as polarity shifting (Li et al., 2010) .", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 135, |
| "text": "(Pang and Lee, 2008)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 138, |
| "end": 156, |
| "text": "Pang et al. (2002)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 235, |
| "end": 257, |
| "text": "Nakagawa et al. (2010)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 262, |
| "end": 282, |
| "text": "Socher et al. (2011)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 401, |
| "end": 418, |
| "text": "(Li et al., 2010)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "On the other hand, recent studies started exploring the effectiveness of user and/or product information. Tan et al., (2011) and Speriosu et al., (2011) exploited user network behind a social media (Twitter in their case) and assumed that friends give similar ratings towards similar products. Seroussi et al. (2010) proposed a framework that computes users' similarity on the basis of text and their rating histories. Then, they classify a given review by referring to ratings given for the same product by other users who are similar to the user in question. However, such user networks are not always available in the real world.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 124, |
| "text": "Tan et al., (2011)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 129, |
| "end": 152, |
| "text": "Speriosu et al., (2011)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 294, |
| "end": 316, |
| "text": "Seroussi et al. (2010)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Li et al. (2011) incorporate user-or productdependent n-gram features into a classifier. They argue that users use a personalized language to express their sentiment, while the sentiment toward a product is described by product-specific language. This approach, however, requires the training data to contain reviews written by test users and written for test products. This is infeasible since labeling reviews requires too much manual work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "This section describes our method of collective sentiment classification that uses user leniency and product popularity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our task is, given a set of N reviews R, to predict labels Y, where y r \u2208 {+1, \u22121} 1 for each given review r \u2208 R. The label of each review is predicted based on the following scoring function:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "s r = score(x r ) = w T x r ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where x r is feature vector representation of the review r and w is the weight vector. With this scoring function, the label is predicted as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "y r = sgn(s r ) = +1 if s r > 0, \u22121 otherwise.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our interest is to exploit user leniency and product popularity for improving sentiment classification. We realize this by encoding such biases as two global features, as detailed in Section 3.2. Since global features make it impossible to independently predict the labels of reviews, we explored two approximate decoding strategies in Section 3.3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Note that we assume the review is associated with the user who wrote that review, the product on which that review is written, or both. This assumption is not unrealistic nowadays. User information is available in many standard dataset (Blitzer et al., 2007; Pang and Lee, 2004) . Moreover, as for product information, even if such information is not available, it is possible to extract it (Qiu et al., 2011) . We should emphasize here that our method does not require user profiles, product descriptions, or any sort of extrinsic knowledge on the users and the products.", |
| "cite_spans": [ |
| { |
| "start": 236, |
| "end": 258, |
| "text": "(Blitzer et al., 2007;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 259, |
| "end": 278, |
| "text": "Pang and Lee, 2004)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 391, |
| "end": 409, |
| "text": "(Qiu et al., 2011)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our features can be divided into local and global ones such that x r = {x l r , x g r }. While local features (x l r ) are conventional word n-grams (n = 1 and n = 2), global features (x g r ) represent the user leniency and product popularity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our global features are computed as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "x g r = {f u + (r), f u \u2212 (r), f p + (r), f p \u2212 (r)}, where f u + (r) = |{r j | y j = +1, r j \u2208 N u (r)}| |N u (r)| , f u \u2212 (r) = |{r j | y j = \u22121, r j \u2208 N u (r)}| |N u (r)| , f p + (r) = |{r j | y j = +1, r j \u2208 N p (r)}| |N p (r)| , f p \u2212 (r) = |{r j | y j = \u22121, r j \u2208 N p (r)}| |N p (r)| .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "N u (r), the user-related neighbors, is the set of reviews, excluding r, written by the user who wrote the review r, and N p (r), the product-related neighbors, is the set of reviews, excluding r, on the same product as the review r, respectively. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "r max = arg max r i \u2208R |score(x r i )| 6: y rmax = sgn(score(x rmax )) 7: for r j \u2208 (N u (r max ) \u222a N p (r max )) \u2229 R do 8: update global features 9:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "re-compute score(x r j )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "10:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "R = R\\{r max } 11: return Y", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The first two features capture user leniency, i.e., how likely the user is to write positive and negative reviews, respectively. The other features capture product popularity, i.e., how likely positive and negative reviews on the product at hand are to be written.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The global features make it difficult to perform decoding, i.e., labeling reviews, since each review can no longer be labeled independently. Exact decoding algorithms based on dynamic programming are not feasible in our case, because the search space grows exponentially as the number of test reviews increases. So instead, we explore and empirically compare two approximate algorithms: easy-first (Tsuruoka and Tsujii, 2005 ) and two-stage strategy (Krishnan and Manning, 2006) .", |
| "cite_spans": [ |
| { |
| "start": 398, |
| "end": 424, |
| "text": "(Tsuruoka and Tsujii, 2005", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 450, |
| "end": 478, |
| "text": "(Krishnan and Manning, 2006)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Two Approximate Decoding Strategies", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Algorithm 1 depicts the easiest-first decoding algorithm. This strategy iteratively determines the label of each review one by one. In each iteration step, a review that is the easiest to label , i.e., the review with the highest score, is picked up (line 5 in Algorithm 1), and then its label is determined (line 6 in Algorithm 1). This process is repeated until all the reviews are labeled. The global features are computed by using the labels of reviews that are already assigned with labels. That is, at the beginning of decoding, no global features are fired; more global features are fired as the labeling process proceeds. The score of the review is computed in a different way depending on how global features are fired, as analogous to (Tsuruoka and Tsujii, 2005) . Specifically, we prepare four classifiers, and those classifiers are used when 1 Next, we introduce a two-stage strategy (Krishnan and Manning, 2006), which has better scalability than easy-first strategy. It is depicted in Algorithm 2. This strategy performs decoding twice. In the first stage (line 1 to line 2 in Algorithm 2), we ignore all the global features, and use only local features to classify all the reviews. In the second stage (line 3 to line 5 in Algorithm 2), labels predicted in the first stage are used to compute global features and the labels are re-assigned by using both global features and local features. In our case, two-stage at first only uses word n-gram features to estimate the labels of reviews. Thereafter, those labels are used to compute global features in the second stage.", |
| "cite_spans": [ |
| { |
| "start": 745, |
| "end": 772, |
| "text": "(Tsuruoka and Tsujii, 2005)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Two Approximate Decoding Strategies", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "This subsection analyzes the time complexity of the two decoding strategy with respect to the number of reviews, N .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Time Complexity", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In easiest-first strategy, two processes consume most of the computing time. One is choosing the easiest review label (line 5 in Algorithm 1). The arg max operation takes O(log N ) time in each iteration by using a heap structure. Thus, the total time complexity in this step is O(N log N ) for N iterations. Another bottleneck is score recomputation (line 9 in Algorithm 1). To update the score for each review r j \u2208 N u (r max )\u2229N p (r max ), we need at most |N u (r max ) \u2229 N p (r max )| times delete and insert operations to the heap. Since we could limit the number of reviews for each user or each product, |N u (r max )\u2229N p (r max )| is treated as a constant C. In two-stage strategy, the complexity is O(N ) for both stages. Then the total complexity is also O(N ) , which is the same as the existing method that uses only local textual features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Time Complexity", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "It is straightforward to train the parameters of the scoring functions. We train a binary classifier as the score estimation function in Eq. 1, considering word n-gram features, user leniency features, and product popularity features. The values of global features are computed by using the gold labels. We assume that a value of the user leniency feature or product popularity feature for a review whose user has no other reviews or whose product has no other reviews is set to 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "In this section, we evaluate our method of collective sentiment classification on two real-world review datasets with user/product or product information (Blitzer et al., 2007; Maas et al., 2011) . We preprocessed each review in the datasets by OpenNLP 3 toolkit to detect sentence boundaries and to tokenize n-grams. Following Pang et al. (2002) , we induce word unigrams and bigrams as local features, taking negation into account. We ignored n-grams that appeared less than six times in the training data.", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 176, |
| "text": "(Blitzer et al., 2007;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 177, |
| "end": 195, |
| "text": "Maas et al., 2011)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 328, |
| "end": 346, |
| "text": "Pang et al. (2002)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We adopted a confidence-weighted linear classifier (Dredze et al., 2008) with n-gram features as our baseline. To make the comparison fair, we used the same classifier, which despite of local features also considers global features, as the local classifier in our method. We used the default hyper-parameters to this classifier. Note that the confidence-weighted algorithm performed as good as SVM (Dredze et al., 2008 ) so it constructs a strong baseline.", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 72, |
| "text": "(Dredze et al., 2008)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 398, |
| "end": 418, |
| "text": "(Dredze et al., 2008", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Blitzer et al. (2007) and Maas et al. (2011) collected two datasets which contain user/product or 3 http://opennlp.apache.org/", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 21, |
| "text": "(2007)", |
| "ref_id": null |
| }, |
| { |
| "start": 26, |
| "end": 44, |
| "text": "Maas et al. (2011)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Blitzer Maas Seroussi et al., (2010) 89.37 n/a Maas et al., (2011) n/a 88 product information respectively. Table 1 summarizes the statistics of the two datasets. We should mention that the original Blitzer dataset contains more than 780k reviews collected from Amazon.com on several domains (e.g. books, movies and games). We automatically delete replicated reviews written by the same author on the same product (resulting in 740k raw reviews). Then the reviews are balanced for positive and negative labels (over 90k reviews for each) to maintain consistency with the Maas dataset. The Maas dataset has 25,000 positive and 25,000 negative reviews on movies. We have used a URL (linked to the movie title) provided with each review as the identifier of the product movie. Because user information cannot be fully recovered, we only model the product popularity on this dataset.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 36, |
| "text": "Maas Seroussi et al., (2010)", |
| "ref_id": null |
| }, |
| { |
| "start": 47, |
| "end": 66, |
| "text": "Maas et al., (2011)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 108, |
| "end": 115, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
| "text": "In the two datasets, the reviews were ordered by 4 This result uses a different 2-fold splitting from ours. Under their splitting, our accuracies (+user+product) are 91.02%, 92.54% and 92.28% for baseline, easiest-first and two-stage with product popularity features respectively. Both strategies easily beat Maas et al., (2011)'s accuracy, 88.89%. The main difference between our baseline and their baseline is the features. They use only unigram features (baseline accuracy is 87.80%), while we use unigram and bigram (which considers negation) as features.", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 50, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
| "text": "5 The two-stage implementation in Gao et al. (2013) used a different setting. In that paper, the classifiers for the first stage and second stage are the same one considering local and global features. While in this paper, the classifier used in the first stage only considers local features and the classifier for the second stage considers both. user and product. In order to prevent the seemingly unfair accuracy gain under this particular splitting, we performed a 2-fold cross-validation after randomly splitting reviews, rather than using the split provided by the authors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
| "text": "We then compared the accuracy of our method with the two baseline methods on the two datasets: a confidence-weighted linear classifier with ngram features and a user-aware sentiment classifier proposed by Seroussi et al. (2010) .", |
| "cite_spans": [ |
| { |
| "start": 205, |
| "end": 227, |
| "text": "Seroussi et al. (2010)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In Seroussi's method, we need to fix the threshold to the number of reviews written by the same user to prepare and train a personalized classifier. After several test, the threshold is set to be 5 to gain a better performance 6 . Similarity of users is computed by word n-gram jaccard distance (called \"AIT\" in their paper). When the user of the test review is unseen in the training set, the default classifier trained on all the training reviews (identical to the other baseline classifier based on n-grams) is used to determine the label. Table 2 shows the experimental results. Our method significantly improved accuracies across the two datasets against the baseline classifier. A larger improvement is acquired on the Maas dataset probably because the average number of reviews for each product is higher than that on the Blitzer dataset so we could estimate more reliable global features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 543, |
| "end": 550, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "On the Blitzer dataset, the user leniency was more helpful than the product popularity. This is probably because the Blitzer dataset had been collected for users, which means to collect all the reviews written by each user. While on the Maas dataset, product information plays an important role because the reviews are collected for each product.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Among the two decoding methods, the easiestfirst decoding achieved better for this test. This conforms our expectation that the easiest-first decoding is more cautious than the other. However, easiest-first decoding has it's own weakness. In what follows, we investigate the speed and accuracy trade-off of the two decoding methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Impact of test review size on speed and accuracy: Next, we investigate the impact of the number of test reviews on speed and accuracy in our collective sentiment classification. We use Blitzer dataset for evaluation because of its larger review size. The two types of global features are both considered.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We performed 2-fold cross-validation with the same splitting for the Blitzer dataset, while changing the size of test reviews processed at once to investigate the impact of test review size on classification accuracy. In this experiment, we split the test reviews into equal-sized smaller subsets and applied our classifier independently to each of the subsets. We average the result for all the subsets to get a stable accuracy. Figure 1 shows the experimental results. When we process a larger number of reviews at once, the accuracies of the two methods increase because of more reliable global features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 430, |
| "end": 438, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We then performed the speed test using the same setting as the previous test, but measured the average time consumed by one single subset. As : Accuracy (%) on known/unknown user/product splits on Blitzer dataset. su, uu, sp and up stand for seen user, unseen user, seen product and unseen product respectively. The float inside parentheses is the difference compared to the baseline classifier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "shown in Figure 2 , the speed of the easiest-first decoding significantly slows down as the number of processed reviews grows, whereas the speed of the two-stage decoding increases compute time linearly. Meanwhile, the accuracy of the two strategies are competitive as shown in Figure 1 . These results confirm the analysis in Section 3.4 that the easiest-first decoding takes most of the time in re-computing and sorting the scores. More specifically, if the user has plenty of reviews or the product has been rated by plenty of reviews, the score frequently changes in each iteration in response to the change of global features' values. Based on these observations, when the amount of test data is large, the two-stage decoding is tremendously faster with only a little loss of accuracy. When the dataset is small, to fully utilize the user leniency and product popularity properties, easiest-first decoding should be adopted.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 17, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 278, |
| "end": 286, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Impact of user/product-awareness: We investigate the performance on the test reviews when we observed the user/product or not in the training data. We use the leniency and popularity global features on the Blitzer dataset, while we consider only product popularity features on the Maas dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The baseline classifier is expected to better estimate the labels of reviews written by known user or written on known product because similar ngrams would be contained in the training. On the other hand, in our model's setting, more reviews per user (or per product) should lead to more reliable leniency (or popularity) features thus better accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "On the Maas dataset as shown in Table 3 , the improvement on unknown product set is larger than that on known product set. We have to note here that the improvement on the unknown product set is greater while the review number for each product is smaller, which seems to violate our assumption. The reason is that baseline on the unknown product set performed poorly, which left our method larger space for improvement, even without enough global features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 32, |
| "end": 39, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "On the Blitzer dataset as shown in Table 4 , improvement is higher on known user sets. We find that average review number for each user is extremely low (1.04 reviews). Then lacking reliable global features may be the main reason for the poor performance on unknown user sets. We next investigate how many reviews are needed to compute reliable global features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 35, |
| "end": 42, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Accuracy Distribution: We here investigate how the reliability of the global features would influence the accuracy improvement. We exploit the accuracies with respect to how many reviews Table 6 : Accuracy (%, downer inside cell) of proposed method (two-stage) and the review size (upper inside cell) on Blitzer dataset separated according to the number of reviews on the product. The float inside parenthesizes is the difference from the baseline method. each user or product has. More reviews means that more reliable global features will be extracted by our model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 187, |
| "end": 194, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Since user leniency is the dominant influential global feature on the Blitzer dataset, Table 5 shows the leniency features is related to the improvement. Product popularity has limited influence on this dataset because it is collected according to users. On the Maas dataset, popularity features play an important role as shown in Table 6 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 87, |
| "end": 94, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 331, |
| "end": 338, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We noticed that when the review number of a user or a product reaches some point (|N u (r)| = 3 \u2212 7 in the Blitzer dataset and |N p (r)| = 2 \u2212 5 in the Maas dataset), having more reviews does not improve the accuracy any further. However, higher |N u (r)| or |N p (r)| number induces lower speed of easiest-first decoding as we analyzed in Section 3.4. Then, we could collect a bounded number of reviews for each user or product to cost less time and acquire better accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Examples: Some examples are given here to explain how our model would work. As shown in Table 7 , it is sometimes hard to correctly classify labels when only the text is given.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 88, |
| "end": 95, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In the first two examples, weak negative textual features are found in the test instance. However, since the two users are lenient and the first product is relatively popular (these characteristics are captured by our proposed method), these two reviews should still be given positive labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Frequently, sentiment expressed inside a review is not obvious if the classifier does not know the latent meaning of the words (sometimes, even real person feels hard to extract sentiment from these words). As we can see in the third example in Table 7 , the baseline classifier could recognize no obvious sentiment evidence from the textual features, while our method classified it as negative by detecting that its on a notorious product and the user is critical.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 245, |
| "end": 252, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "These examples illustrate that our model can successfully use the user and product-based dependencies to improve sentiment classification accuracy. Nowadays, in the big data background, this method could be more useful with huge amount of unlabeled data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We have presented collective sentiment classification which captures and utilizes user leniency and product popularity. Different from the previous studies that are aware of the user and product of ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The labels, +1 and -1, represent positive and negative polarity, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "However, based on our experiment as shown inFigure 2, the number |Nu(rmax) \u2229 Np(rmax)| is weakly related to N .PACLIC-27", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Seroussi et al., chose users who have more than 50 positive and 50 negative reviews. Few users or product in", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "the review, our model does not assume the training data to contain the reviews written by the same user of test reviews or written on the same product of test reviews. To decode a labels configuration for a given set of reviews, we adopted and compared two strategies, namely \"easiest-first decoding\" and \"two-stage decoding\". We conducted experiments on two real-world review datasets to compare our method with the existing methods. The proposed method performed more accurately than the baseline methods that uses word n-gram as features. It also outperforms another state-of-the-art method which trains personalized sentiment classifiers significantly. The more reviews per-user/product possesses, the larger improvement our model would gain. Two-stage strategy gains less accuracy than easiest-first, however, consumes only linear time in terms of the test review size (expected to be the same order of speed as the baseline classifiers). We plan to publish the code and datasets 7 .A future extension of this work is to use this on other task, such as classifying the subjectivity of a given document. We also plan to use dual decomposition as an advanced decoding strategy on our model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Biographies, bollywood, boom-boxes and blenders: Domain adaptation for sentiment classification", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Blitzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "440--447", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Blitzer, Mark Dredze, and Fernando Pereira. 2007. Biographies, bollywood, boom-boxes and blenders: Domain adaptation for sentiment classi- fication. In Proceedings of ACL, pages 440-447, Prague, Czech Republic.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Confidence-weighted linear classification", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| }, |
| { |
| "first": "Koby", |
| "middle": [], |
| "last": "Crammer", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "264--271", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Dredze, Koby Crammer, and Fernando Pereira. 7 http://www.tkl.iis.u-tokyo.ac.jp/~wl-gao/ 2008. Confidence-weighted linear classification. In Proceedings of ICML, pages 264-271, Helsinki, Finland.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Modeling user leniency and product popularity for sentiment classification", |
| "authors": [ |
| { |
| "first": "Wenliang", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Naoki", |
| "middle": [], |
| "last": "Yoshinaga", |
| "suffix": "" |
| }, |
| { |
| "first": "Nobuhiro", |
| "middle": [], |
| "last": "Kaji", |
| "suffix": "" |
| }, |
| { |
| "first": "Masaru", |
| "middle": [], |
| "last": "Kitsuregawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenliang Gao, Naoki Yoshinaga, Nobuhiro Kaji, and Masaru Kitsuregawa. 2013. Modeling user leniency and product popularity for sentiment classification. In Proceedings of IJCNLP, Nagoya, Japan. to ap- pear.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "An effective two-stage model for exploiting nonlocal dependencies in named entity recognition", |
| "authors": [ |
| { |
| "first": "Vijay", |
| "middle": [], |
| "last": "Krishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of COLING-ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1121--1128", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vijay Krishnan and Christopher D. Manning. 2006. An effective two-stage model for exploiting non- local dependencies in named entity recognition. In Proceedings of COLING-ACL, pages 1121-1128, Sydney, NSW, Australia.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Sentiment classification and polarity shifting", |
| "authors": [ |
| { |
| "first": "Shoushan", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [ |
| "M" |
| ], |
| "last": "Sophia", |
| "suffix": "" |
| }, |
| { |
| "first": "Ying", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Chu-Ren", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "635--643", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shoushan Li, Sophia Y. M. Lee, Ying Chen, Chu-Ren Huang, and Guodong Zhou. 2010. Sentiment clas- sification and polarity shifting. In Proceedings of COLING, pages 635-643, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Incorporating reviewer and product information for review rating prediction", |
| "authors": [ |
| { |
| "first": "Fangtao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongwei", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "1820--1825", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fangtao Li, Nathan Liu, Hongwei Jin, Kai Zhao, Qiang Yang, and Xiaoyan Zhu. 2011. Incorporating re- viewer and product information for review rating prediction. In Proceedings of IJCAI, pages 1820- 1825, Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Learning word vectors for sentiment analysis", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [ |
| "L" |
| ], |
| "last": "Maas", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "E" |
| ], |
| "last": "Daly", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "T" |
| ], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of ACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "142--150", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analysis. In Proceedings of ACL-HLT, pages 142-150, Port- land, Oregon, USA.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Dependency tree-based sentiment classification using crfs with hidden variables", |
| "authors": [ |
| { |
| "first": "Tetsuji", |
| "middle": [], |
| "last": "Nakagawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Kentaro", |
| "middle": [], |
| "last": "Inui", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "786--794", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tetsuji Nakagawa, Kentaro Inui, and Sadao Kurohashi. 2010. Dependency tree-based sentiment classifica- tion using crfs with hidden variables. In Proceed- ings of NAACL-HLT, pages 786-794, Los Angeles, CA, USA.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A sentimental education: sentiment analysis using subjectivity summarization based on minimum cuts", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "271--278", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang and Lillian Lee. 2004. A sentimental educa- tion: sentiment analysis using subjectivity summa- rization based on minimum cuts. In Proceedings of ACL, pages 271-278, Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Opinion mining and sentiment analysis. Foundation and Trends in Information Retrieval", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "1--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang and Lillian Lee. 2008. Opinion mining and sentiment analysis. Foundation and Trends in Infor- mation Retrieval, 2(1-2):1-135.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Thumbs up? sentiment classification using machine learning techniques", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Shivakumar", |
| "middle": [], |
| "last": "Vaithyanathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang, Lillian Lee, and Shivakumar Vaithyanathan. 2002. Thumbs up? sentiment classification us- ing machine learning techniques. In Proceedings of EMNLP, pages 79-86, Pennsylvania, PA, USA.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Opinion word expansion and target extraction through double propagation", |
| "authors": [ |
| { |
| "first": "Guang", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiajun", |
| "middle": [], |
| "last": "Bu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Computational Linguistics", |
| "volume": "37", |
| "issue": "1", |
| "pages": "9--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guang Qiu, Bing Liu, Jiajun Bu, and Chun Chen. 2011. Opinion word expansion and target extraction through double propagation. Computational Lin- guistics, 37(1):9-27.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Collaborative inference of sentiments from texts", |
| "authors": [ |
| { |
| "first": "Yanir", |
| "middle": [], |
| "last": "Seroussi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingrid", |
| "middle": [], |
| "last": "Zukerman", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabian", |
| "middle": [], |
| "last": "Bohnert", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of UMAP", |
| "volume": "", |
| "issue": "", |
| "pages": "195--206", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yanir Seroussi, Ingrid Zukerman, and Fabian Bohnert. 2010. Collaborative inference of sentiments from texts. In Proceedings of UMAP, pages 195-206, Big Island, HI, USA.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Semi-supervised recursive autoencoders for predicting sentiment distributions", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "H" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "151--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Jeffrey Pennington, Eric H. Huang, Andrew Y. Ng, and Christopher D. Manning. 2011. Semi-supervised recursive autoencoders for predict- ing sentiment distributions. In Proceedings of EMNLP, pages 151-161, Edinburgh, Scotland, UK., July.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Twitter polarity classification with label propagation over lexical links and the follower graph", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Speriosu", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Sudan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sid", |
| "middle": [], |
| "last": "Upadhyay", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Baldridge", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of EMNLP, workshop on Unsupervised Learning in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "53--63", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Speriosu, Nikita Sudan, Sid Upadhyay, and Jason Baldridge. 2011. Twitter polarity classifica- tion with label propagation over lexical links and the follower graph. In Proceedings of EMNLP, work- shop on Unsupervised Learning in NLP, pages 53- 63, Edinburgh, UK.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "User-level sentiment analysis incorporating social networks", |
| "authors": [ |
| { |
| "first": "Chenhao", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Long", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ping", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of KDD", |
| "volume": "", |
| "issue": "", |
| "pages": "1397--1405", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chenhao Tan, Lillian Lee, Jie Tang, Long Jiang, Ming Zhou, and Ping Li. 2011. User-level sentiment anal- ysis incorporating social networks. In Proceedings of KDD, pages 1397-1405, San Diego, California, USA.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Bidirectional inference with the easiest-first strategy for tagging sequence data", |
| "authors": [ |
| { |
| "first": "Yoshimasa", |
| "middle": [], |
| "last": "Tsuruoka", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of HLT-EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "467--474", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshimasa Tsuruoka and Jun'ichi Tsujii. 2005. Bidi- rectional inference with the easiest-first strategy for tagging sequence data. In In Proceedings of HLT- EMNLP, pages 467-474, Vancouver, B.C., Canada.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Thumbs up or thumbs down?: semantic orientation applied to unsupervised classification of reviews", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "417--424", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter D. Turney. 2002. Thumbs up or thumbs down?: semantic orientation applied to unsupervised classi- fication of reviews. In Proceedings of ACL, pages 417-424, Pennsylvania, PA, USA.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "no global features are fired, (2) only user leniency features are fired, (3) only product pop-Algorithm 2 Two-stage strategy 1: for r \u2208 R do 2: y r = sgn(score(x r )) 3: for r \u2208 R do 4: compute global features 5: y r = sgn(score(x r )) 6: return Y ularity features are fired, and (4) both global features are fired, respectively.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "2 The overall time complexity sums up to O(N (log N + C log N )) = O(N log N ).", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "Average computation time when we changed the size of subset on test reviews.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF3": { |
| "num": null, |
| "text": "", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "text": "Algorithm 1 Easiest-first strategy 1: for r \u2208 R do", |
| "content": "<table><tr><td>2:</td><td>initialize the global features to 0</td></tr><tr><td>3:</td><td>compute score(x r )</td></tr><tr><td colspan=\"2\">4: while R = \u2205 do</td></tr><tr><td>5:</td><td/></tr></table>", |
| "num": null, |
| "html": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>: Accuracy (%) on review datasets.</td></tr><tr><td>+user/+product means modeling the user leniency</td></tr><tr><td>/ product popularity features. Accuracy marked</td></tr><tr><td>with \" \" or \">\" was significantly better than</td></tr><tr><td>baseline (p < 0.01 or 0.01 \u2264 p < 0.05 assessed</td></tr><tr><td>by McNemar's test).</td></tr></table>", |
| "num": null, |
| "html": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "text": "+1.24) 87.73 (+2.22) 92.73 (+1.32) proposed (two-stage) 93.09 (+1.22) 87.48 (+1.97) 92.68 (+1.21)", |
| "content": "<table><tr><td/><td>sp)</td><td>(up)</td><td>total</td></tr><tr><td>No. of reviews</td><td>46,397</td><td>3,603</td><td>50,000</td></tr><tr><td>Ave. No. of reviews/product</td><td>4.82</td><td>1.62</td><td>4.22</td></tr><tr><td>baseline</td><td>91.87</td><td>85.51</td><td>91.41</td></tr><tr><td>proposed (easiest-first)</td><td>93.11 (</td><td/><td/></tr></table>", |
| "num": null, |
| "html": null |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "text": "Accuracy (%) on known/unknown product splits on Maas dataset. sp and up stand for seen product and unseen product. Float inside parenthesizes is the difference compared to the baseline classifier.", |
| "content": "<table><tr><td/><td>(su, sp)</td><td>(uu, sp)</td><td>(su, up)</td><td>(uu, up)</td><td>total</td></tr><tr><td>No. of reviews</td><td>35,689</td><td>60,775</td><td>36,895</td><td>55,027</td><td>188,350</td></tr><tr><td>Ave. No. of reviews/user</td><td>2.04</td><td>1.04</td><td>2.14</td><td>1.04</td><td>1.40</td></tr><tr><td>Ave. No. of reviews/product</td><td>1.20</td><td>1.39</td><td>1.14</td><td>1.20</td><td>1.43</td></tr><tr><td>baseline</td><td>89.71</td><td>90.45</td><td>90.37</td><td>89.95</td><td>90.13</td></tr><tr><td>proposed (easiest-first)</td><td colspan=\"5\">91.42 (+1.71) 90.93 (+0.59) 92.19 (+1.82) 90.76 (+0.81) 91.11 (+0.98)</td></tr><tr><td>proposed (two-stage)</td><td colspan=\"5\">91.23 (+1.52) 90.88 (+0.54) 92.09 (+1.72) 90.30 (+0.35) 91.02 (+0.89)</td></tr></table>", |
| "num": null, |
| "html": null |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "text": "", |
| "content": "<table/>", |
| "num": null, |
| "html": null |
| }, |
| "TABREF7": { |
| "type_str": "table", |
| "text": "No. of product-related neighbors (|N p (r)|) +0.03) 90.13 (+0.26) 90.80 (+0.53) 92.48 (+0.58) +1.37) 91.24 (+2.11) 91.32 (+1.17) 92.12 (+1.04)", |
| "content": "<table><tr><td/><td/><td/><td>0</td><td>1</td><td>2</td><td>3-</td></tr><tr><td>No. of user-related</td><td>u (r)|) neighbors (|N</td><td colspan=\"3\">55,043 10,768 90.11 (1 0 91.18 (2 4,595 91.28 (+1.55) 91.26(+2.66) 90.56 (+1.71) 92.14 (+2.36) 34,735 16,601 9,630 6,530 2,974 1,536 2,711 1,292 663 8,120 4,974 2,174 998 3-7 92.48 (+2.33) 91.19 (+2.27) 92.18 (+3.31) 90.18 (+1.50)</td></tr><tr><td/><td/><td>8-</td><td colspan=\"2\">13,243 93.73 (+2.2) 92.28 (+1.74) 91.28 (+1.52) 90.22 (+1.62) 7,484 3,017 1,289</td></tr></table>", |
| "num": null, |
| "html": null |
| }, |
| "TABREF8": { |
| "type_str": "table", |
| "text": "Accuracy (%, downer inside cell) of proposed method (two-stage) and review size (upper inside cell) on Blitzer dataset separated according to the number of reviews written by the user and the number of reviews on the product.The float inside parenthesizes is the difference from the baseline method.", |
| "content": "<table><tr><td/><td colspan=\"3\">No. of product-related neighbors (|N p (r)|)</td><td/></tr><tr><td>0</td><td>1</td><td>2-5</td><td>6-10</td><td>11-</td></tr><tr><td>3,597</td><td>4,646</td><td>14,394</td><td>10,444</td><td>16,919</td></tr><tr><td colspan=\"5\">86.41 (+0.42) 90.94 (+1.96) 92.59 (+1.75) 93.98 (+1.31) 93.78 (+0.83)</td></tr></table>", |
| "num": null, |
| "html": null |
| } |
| } |
| } |
| } |