| { |
| "paper_id": "O14-3002", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:04:35.053062Z" |
| }, |
| "title": "Modeling the Helpful Opinion Mining of Online Consumer Reviews as a Classification Problem", |
| "authors": [ |
| { |
| "first": "Yi-Ching", |
| "middle": [], |
| "last": "Zeng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chaoyang University of Technology", |
| "location": { |
| "settlement": "Taichung", |
| "country": "Taiwan, R.O.C" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Tsun", |
| "middle": [], |
| "last": "Ku", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Central University", |
| "location": { |
| "country": "Taiwan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Shih-Hung", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chaoyang University of Technology", |
| "location": { |
| "settlement": "Taichung", |
| "country": "Taiwan, R.O.C" |
| } |
| }, |
| "email": "shwu@cyut.edu.tw" |
| }, |
| { |
| "first": "Liang-Pu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "chen@csie.ncu.edu.tw" |
| }, |
| { |
| "first": "Gwo-Dong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Central University", |
| "location": { |
| "country": "Taiwan" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The paper addresses an opinion mining problem: how to find the helpful reviews from online consumer reviews via the quality of the content. Since there are too many reviews, efficiently identifying the helpful ones earlier can benefit both consumers and companies. Consumers can read only the helpful opinions from helpful reviews before they purchase a product, while companies can acquire the true reasons a product is liked or hated. A system is built to assess the difficulty of the problem. The experimental results show that helpful reviews can be distinguished from unhelpful ones with high precision.", |
| "pdf_parse": { |
| "paper_id": "O14-3002", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The paper addresses an opinion mining problem: how to find the helpful reviews from online consumer reviews via the quality of the content. Since there are too many reviews, efficiently identifying the helpful ones earlier can benefit both consumers and companies. Consumers can read only the helpful opinions from helpful reviews before they purchase a product, while companies can acquire the true reasons a product is liked or hated. A system is built to assess the difficulty of the problem. The experimental results show that helpful reviews can be distinguished from unhelpful ones with high precision.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Online consumer (or customer) review is a very important information source for many potential consumers to decide whether to buy a product or not. Li et al. (2011) shows that, compared to an expert product review, \"the consumer product review in the online shopping environment will be perceived by consumers to be more credible.\" This fact makes opinion mining of consumer reviews more interesting since it shows that opinions from other consumers are more appreciated than those from experts. Nevertheless, some reviews are not 18 Yi-Ching Zeng et al. very helpful, as we can see from the voting results on each consumer review from readers on Amazon.com. This paper will address an opinion mining problem: how to find the helpful reviews from online consumers' reviews before mining the information from them. This task can benefit both consumers and companies. Consumers can read the opinions from useful reviews before they purchase a product, while companies can acquire the true reasons a product is liked or hated. Both save time from reading meaningless opinions that do not show good reasons. Figure 1 shows a clip image of an Amazon.com customer review. Each review has been labeled with stars by the author and people who found the review helpful and has been labeled with the number of total votes. A three-class classification problem is defined to model this application. A system is designed to find the helpful positive reviews for finding good reasons to buy a product; to find the helpful negative reviews for finding reasons not to buy a product; and to filter out the unhelpful reviews, no matter whether they are positive or negative.", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 164, |
| "text": "Li et al. (2011)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 534, |
| "end": 554, |
| "text": "Yi-Ching Zeng et al.", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1104, |
| "end": 1112, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "The paper is organized as follows. Section 2 describes the related works. Section 3 describes the features that can be used to classify the reviews as helpful or unhelpful. Section", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Figure 1. A clip image of an Amazon.com customer review.", |
| "sec_num": null |
| }, |
| { |
| "text": "Online Consumer Reviews as a Classification Problem 4 describes the data collection of this study. Section 5 reports and discusses the experiment. The final section gives conclusions and future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modeling the Helpful Opinion Mining of 19", |
| "sec_num": null |
| }, |
| { |
| "text": "Early works on opinion mining focused on the polarity of opinion, positive or negative; this kind of opinion mining is called sentiment analysis. Another type of opinion mining focused on finding the detailed information of a product from reviews; this approach is a kind of information extraction (Hu & Liu, 2004) . Recent research has focused on assessing the review quality before mining the opinion. Kim et al. (2006) explored the use of some semantic features for review helpfulness ranking. They found that some important features of a review, including length, unigrams, and stars, might provide the basis for assessing the helpfulness of reviews. Siersdorfer et al. (2010) presented a system that could automatically structure and filter comments for YouTube videos by analyzing dependencies between comments, views, comment ratings, and topic categories. Their method used the SentiWordNet thesaurus, a lexical WordNet-based resource containing sentiment annotations. Moghaddam et al. (2011) proposed the Matrix Factorization Model and Tensor Factorization Model to predict of the quality of online reviews, and they evaluated the models on a real-life database from Epinions.com. Lu (2010) exploited contextual information about authors' identities and social networks to improve review quality prediction. Lu's method provided a generic framework to incorporate social context information by adding regularization constraints to the text-based predictor. Xiong and Litman (2011) investigated the utility of incorporating specialized features tailored to peer-review helpfulness. They found that structural features, review unigrams, and meta-data combination were useful in modeling the helpfulness of both peer reviews and product reviews.", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 314, |
| "text": "(Hu & Liu, 2004)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 404, |
| "end": 421, |
| "text": "Kim et al. (2006)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 655, |
| "end": 680, |
| "text": "Siersdorfer et al. (2010)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 977, |
| "end": 1000, |
| "text": "Moghaddam et al. (2011)", |
| "ref_id": null |
| }, |
| { |
| "start": 1190, |
| "end": 1199, |
| "text": "Lu (2010)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1466, |
| "end": 1489, |
| "text": "Xiong and Litman (2011)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Observation is necessary to find features for the helpful/unhelpful classification. Connors et al. (2011) gave a list of common ideas related to helpfulness and unhelpfulness, shown in Table 1 , which was collected from 40 students, with each student reading 20 online reviews about a single product and giving comments on the reviews. The study provided 15 reasons people think a consumer review is helpful and 10 reasons why it is unhelpful. These ideas can be considered as features for a classifier. Nevertheless, some of them are difficult to implement and require clear definition. For example, mining comparative sentences from text requires considerable knowledge of the language. (Jindal & Liu, 2006) .", |
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 105, |
| "text": "Connors et al. (2011)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 689, |
| "end": 709, |
| "text": "(Jindal & Liu, 2006)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 185, |
| "end": 192, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Observation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "reasons they think it to be unhelpful (Connors et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 60, |
| "text": "(Connors et al., 2011)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Table 1. The 15 reasons that people think a customer review helpful and the 10", |
| "sec_num": null |
| }, |
| { |
| "text": "Pros Table 2 lists the features that we implement in this study. Compared with the features used in Kim et al. (2006) , we add more features, based on the observation of Connors et al. (2011) , especially the degree of detail. The first three features are common n-grams used between a review and the corresponding product description. We believe that they are effective since a good review should contain more relevant information and use exact terminology. The fourth feature is the length of the review. A very short review cannot give much information, and a long review might give more useful information. The fifth feature is whether or not the review makes a comparison among things. A good review should compare similar products. Our program detects whether the string \"compare to/with\" or the pattern \"ADJ+er than\" exists in the review or not, with the help of a list of comparative adjectives. The sixth feature is the degree of detail, which is a combination of length and n-gram. The degree of detail has not been defined well in previous works. Our definition is only a tentative one. We define the degree of detail of a review as:", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 117, |
| "text": "Kim et al. (2006)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 170, |
| "end": 191, |
| "text": "Connors et al. (2011)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 5, |
| "end": 12, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Helpfulness Times Mentioned", |
| "sec_num": null |
| }, |
| { |
| "text": "log10(Unigram+Bigram+Trigram+Length) (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where unigram, bigram, and trigram are the common n-grams between a review and the corresponding product description. Length is the length of the review. The seventh feature is the number of stars given by the review author. The eighth feature is whether the review contains \"Pros\" and \"Cons\" or not. Our system detects whether the string \"Pros\" and \"Cons\" exist in the review or not. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Unigram (Product Description)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Description", |
| "sec_num": null |
| }, |
| { |
| "text": "The number of unigrams used between the review and the corresponding product description", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Description", |
| "sec_num": null |
| }, |
| { |
| "text": "Bigram (Product Description)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Description", |
| "sec_num": null |
| }, |
| { |
| "text": "The number of bigrams used between the review and the corresponding product description", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Description", |
| "sec_num": null |
| }, |
| { |
| "text": "Trigram (Product Description)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Description", |
| "sec_num": null |
| }, |
| { |
| "text": "The number of trigrams used between the review and the corresponding product description", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Description", |
| "sec_num": null |
| }, |
| { |
| "text": "The length of a review", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Length", |
| "sec_num": null |
| }, |
| { |
| "text": "The review uses the string \"compare to\" or \"ADJ + er than\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparisons", |
| "sec_num": null |
| }, |
| { |
| "text": "Degree of detail Defined by formula (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparisons", |
| "sec_num": null |
| }, |
| { |
| "text": "The \"Star\" ratings of the review", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Use of Ratings", |
| "sec_num": null |
| }, |
| { |
| "text": "The review contains exactly the strings \"Pros\" and \"Cons\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pros and Cons", |
| "sec_num": null |
| }, |
| { |
| "text": "We use an example to show the eight feature values. Consider the review in Figure 2 , where the \"pros_cons\" value is 1, since we can see the author explicitly lists the pros and cons. The \"Detail\" value is 1.17760, as defined in Formula (1). The \"Length\" value is 568, which is the number of words in the review. The \"Compare\" value is 4, because the author really makes a comparison of this product with other products. The \"Star\" value is 5, since the author gave five stars to the product. The \"Unigram\" value is 15. The \"Bigram\" value is 0, since we found no common bigrams between the review and the corresponding product description (not shown here). Hence, the \"Trigram\" value is also 0. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 75, |
| "end": 83, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Pros and Cons", |
| "sec_num": null |
| }, |
| { |
| "text": "In order to test the idea, we collected online customer reviews manually from Amazon.com in March and April 2013. The reviews were from eight different product domains: Book, Digital Camera, Computer, Food & Drink, Movie, Shoes, Toys, and Cell phone. Without any special selection criterion in each domain, we collected the first available 1000+ reviews with an equal number of reviews of one to five stars. The average length was 80.63 words. The summary of our data collection is listed in The helpfulness score is given by the readers. As shown in Figure 1 , the reviewer labeled the number of stars and other users voted the review as helpful or unhelpful. We take the confidence in being helpful as an index to sort the reviews. Figure 3 shows the distribution of polarity (from 1 to 5 stars) and the helpful/unhelpful confidence, where the y-axis is the confidence score. Note that the confidence score in previous works has been defined as:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 551, |
| "end": 559, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 734, |
| "end": 742, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "4." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Confidence = (# of Think helpful vote / # of Total vote) × 100%", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Data Collection", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Nevertheless, since there are some high confidence reviews with very little support, the reviews might not be very helpful. We discount the confidence of them by redefining the confidence score as the log-support confidence (LSC): (3) Figure 3 shows the data distribution. The positive reviews (with 4 or 5 stars) get higher helpfulness confidence in most product categories. This fact shows that readers think other consumers are credible. The confidence of helpfulness is lower for the negative reviews. The average LSC confidence scores for each product category are listed in Table 4 . ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 235, |
| "end": 243, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 580, |
| "end": 587, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Instead of finding the correlation between the ranking of helpfulness and the prediction, we define the problem as a three-class classification problem. The three classes are: the helpful", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Three-class Classification Problem", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Online Consumer Reviews as a Classification Problem positive reviews, for finding good reasons to buy a product; the helpful negative reviews, for finding reasons not to buy a product; and the unhelpful reviews.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modeling the Helpful Opinion Mining of 25", |
| "sec_num": null |
| }, |
| { |
| "text": "Since there is no distinct boundary between the helpful and the unhelpful and since one purpose of the system is to filter out the most unhelpful reviews, the sizes of the three classes can be adjusted by setting different thresholds. A higher threshold filters out more data. We can control the filtering level by setting different thresholds.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modeling the Helpful Opinion Mining of 25", |
| "sec_num": null |
| }, |
| { |
| "text": "In our experiments, Class 1 includes positive reviews with 4 or 5 stars and the helpfulness confidence higher than the threshold. Class 2 includes negative reviews with 1 to 3 stars and the helpfulness confidence higher than the threshold. Class 3 is the remaining reviews, which are regarded as unhelpful, where the helpfulness confidence is lower than the threshold.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modeling the Helpful Opinion Mining of 25", |
| "sec_num": null |
| }, |
| { |
| "text": "The goal of the experiment is to test the filter accuracy of the three-class classification problem with different thresholds. We use the libSVM 1 toolkit to build the classifier, based on the features described in Section 2.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5." |
| }, |
| { |
| "text": "We divide the data into a training set and test set, consisting of 7,690 reviews and 1,000 reviews, respectively. The class distribution of the test data are balanced to one third for each class. The different thresholds tested in our experiment are 1.039, 1.5, and 2.0. The first threshold is the average confidence score in Table 5 , which filters out 56.1% of the reviews as unhelpful; the second threshold 1.5, filtering out 79.6%; and the third threshold 2.0, filtering out 91.0%. The numbers of useful (both positive and negative) reviews of each product domain to the three thresholds are listed in Tables 5, 7, and 9. The sizes of classes corresponding to the three thresholds are shown in Tables 6, 8, and 10. We conducted two experiments. The first one was a 10-fold validation on the training set, and the second one was a test on a separated test set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 326, |
| "end": 333, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Design", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The average accuracy of the 10-fold cross-validation result of each configuration is shown in Table 11 . The 7,690 training data were separated into ten folds, and the system used 90% of the data as the training set and the other 10% as the test set. A SVM classifier was trained in each fold and repeated 10 times. The result shows that, with a higher threshold, 1.5 or 2.0, the accuracy of our system is about 72%.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 94, |
| "end": 102, |
| "text": "Table 11", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Yi-Ching Zeng et al. In the second experiment, we used the 7,690 reviews as a training set and tested the classification on the 1,000 test set, where the number of tests of each class was balanced to 1/3. Note that the actual class of the test was fixed during the test, which corresponds to a threshold 1.039. The classifier was trained with three different class distributions. The confusion matrix of our system is shown in Tables 12 to 14. The precision and the recall of each class are also shown. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "28", |
| "sec_num": null |
| }, |
| { |
| "text": "To compare which features are more important in the classifier, we conducted a series experiments with one less feature each time. The results are shown in Table 15 . We can find that the \"detail\" feature is the most important. Second, third, and fourth are length, star, and unigram. Since detail is a hybrid feature, this result suggests that a hybrid feature works better than the combination of individual ones. Table 15 . Accuracy with all-minus-one features system recall for the three classes are 64%, 77%, and 87%. We also can find a similar result in Table 14 , where the threshold is 2.0. The precision is almost the same, and the recall is slightly different.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 156, |
| "end": 164, |
| "text": "Table 15", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 416, |
| "end": 424, |
| "text": "Table 15", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 560, |
| "end": 568, |
| "text": "Table 14", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Analysis Result", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "From Table 15 , we can find that the \"detail\" feature is the most important. Without it, the accuracy drops from 60.83% to 38.57%. Nevertheless, each feature helps the performance, so no one feature can be omitted. This result also suggests that more features might be necessary to attain higher performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 5, |
| "end": 13, |
| "text": "Table 15", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Analysis Result", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The paper reports how a system can find helpful online reviews, and the system is tested on a three-class classification problem. The threshold of helpful/unhelpful reviews can be decided according to the amount of data that the users want to prune. The overall accuracy of the three-class problem is about 73%. Helpful negative reviews can be found with 82% precision and 77% recall. Helpful positive reviews can be found with 74% precision and 64% recall. Unhelpful reviews can be filtered out automatically from the consumer reviews with a high recall rate of about 87% with 73% precision. Considering the original data distribution (only 20% as useful), the system performance is quite high. Currently, our system is based on features observed by humans in previous works, and we only implement some of them. In the future, we will try to implement more features and attempt to extract features from the training corpus automatically.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Works", |
| "sec_num": "6." |
| }, |
| { |
| "text": "http://www.csie.ntu.edu.tw/~cjlin/libsvm", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Yi-Ching Zeng et al.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This study was conducted under the \"Online and Offline Integrated Smart Commerce Platform (1/4)\" of the Institute for Information Industry, which is subsidized by the Ministry of Economic Affairs of the Republic of China. This study was partially supported by Research Grant NSC 102-2221-E-324 -034 from the Ministry of Science and Technology.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "All-(Detail) 38.569%All-(Compare) 52.152%All-(Pros_cons) 49.727% Table 11 shows that the average accuracy numbers of the three data sets are 60.83%, 72.72%, and 72.82%. We find that setting the threshold to 1.5 is expected to prune 79.6% of data; our system can get 72.72% accuracy on the helpful/unhelpful classification. This is a considerable reduction of human labor to find better mining candidates.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 65, |
| "end": 73, |
| "text": "Table 11", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features Accuracy", |
| "sec_num": null |
| }, |
| { |
| "text": "From the confusion matrix in Table 13 , we find that choosing the threshold as 1.5 enables our system to classify the three classes with precision 74%, 82%, and 73%; while the", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 29, |
| "end": 37, |
| "text": "Table 13", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion on the Experimental Result", |
| "sec_num": "5.4" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Is it the Review or the Reviewer? A Multi-Method Approach to Determine the Antecedents of Online Review Helpfulness", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Connors", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mudambi", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Schuff", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Hawaii International Conference on Systems Sciences (HICSS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Connors, L., Mudambi, S. M., & Schuff, D. (2011). Is it the Review or the Reviewer? A Multi-Method Approach to Determine the Antecedents of Online Review Helpfulness. In Proceedings of the 2011 Hawaii International Conference on Systems Sciences (HICSS), January.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Mining opinion features in customer reviews", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 19th national conference on Artifical intelligence (AAAI'04", |
| "volume": "", |
| "issue": "", |
| "pages": "755--760", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hu, M., & Liu, B. (2004). Mining opinion features in customer reviews. In Proceedings of the 19th national conference on Artifical intelligence (AAAI'04), Anthony G. Cohn (Ed.). AAAI Press 755-760.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Online Consumer Reviews as a Classification Problem", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Online Consumer Reviews as a Classification Problem", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Mining comparative sentences and relations", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Jindal", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "proceedings of the 21st national conference on Artificial intelligence", |
| "volume": "2", |
| "issue": "", |
| "pages": "1331--1336", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jindal, N., & Liu, B. (2006). Mining comparative sentences and relations. In proceedings of the 21st national conference on Artificial intelligence -Volume 2 (AAAI'06), Anthony Cohn (Ed.), Vol. 2. AAAI Press 1331-1336.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Automatically Assessing Review Helpfulness", |
| "authors": [ |
| { |
| "first": "S.-M", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Pantel", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Chklovski", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Pennacchiotti", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 2006 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "423--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kim, S.-M., Pantel, P., Chklovski, T., & Pennacchiotti, M. (2006). Automatically Assessing Review Helpfulness. In Proceedings of the 2006 Conference on Empirical Methods in Natural Language Processing, 423-430.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Assessing The Helpfulness Of Online Product Review: A Progressive Experimental Approach", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of PACIS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li, M., Huang, L., Tan, C., & Wei, K. (2011) Assessing The Helpfulness Of Online Product Review: A Progressive Experimental Approach. In Proceedings of PACIS.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Exploiting Social Context for Review Quality Prediction", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Tsaparas", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ntoulas", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Polanyi", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 19th international conference on World wide web", |
| "volume": "", |
| "issue": "", |
| "pages": "691--700", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lu, Y., Tsaparas, P., Ntoulas, A., & Polanyi, L. (2010). Exploiting Social Context for Review Quality Prediction. In Proceedings of the 19th international conference on World wide web, 691-700.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Review Recommendation: Personalized Prediction of the Quality of Online Reviews", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Moghaddam", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Jamali", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Ester", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 20th ACM international conference on Information and knowledge management", |
| "volume": "", |
| "issue": "", |
| "pages": "2249--2252", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moghaddam, S., Jamali, M., & Ester, M. (2010). Review Recommendation: Personalized Prediction of the Quality of Online Reviews. In Proceedings of the 20th ACM international conference on Information and knowledge management, 2249-2252.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "What Makes a Helpful Online Review? A Study of Customer Reviews on Amazon", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mudambi", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Schuff", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "MIS Quarterly", |
| "volume": "34", |
| "issue": "1", |
| "pages": "185--200", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mudambi, S. M., & Schuff, D. (2010). What Makes a Helpful Online Review? A Study of Customer Reviews on Amazon.com. MIS Quarterly, 34(1), 185-200.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "How useful are your comments?: analyzing and predicting youtube comments and comment ratings", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Siersdorfer", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Chelaru", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "San Pedro", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 19th international conference on World wide web", |
| "volume": "", |
| "issue": "", |
| "pages": "891--900", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siersdorfer, S., Chelaru, S., & San Pedro, J. (2010). How useful are your comments?: analyzing and predicting youtube comments and comment ratings. In Proceedings of the 19th international conference on World wide web, 891-900.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Automatically Predicting Peer-Review Helpfulness", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Litman", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "502--507", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiong, W., & Litman, D. (2011). Automatically Predicting Peer-Review Helpfulness. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics, 502-507.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Example of review", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": "Stars vs. helpfulness distribution of our data collection. The x-axis is the number of stars of customer reviews; the y-axis is the confidence score LSC.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "text": "", |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "num": null, |
| "html": null, |
| "text": "Modeling the Helpful Opinion Mining of 23 Online Consumer Reviews as a Classification Problem", |
| "content": "<table><tr><td>Product</td><td>Reviews</td><td>Total Reviews Words</td><td>Average Length</td><td>s.d.</td></tr><tr><td>Book</td><td>1,065</td><td>93,497</td><td>87.79</td><td>1.8</td></tr><tr><td>Digital Camera</td><td>1,028</td><td>93,404</td><td>90.85</td><td>2.7</td></tr><tr><td>Computer</td><td>1,067</td><td>83,708</td><td>78.45</td><td>2.1</td></tr><tr><td>Foods & Drink</td><td>1,025</td><td>71,027</td><td>69.29</td><td>1.7</td></tr><tr><td>Movies</td><td>1,097</td><td>94,037</td><td>88.13</td><td>2.5</td></tr><tr><td>Shoes</td><td>1,000</td><td>75,237</td><td>75.23</td><td>1.6</td></tr><tr><td>Toys</td><td>1,100</td><td>85,196</td><td>77.45</td><td>1.7</td></tr><tr><td>Cell Phone</td><td>1,308</td><td>101,957</td><td>77.88</td><td>2.0</td></tr><tr><td>Total / Average</td><td>8,690</td><td>884,964</td><td>80.63</td><td>2.02</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "num": null, |
| "html": null, |
| "text": "", |
| "content": "<table><tr><td>Product</td><td>Average LSC Confidence score</td></tr><tr><td>Book</td><td>1.134</td></tr><tr><td>Digital Camera</td><td>1.373</td></tr><tr><td>Computer</td><td>1.140</td></tr><tr><td>Foods & Drink</td><td>0.932</td></tr><tr><td>Movies</td><td>1.116</td></tr><tr><td>Shoes</td><td>0.808</td></tr><tr><td>Toys</td><td>0.807</td></tr><tr><td>Cell Phone</td><td>1.005</td></tr><tr><td>Total average</td><td>1.039</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "num": null, |
| "html": null, |
| "text": "", |
| "content": "<table><tr><td>Book</td><td>522</td></tr><tr><td>Digital Camera</td><td>698</td></tr><tr><td>Computer</td><td>532</td></tr><tr><td>Foods & Drink</td><td>404</td></tr><tr><td>Movies</td><td>521</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "num": null, |
| "html": null, |
| "text": "", |
| "content": "<table><tr><td>\"1.5\"</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF7": { |
| "num": null, |
| "html": null, |
| "text": "", |
| "content": "<table><tr><td>Data set</td><td>Average Accuracy</td></tr><tr><td>LSC threshold 1.039</td><td>60.83%</td></tr><tr><td>LSC threshold 1.5</td><td>72.72%</td></tr><tr><td>LSC threshold 2.0</td><td>72.82%</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF8": { |
| "num": null, |
| "html": null, |
| "text": "", |
| "content": "<table><tr><td>Predicted</td><td colspan=\"3\">Actual Class 1 Class 2 Class 3</td><td>Total</td><td>Precision</td></tr><tr><td>Class 1</td><td>172</td><td>75</td><td>46</td><td>293</td><td>59%</td></tr><tr><td>Class 2</td><td>80</td><td>196</td><td>24</td><td>300</td><td>65%</td></tr><tr><td>Class 3</td><td>81</td><td>62</td><td>264</td><td>407</td><td>65%</td></tr><tr><td>Total</td><td>333</td><td>333</td><td>334</td><td>1,000</td><td/></tr><tr><td>Recall</td><td>52%</td><td>59%</td><td>79%</td><td/><td/></tr><tr><td>Predicted</td><td colspan=\"3\">Actual Class 1 Class 2 Class 3</td><td>Total</td><td>Precision</td></tr><tr><td>Class 1</td><td>213</td><td>47</td><td>28</td><td>288</td><td>74%</td></tr><tr><td>Class 2</td><td>42</td><td>257</td><td>14</td><td>313</td><td>82%</td></tr><tr><td>Class 3</td><td>78</td><td>29</td><td>292</td><td>399</td><td>73%</td></tr><tr><td>Total</td><td>333</td><td>333</td><td>334</td><td>1,000</td><td/></tr><tr><td>Recall</td><td>64%</td><td>77%</td><td>87%</td><td/><td/></tr></table>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |