| { |
| "paper_id": "P09-1027", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:54:34.592680Z" |
| }, |
| "title": "Co-Training for Cross-Lingual Sentiment Classification", |
| "authors": [ |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Key Laboratory of Computational Linguistics", |
| "institution": "MOE Peking University", |
| "location": { |
| "postCode": "100871", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "wanxiaojun@icst.pku.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The lack of Chinese sentiment corpora limits the research progress on Chinese sentiment classification. However, there are many freely available English sentiment corpora on the Web. This paper focuses on the problem of cross-lingual sentiment classification, which leverages an available English corpus for Chinese sentiment classification by using the English corpus as training data. Machine translation services are used for eliminating the language gap between the training set and test set, and English features and Chinese features are considered as two independent views of the classification problem. We propose a co-training approach to making use of unlabeled Chinese data. Experimental results show the effectiveness of the proposed approach, which can outperform the standard inductive classifiers and the transductive classifiers.", |
| "pdf_parse": { |
| "paper_id": "P09-1027", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The lack of Chinese sentiment corpora limits the research progress on Chinese sentiment classification. However, there are many freely available English sentiment corpora on the Web. This paper focuses on the problem of cross-lingual sentiment classification, which leverages an available English corpus for Chinese sentiment classification by using the English corpus as training data. Machine translation services are used for eliminating the language gap between the training set and test set, and English features and Chinese features are considered as two independent views of the classification problem. We propose a co-training approach to making use of unlabeled Chinese data. Experimental results show the effectiveness of the proposed approach, which can outperform the standard inductive classifiers and the transductive classifiers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Sentiment classification is the task of identifying the sentiment polarity of a given text. The sentiment polarity is usually positive or negative and the text genre is usually product review. In recent years, sentiment classification has drawn much attention in the NLP field and it has many useful applications, such as opinion mining and summarization (Ku et al., 2006; Titov and McDonald, 2008) .", |
| "cite_spans": [ |
| { |
| "start": 355, |
| "end": 372, |
| "text": "(Ku et al., 2006;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 373, |
| "end": 398, |
| "text": "Titov and McDonald, 2008)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To date, a variety of corpus-based methods have been developed for sentiment classification. The methods usually rely heavily on an annotated corpus for training the sentiment classifier. The sentiment corpora are considered as the most valuable resources for the sentiment classification task. However, such resources in different languages are very imbalanced. Because most previous work focuses on English sentiment classification, many annotated corpora for English sentiment classification are freely available on the Web. However, the annotated corpora for Chinese sentiment classification are scarce and it is not a trivial task to manually label reliable Chinese sentiment corpora. The challenge before us is how to leverage rich English corpora for Chinese sentiment classification. In this study, we focus on the problem of cross-lingual sentiment classification, which leverages only English training data for supervised sentiment classification of Chinese product reviews, without using any Chinese resources. Note that the above problem is not only defined for Chinese sentiment classification, but also for various sentiment analysis tasks in other different languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Though pilot studies have been performed to make use of English corpora for subjectivity classification in other languages (Mihalcea et al., 2007; Banea et al., 2008) , the methods are very straightforward by directly employing an inductive classifier (e.g. SVM, NB), and the classification performance is far from satisfactory because of the language gap between the original language and the translated language.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 146, |
| "text": "(Mihalcea et al., 2007;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 147, |
| "end": 166, |
| "text": "Banea et al., 2008)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this study, we propose a co-training approach to improving the classification accuracy of polarity identification of Chinese product reviews. Unlabeled Chinese reviews can be fully leveraged in the proposed approach. First, machine translation services are used to translate English training reviews into Chinese reviews and also translate Chinese test reviews and additional unlabeled reviews into English reviews. Then, we can view the classification problem in two independent views: Chinese view with only Chinese features and English view with only English features. We then use the co-training approach to making full use of the two redundant views of features. The SVM classifier is adopted as the basic classifier in the proposed approach. Experimental results show that the proposed approach can outperform the baseline inductive classifiers and the more advanced transductive classifiers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of this paper is organized as follows: Section 2 introduces related work. The proposed co-training approach is described in detail in Section 3. Section 4 shows the experimental results. Lastly we conclude this paper in Section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Sentiment classification can be performed on words, sentences or documents. In this paper we focus on document sentiment classification. The methods for document sentiment classification can be generally categorized into lexicon-based and corpus-based.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Classification", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Lexicon-based methods usually involve deriving a sentiment measure for text based on sentiment lexicons. Turney (2002) predicates the sentiment orientation of a review by the average semantic orientation of the phrases in the review that contain adjectives or adverbs, which is denoted as the semantic oriented method. Kim and Hovy (2004) build three models to assign a sentiment category to a given sentence by combining the individual sentiments of sentimentbearing words. Hiroshi et al. (2004) use the technique of deep language analysis for machine translation to extract sentiment units in text documents. Kennedy and Inkpen (2006) determine the sentiment of a customer review by counting positive and negative terms and taking into account contextual valence shifters, such as negations and intensifiers. Devitt and Ahmad (2007) explore a computable metric of positive or negative polarity in financial news text.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 118, |
| "text": "Turney (2002)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 319, |
| "end": 338, |
| "text": "Kim and Hovy (2004)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 475, |
| "end": 496, |
| "text": "Hiroshi et al. (2004)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 611, |
| "end": 636, |
| "text": "Kennedy and Inkpen (2006)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 811, |
| "end": 834, |
| "text": "Devitt and Ahmad (2007)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Classification", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Corpus-based methods usually consider the sentiment analysis task as a classification task and they use a labeled corpus to train a sentiment classifier. Since the work of Pang et al. (2002) , various classification models and linguistic features have been proposed to improve the classification performance (Pang and Lee, 2004; Mullen and Collier, 2004; Wilson et al., 2005; Read, 2005) . Most recently, McDonald et al. (2007) investigate a structured model for jointly classifying the sentiment of text at varying levels of granularity. Blitzer et al. (2007) investigate domain adaptation for sentiment classifiers, focusing on online reviews for different types of products. Andreevskaia and Bergler (2008) present a new system consisting of the ensemble of a corpus-based classifier and a lexicon-based classifier with precision-based vote weighting.", |
| "cite_spans": [ |
| { |
| "start": 172, |
| "end": 190, |
| "text": "Pang et al. (2002)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 308, |
| "end": 328, |
| "text": "(Pang and Lee, 2004;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 329, |
| "end": 354, |
| "text": "Mullen and Collier, 2004;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 355, |
| "end": 375, |
| "text": "Wilson et al., 2005;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 376, |
| "end": 387, |
| "text": "Read, 2005)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 390, |
| "end": 427, |
| "text": "Most recently, McDonald et al. (2007)", |
| "ref_id": null |
| }, |
| { |
| "start": 539, |
| "end": 560, |
| "text": "Blitzer et al. (2007)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 678, |
| "end": 709, |
| "text": "Andreevskaia and Bergler (2008)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Classification", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Chinese sentiment analysis has also been studied (Tsou et al., 2005; Ye et al., 2006; Li and Sun, 2007) and most such work uses similar lexicon-based or corpus-based methods for Chinese sentiment classification.", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 68, |
| "text": "(Tsou et al., 2005;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 69, |
| "end": 85, |
| "text": "Ye et al., 2006;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 86, |
| "end": 103, |
| "text": "Li and Sun, 2007)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Classification", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "To date, several pilot studies have been performed to leverage rich English resources for sentiment analysis in other languages. Standard Na\u00efve Bayes and SVM classifiers have been applied for subjectivity classification in Romanian (Mihalcea et al., 2007; Banea et al., 2008) , and the results show that automatic translation is a viable alternative for the construction of resources and tools for subjectivity analysis in a new target language. Wan (2008) focuses on leveraging both Chinese and English lexicons to improve Chinese sentiment analysis by using lexicon-based methods. In this study, we focus on improving the corpus-based method for cross-lingual sentiment classification of Chinese product reviews by developing novel approaches.", |
| "cite_spans": [ |
| { |
| "start": 232, |
| "end": 255, |
| "text": "(Mihalcea et al., 2007;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 256, |
| "end": 275, |
| "text": "Banea et al., 2008)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 446, |
| "end": 456, |
| "text": "Wan (2008)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Classification", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Cross-domain text classification can be considered as a more general task than cross-lingual sentiment classification. In the problem of crossdomain text classification, the labeled and unlabeled data come from different domains, and their underlying distributions are often different from each other, which violates the basic assumption of traditional classification learning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Domain Text Classification", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "To date, many semi-supervised learning algorithms have been developed for addressing the cross-domain text classification problem by transferring knowledge across domains, including Transductive SVM (Joachims, 1999) , EM (Nigam et al., 2000) , EM-based Na\u00efve Bayes classifier (Dai et al., 2007a) , Topic-bridged PLSA , Co-Clustering based classification (Dai et al., 2007b) , two-stage approach (Jiang and Zhai, 2007) . Daum\u00e9III and Marcu (2006) introduce a statistical formulation of this problem in terms of a simple mixture model.", |
| "cite_spans": [ |
| { |
| "start": 199, |
| "end": 215, |
| "text": "(Joachims, 1999)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 221, |
| "end": 241, |
| "text": "(Nigam et al., 2000)", |
| "ref_id": null |
| }, |
| { |
| "start": 276, |
| "end": 295, |
| "text": "(Dai et al., 2007a)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 354, |
| "end": 373, |
| "text": "(Dai et al., 2007b)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 395, |
| "end": 417, |
| "text": "(Jiang and Zhai, 2007)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 420, |
| "end": 445, |
| "text": "Daum\u00e9III and Marcu (2006)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Domain Text Classification", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In particular, several previous studies focus on the problem of cross-lingual text classification, which can be considered as a special case of general cross-domain text classification. Bel et al. (2003) present practical and cost-effective solutions. A few novel models have been proposed to address the problem, e.g. the EM-based algorithm (Rigutini et al., 2005) , the information bottleneck approach (Ling et al., 2008) , the multilingual domain models (Gliozzo and Strapparava, 2005) , etc. To the best of our knowledge, co-training has not yet been investigated for cross-domain or cross-lingual text classification.", |
| "cite_spans": [ |
| { |
| "start": 186, |
| "end": 203, |
| "text": "Bel et al. (2003)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 342, |
| "end": 365, |
| "text": "(Rigutini et al., 2005)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 404, |
| "end": 423, |
| "text": "(Ling et al., 2008)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 457, |
| "end": 488, |
| "text": "(Gliozzo and Strapparava, 2005)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Domain Text Classification", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The purpose of our approach is to make use of the annotated English corpus for sentiment polarity identification of Chinese reviews in a supervised framework, without using any Chinese resources. Given the labeled English reviews and unlabeled Chinese reviews, two straightforward methods for addressing the problem are as follows: 1) We first learn a classifier based on the labeled English reviews, and then translate Chinese reviews into English reviews. Lastly, we use the classifier to classify the translated English reviews.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "2) We first translate the labeled English reviews into Chinese reviews, and then learn a classifier based on the translated Chinese reviews with labels. Lastly, we use the classifier to classify the unlabeled Chinese reviews.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The above two methods have been used in (Banea et al., 2008) for Romanian subjectivity analysis, but the experimental results are not very promising. As shown in our experiments, the above two methods do not perform well for Chinese sentiment classification, either, because the underlying distribution between the original language and the translated language are different.", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 60, |
| "text": "(Banea et al., 2008)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In order to address the above problem, we propose to use the co-training approach to make use of some amounts of unlabeled Chinese reviews to improve the classification accuracy. The co-training approach can make full use of both the English features and the Chinese features in a unified framework. The framework of the proposed approach is illustrated in Figure 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 357, |
| "end": 365, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The framework consists of a training phase and a classification phase. In the training phase, the input is the labeled English reviews and some amounts of unlabeled Chinese reviews 1 . The labeled English reviews are translated into labeled Chinese reviews, and the unlabeled Chinese reviews are translated into unlabeled English reviews, by using machine translation services. Therefore, each review is associated with an English version and a Chinese version. The English features and the Chinese features for each review are considered two independent and redundant views of the review. The co-training algorithm is then applied to learn two classifiers 1 The unlabeled Chinese reviews used for co-training do not include the unlabeled Chinese reviews for testing, i.e., the Chinese reviews for testing are blind to the training phase. and finally the two classifiers are combined into a single sentiment classifier. In the classification phase, each unlabeled Chinese review for testing is first translated into English review, and then the learned classifier is applied to classify the review into either positive or negative.", |
| "cite_spans": [ |
| { |
| "start": 657, |
| "end": 658, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The steps of review translation and the co-training algorithm are described in details in the next sections, respectively. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In order to overcome the language gap, we must translate one language into another language. Fortunately, machine translation techniques have been well developed in the NLP field, though the translation performance is far from satisfactory. A few commercial machine translation services can be publicly accessed, e.g. In this study, we adopt Google Translate for both English-to-Chinese Translation and Chinese-to-English Translation, because it is one of the state-of-the-art commercial machine translation systems used today. Google Translate applies statistical learning techniques to build a translation model based on both monolingual text in the target language and aligned text consisting of examples of human translations between the languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Review Translation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The co-training algorithm (Blum and Mitchell, 1998 ) is a typical bootstrapping method, which starts with a set of labeled data, and increase the amount of annotated data using some amounts of unlabeled data in an incremental way. One important aspect of co-training is that two conditional independent views are required for cotraining to work, but the independence assumption can be relaxed. Till now, co-training has been successfully applied to statistical parsing (Sarkar, 2001) , reference resolution (Ng and Cardie, 2003) , part of speech tagging (Clark et al., 2003) , word sense disambiguation (Mihalcea, 2004) and email classification (Kiritchenko and Matwin, 2001 ).", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 50, |
| "text": "(Blum and Mitchell, 1998", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 469, |
| "end": 483, |
| "text": "(Sarkar, 2001)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 507, |
| "end": 528, |
| "text": "(Ng and Cardie, 2003)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 554, |
| "end": 574, |
| "text": "(Clark et al., 2003)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 603, |
| "end": 619, |
| "text": "(Mihalcea, 2004)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 645, |
| "end": 674, |
| "text": "(Kiritchenko and Matwin, 2001", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Co-Training Algorithm", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In the context of cross-lingual sentiment classification, each labeled English review or unlabeled Chinese review has two views of features: English features and Chinese features. Here, a review is used to indicate both its Chinese version and its English version, until stated otherwise. The co-training algorithm is illustrated in Figure 2 . In the algorithm, the class distribution in the labeled data is maintained by balancing the parameter values of p and n at each iteration.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 333, |
| "end": 341, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Co-Training Algorithm", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The intuition of the co-training algorithm is that if one classifier can confidently predict the class of an example, which is very similar to some of labeled ones, it can provide one more training example for the other classifier. But, of course, if this example happens to be easy to be classified by the first classifier, it does not mean that this example will be easy to be classified by the second classifier, so the second classifier will get useful information to improve itself and vice versa (Kiritchenko and Matwin, 2001 ).", |
| "cite_spans": [ |
| { |
| "start": 502, |
| "end": 531, |
| "text": "(Kiritchenko and Matwin, 2001", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Co-Training Algorithm", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In the co-training algorithm, a basic classification algorithm is required to construct C en and C cn . Typical text classifiers include Support Vector Machine (SVM), Na\u00efve Bayes (NB), Maximum Entropy (ME), K-Nearest Neighbor (KNN), etc. In this study, we adopt the widely-used SVM classifier (Joachims, 2002) . Viewing input data as two sets of vectors in a feature space, SVM constructs a separating hyperplane in the space by maximizing the margin between the two data sets. The English or Chinese features used in this study include both unigrams and bigrams 5 and the feature weight is simply set to term frequency 6 . Feature selection methods (e.g. Document Frequency (DF), Information Gain (IG), and Mutual Information (MI)) can be used for dimension reduction. But we use all the features in the experiments for comparative analysis, because there is no significant performance improvement after applying the feature selection techniques in our empirical study. The output value of the SVM classifier for a review indicates the confidence level of the review's classification. Usually, the sentiment polarity of a review is indicated by the sign of the prediction value.", |
| "cite_spans": [ |
| { |
| "start": 293, |
| "end": 309, |
| "text": "(Joachims, 2002)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Co-Training Algorithm", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Given:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Co-Training Algorithm", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "-F en and F cn are redundantly sufficient sets of features, where F en represents the English features, F cn represents the Chinese features; -L is a set of labeled training reviews; -U is a set of unlabeled reviews; Loop for I iterations:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Co-Training Algorithm", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "1. Learn the first classifier C en from L based on F en ; 2. Use C en to label reviews from U based on F en ; 3. Choose p positive and n negative the most confidently predicted reviews E en from U; 4. Learn the second classifier C cn from L based on F cn ; 5. Use C cn to label reviews from U based on F cn ; 6. Choose p positive and n negative the most confidently predicted reviews E cn from U; 7. Removes reviews E en \u222aE cn from U 7 ; 8. Add reviews E en \u222aE cn with the corresponding labels to L; Figure 2 . The co-training algorithm", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 500, |
| "end": 508, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Co-Training Algorithm", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In the training phase, the co-training algorithm learns two separate classifiers: C en and C cn . Therefore, in the classification phase, we can obtain two prediction values for a test review. We normalize the prediction values into [-1, 1] by dividing the maximum absolute value. Finally, the average of the normalized values is used as the overall prediction value of the review.", |
| "cite_spans": [ |
| { |
| "start": 233, |
| "end": 240, |
| "text": "[-1, 1]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Co-Training Algorithm", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The following three datasets were collected and used in the experiments:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data set", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "Test Set (Labeled Chinese Reviews): In order to assess the performance of the proposed approach, we collected and labeled 886 product reviews (451 positive reviews + 435 negative reviews) from a popular Chinese IT product web site-IT168 8 . The reviews focused on such products as mp3 players, mobile phones, digital camera and laptop computers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data set", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "Training Set (Labeled English Reviews): There are many labeled English corpora available on the Web and we used the corpus constructed for multi-domain sentiment classification (Blitzer et al., 2007) 9 , because the corpus was large-scale and it was within similar domains as the test set. The dataset consisted of 8000 Amazon product reviews (4000 positive reviews + 4000 negative reviews) for four different product types: books, DVDs, electronics and kitchen appliances.", |
| "cite_spans": [ |
| { |
| "start": 177, |
| "end": 199, |
| "text": "(Blitzer et al., 2007)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data set", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "Unlabeled Set (Unlabeled Chinese Reviews): We downloaded additional 1000 Chinese product reviews from IT168 and used the reviews as the unlabeled set. Therefore, the unlabeled set and the test set were in the same domain and had similar underlying feature distributions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data set", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "Each Chinese review was translated into English review, and each English review was translated into Chinese review. Therefore, each review has two independent views: English view and Chinese view. A review is represented by both its English view and its Chinese view.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data set", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "Note that the training set and the unlabeled set are used in the training phase, while the test set is blind to the training phase.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data set", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "We used the standard precision, recall and Fmeasure to measure the performance of positive and negative class, respectively, and used the accuracy metric to measure the overall performance of the system. The metrics are defined the same as in general text categorization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metric", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "In the experiments, the proposed co-training approach (CoTrain) is compared with the following baseline methods: SVM(CN): This method applies the inductive SVM with only Chinese features for sentiment classification in the Chinese view. Only Englishto-Chinese translation is needed. And the unlabeled set is not used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Methods", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "This method applies the inductive SVM with only English features for sentiment classification in the English view. Only Chineseto-English translation is needed. And the unlabeled set is not used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SVM(EN):", |
| "sec_num": null |
| }, |
| { |
| "text": "This method applies the inductive SVM with both English and Chinese features for sentiment classification in the two views. Both English-to-Chinese and Chinese-to-English translations are required. And the unlabeled set is not used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SVM(ENCN1):", |
| "sec_num": null |
| }, |
| { |
| "text": "This method combines the results of SVM(EN) and SVM(CN) by averaging the prediction values in the same way with the co-training approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SVM(ENCN2):", |
| "sec_num": null |
| }, |
| { |
| "text": "This method applies the transductive SVM with only Chinese features for sentiment classification in the Chinese view. Only English-to-Chinese translation is needed. And the unlabeled set is used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TSVM(CN):", |
| "sec_num": null |
| }, |
| { |
| "text": "This method applies the transductive SVM with only English features for sentiment classification in the English view. Only Chinese-to-English translation is needed. And the unlabeled set is used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TSVM(EN):", |
| "sec_num": null |
| }, |
| { |
| "text": "This method applies the transductive SVM with both English and Chinese features for sentiment classification in the two views. Both English-to-Chinese and Chinese-to-English translations are required. And the unlabeled set is used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TSVM(ENCN1):", |
| "sec_num": null |
| }, |
| { |
| "text": "This method combines the results of TSVM(EN) and TSVM(CN) by averaging the prediction values.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TSVM(ENCN2):", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that the first four methods are straightforward methods used in previous work, while the latter four methods are strong baselines because the transductive SVM has been widely used for improving the classification accuracy by leveraging additional unlabeled examples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TSVM(ENCN2):", |
| "sec_num": null |
| }, |
| { |
| "text": "In the experiments, we first compare the proposed co-training approach (I=40 and p=n=5) with the eight baseline methods. The three parameters in the co-training approach are empirically set by considering the total number (i.e. 1000) of the unlabeled Chinese reviews. In our empirical study, the proposed approach can perform well with a wide range of parameter values, which will be shown later. Table 1 shows the comparison results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 397, |
| "end": 404, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "Seen from the table, the proposed co-training approach outperforms all eight baseline methods over all metrics. Among the eight baselines, the best one is TSVM(ENCN2), which combines the results of two transductive SVM classifiers. Actually, TSVM(ENCN2) is similar to CoTrain because CoTrain also combines the results of two classifiers in the same way. However, the co-training approach can train two more effective classifiers, and the accuracy values of the component English and Chinese classifiers are 0.775 and 0.790, respectively, which are higher than the corresponding TSVM classifiers. Overall, the use of transductive learning and the combination of English and Chinese views are beneficial to the final classification accuracy, and the cotraining approach is more suitable for making use of the unlabeled Chinese reviews than the transductive SVM. Figure 3 shows the accuracy curve of the cotraining approach (Combined Classifier) with different numbers of iterations. The iteration number I is varied from 1 to 80. When I is set to 1, the co-training approach is degenerated into SVM(ENCN2). The accuracy curves of the component English and Chinese classifiers learned in the co-training approach are also shown in the figure. We can see that the proposed co-training approach can outperform the best baseline-TSVM(ENCN2) after 20 iterations. After a large number of iterations, the performance of the cotraining approach decreases because noisy training examples may be selected from the remaining unlabeled set. Finally, the performance of the approach does not change any more, because the algorithm runs out of all possible examples in the unlabeled set. Fortunately, the proposed approach performs well with a wide range of iteration numbers. We can also see that the two component classifier has similar trends with the cotraining approach. 
It is encouraging that the component Chinese classifier alone can perform better than the best baseline when the iteration number is set between 40 and 70. Figure 4 shows how the growth size at each iteration (p positive and n negative confident examples) influences the accuracy of the proposed co-training approach. In the above experiments, we set p=n, which is considered as a balanced growth. When p differs very much from n, the growth is considered as an imbalanced growth. Balanced growth of (2, 2), (5, 5), (10, 10) and (15, 15) examples and imbalanced growth of (1, 5), (5, 1) examples are compared in the figure. We can see that the performance of the cotraining approach with the balanced growth can be improved after a few iterations. And the performance of the co-training approach with large p and n will more quickly become unchanged, because the approach runs out of the limited examples in the unlabeled set more quickly. However, the performance of the co-training approaches with the two imbalanced growths is always going down quite rapidly, because the labeled unbalanced examples hurt the performance badly at each iteration. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 860, |
| "end": 868, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 2016, |
| "end": 2024, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "In the above experiments, all features (unigram + bigram) are used. As mentioned earlier, feature selection techniques are widely used for dimension reduction. In this section, we further conduct experiments to investigate the influences of feature selection techniques on the classification results. We use the simple but effective document frequency (DF) for feature selection. Figures 6 show the comparison results of different feature sizes for the co-training approach and two strong baselines. The feature size is measured as the proportion of the selected features against the total features (i.e. 100%).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 380, |
| "end": 389, |
| "text": "Figures 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Influences of Feature Selection", |
| "sec_num": "4.2.4" |
| }, |
| { |
| "text": "We can see from the figure that the feature selection technique has very slight influences on the classification accuracy of the methods. It can be seen that the co-training approach can always outperform the two baselines with different feature sizes. The results further demonstrate the effectiveness and robustness of the proposed cotraining approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Influences of Feature Selection", |
| "sec_num": "4.2.4" |
| }, |
| { |
| "text": "In this paper, we propose to use the co-training approach to address the problem of cross-lingual sentiment classification. The experimental results show the effectiveness of the proposed approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In future work, we will improve the sentiment classification accuracy in the following two ways:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "1) The smoothed co-training approach used in (Mihalcea, 2004) will be adopted for sentiment classification. The approach has the effect of \"smoothing\" the learning curves. During the bootstrapping process of smoothed co-training, the classifier at each iteration is replaced with a majority voting scheme applied to all classifiers constructed at previous iterations.", |
| "cite_spans": [ |
| { |
| "start": 45, |
| "end": 61, |
| "text": "(Mihalcea, 2004)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "2) The feature distributions of the translated text and the natural text in the same language are still different due to the inaccuracy of the machine translation service. We will employ the structural correspondence learning (SCL) domain adaption algorithm used in (Blitzer et al., 2007) for linking the translated text and the natural text.", |
| "cite_spans": [ |
| { |
| "start": 266, |
| "end": 288, |
| "text": "(Blitzer et al., 2007)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "http://translate.google.com/translate_t 3 http://babelfish.yahoo.com/translate_txt 4 http://www.windowslivetranslator.com/ Unlabeled Chinese Reviews Labeled English Reviews", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For Chinese text, a unigram refers to a Chinese word and a bigram refers to two adjacent Chinese words.6 Term frequency performs better than TFIDF by our empirical analysis.7 Note that the examples with conflicting labels are not included in E en \u222aE cn In other words, if an example is in both E en and E cn , but the labels for the example is conflicting, the example will be excluded from E en \u222aE cn.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.it168.com 9 http://www.cis.upenn.edu/~mdredze/datasets/sentiment/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by NSFC (60873155), RFDP (20070001059), Beijing Nova Program (2008B03), National High-tech R&D Program (2008AA01Z421) and NCET (NCET-08-0006). We also thank the anonymous reviewers for their useful comments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "When specialists and generalists work together: overcoming domain dependence in sentiment tagging", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Andreevskaia", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Bergler", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Andreevskaia and S. Bergler. 2008. When special- ists and generalists work together: overcoming domain dependence in sentiment tagging. In Pro- ceedings of ACL-08: HLT.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Multilingual subjectivity analysis using machine translation", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Banea", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hassan", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C. Banea, R. Mihalcea, J. Wiebe and S. Hassan. 2008. Multilingual subjectivity analysis using machine translation. In Proceedings of EMNLP-2008.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Cross-lingual text categorization", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Bel", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "H A" |
| ], |
| "last": "Koster", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Villegas", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of ECDL-03", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "N. Bel, C. H. A. Koster, and M. Villegas. 2003. Cross-lingual text categorization. In Proceedings of ECDL-03.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Biographies, bollywood, boom-boxes and blenders: domain adaptation for sentiment classification", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Blitzer", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL-07", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Blitzer, M. Dredze and F. Pereira. 2007. Biogra- phies, bollywood, boom-boxes and blenders: do- main adaptation for sentiment classification. In Proceedings of ACL-07.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Combining labeled and unlabeled data with cotraining", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Blum", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of COLT-98", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Blum and T. Mitchell. 1998. Combining labeled and unlabeled data with cotraining. In Proceedings of COLT-98.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Ensemble methods for unsupervised WSD", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Brody", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of COLING-ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Brody, R. Navigli and M. Lapata. 2006. Ensemble methods for unsupervised WSD. In Proceedings of COLING-ACL-2006.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Bootstrapping POS taggers using unlabelled data", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Curran", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of CoNLL-2003", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Clark, J. R. Curran, and M. Osborne. 2003. Boot- strapping POS taggers using unlabelled data. In Proceedings of CoNLL-2003.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Transferring Na\u00efve Bayes Classifiers for text classification", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "G.-R", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of AAAI-07", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "W. Dai, G.-R. Xue, Q. Yang, Y. Yu. 2007a. Transfer- ring Na\u00efve Bayes Classifiers for text classification. In Proceedings of AAAI-07.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Coclustering based classification for out-of-domain documents", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "G.-R", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of KDD-07", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "W. Dai, G.-R. Xue, Q. Yang, Y. Yu. 2007b. Co- clustering based classification for out-of-domain documents. In Proceedings of KDD-07.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Domain adaptation for statistical classifiers", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Daum\u00e9iii", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "26", |
| "issue": "", |
| "pages": "101--126", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Daum\u00e9III and D. Marcu. 2006. Domain adaptation for statistical classifiers. Journal of Artificial Intel- ligence Research, 26:101-126.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Sentiment polarity identification in financial news: a cohesion-based approach", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Devitt", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Ahmad", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL2007", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Devitt and K. Ahmad. 2007. Sentiment polarity identification in financial news: a cohesion-based approach. In Proceedings of ACL2007.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Machine learning research: four current directions. AI Magazine", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "G" |
| ], |
| "last": "Dietterich", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "18", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. G. Dietterich. 1997. Machine learning research: four current directions. AI Magazine, 18(4), 1997.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Cross language text categorization by acquiring multilingual domain models from comparable corpora", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gliozzo", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Strapparava", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the ACL Workshop on Building and Using Parallel Texts", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Gliozzo and C. Strapparava. 2005. Cross language text categorization by acquiring multilingual do- main models from comparable corpora. In Pro- ceedings of the ACL Workshop on Building and Using Parallel Texts.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Deeper sentiment analysis using machine translation technology", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Hiroshi", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Tetsuya", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Hideo", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of COLING-04", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Hiroshi, N. Tetsuya and W. Hideo. 2004. Deeper sentiment analysis using machine translation tech- nology. In Proceedings of COLING-04.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A two-stage approach to domain adaptation for statistical classifiers", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of CIKM-07", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Jiang and C. Zhai. 2007. A two-stage approach to domain adaptation for statistical classifiers. In Pro- ceedings of CIKM-07.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Transductive inference for text classification using support vector machines", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of ICML-99", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Joachims. 1999. Transductive inference for text classification using support vector machines. In Proceedings of ICML-99.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Learning to classify text using support vector machines", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Joachims. 2002. Learning to classify text using support vector machines. Dissertation, Kluwer, 2002.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Sentiment classification of movie reviews using contextual valence shifters", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Kennedy", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Computational Intelligence", |
| "volume": "22", |
| "issue": "2", |
| "pages": "110--125", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Kennedy and D. Inkpen. 2006. Sentiment classifi- cation of movie reviews using contextual valence shifters. Computational Intelligence, 22(2):110- 125.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Determining the sentiment of opinions", |
| "authors": [ |
| { |
| "first": "S.-M", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of COLING-04", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S.-M. Kim and E. Hovy. 2004. Determining the sen- timent of opinions. In Proceedings of COLING-04.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Email classification with co-training", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Matwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the 2001 Conference of the Centre for Advanced Studies on Collaborative Research", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Kiritchenko and S. Matwin. 2001. Email classifica- tion with co-training. In Proceedings of the 2001 Conference of the Centre for Advanced Studies on Collaborative Research.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Opinion extraction, summarization and tracking in news and blog corpora", |
| "authors": [ |
| { |
| "first": "L.-W", |
| "middle": [], |
| "last": "Ku", |
| "suffix": "" |
| }, |
| { |
| "first": "Y.-T", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "H.-H", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of AAAI-2006", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L.-W. Ku, Y.-T. Liang and H.-H. Chen. 2006. Opin- ion extraction, summarization and tracking in news and blog corpora. In Proceedings of AAAI-2006.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Experimental study on sentiment classification of Chinese review using machine learning techniques", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceeding of IEEE-NLPKE-07", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Li and M. Sun. 2007. Experimental study on senti- ment classification of Chinese review using ma- chine learning techniques. In Proceeding of IEEE- NLPKE-07.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Can Chinese Web pages be classified with English data source?", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "G.-R", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of WWW-08", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "X. Ling, W. Dai, Y. Jiang, G.-R. Xue, Q. Yang, and Y. Yu. 2008. Can Chinese Web pages be classified with English data source? In Proceedings of WWW-08.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Opinion observer: Analyzing and comparing opinions on the web", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of WWW-2005", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Liu, M. Hu and J. Cheng. 2005. Opinion observer: Analyzing and comparing opinions on the web. In Proceedings of WWW-2005.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Structured models for fine-to-coarse sentiment analysis", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Hannan", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Neylon", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Wells", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Reynar", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL-07", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. McDonald, K. Hannan, T. Neylon, M. Wells and J. Reynar. 2007. Structured models for fine-to-coarse sentiment analysis. In Proceedings of ACL-07.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Co-training and self-training for word sense disambiguation", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of CONLL-04", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Mihalcea. 2004. Co-training and self-training for word sense disambiguation. In Proceedings of CONLL-04.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Learning multilingual subjective language via cross-lingual projections", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Banea", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL-2007", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Mihalcea, C. Banea and J. Wiebe. 2007. Learning multilingual subjective language via cross-lingual projections. In Proceedings of ACL-2007.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Sentiment analysis using support vector machines with diverse information sources", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Mullen", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Collier", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of EMNLP-04", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Mullen and N. Collier. 2004. Sentiment analysis using support vector machines with diverse infor- mation sources. In Proceedings of EMNLP-04.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Weakly supervised natural language learning without redundant views", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of HLT-NAACL-03", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "V. Ng and C. Cardie. 2003. Weakly supervised natu- ral language learning without redundant views. In Proceedings of HLT-NAACL-03.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Text Classification from Labeled and Unlabeled Documents using EM", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Machine Learning", |
| "volume": "39", |
| "issue": "", |
| "pages": "103--134", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitchell. 2000. Text Classification from Labeled and Unlabeled Documents using EM. Machine Learning, 39(2-3):103-134.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Thumbs up? sentiment classification using machine learning techniques", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Vaithyanathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of EMNLP-02", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Pang, L. Lee and S. Vaithyanathan. 2002. Thumbs up? sentiment classification using machine learn- ing techniques. In Proceedings of EMNLP-02.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "A sentimental education: sentiment analysis using subjectivity summarization based on minimum cuts", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of ACL-04", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Pang and L. Lee. 2004. A sentimental education: sentiment analysis using subjectivity summariza- tion based on minimum cuts. In Proceedings of ACL-04.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Using emoticons to reduce dependency in machine learning techniques for sentiment classification", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Read", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of ACL-05", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Read. 2005. Using emoticons to reduce dependency in machine learning techniques for sentiment clas- sification. In Proceedings of ACL-05.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "An EM based training algorithm for cross-language text categorization", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Rigutini", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Maggini", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of WI-05", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L. Rigutini, M. Maggini and B. Liu. 2005. An EM based training algorithm for cross-language text categorization. In Proceedings of WI-05.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Applying cotraining methods to statistical parsing", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Sarkar", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of NAACL-2001", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Sarkar. 2001. Applying cotraining methods to sta- tistical parsing. In Proceedings of NAACL-2001.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "A joint model of text and aspect ratings for sentiment summarization", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "I. Titov and R. McDonald. 2008. A joint model of text and aspect ratings for sentiment summarization. In Proceedings of ACL-08:HLT.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Polarity classification of celebrity coverage in the Chinese press", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [ |
| "K Y" |
| ], |
| "last": "Tsou", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "W M" |
| ], |
| "last": "Yuen", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Kwong", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "B Y" |
| ], |
| "last": "La", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "L" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of International Conference on Intelligence Analysis", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. K. Y. Tsou, R. W. M. Yuen, O. Y. Kwong, T. B. Y. La and W. L. Wong. 2005. Polarity classification of celebrity coverage in the Chinese press. In Pro- ceedings of International Conference on Intelli- gence Analysis.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Thumbs up or thumbs down? semantic orientation applied to unsupervised classification of reviews", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of ACL-2002", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. Turney. 2002. Thumbs up or thumbs down? seman- tic orientation applied to unsupervised classifica- tion of reviews. In Proceedings of ACL-2002.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Using bilingual knowledge and ensemble techniques for unsupervised Chinese sentiment analysis", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "X. Wan. 2008. Using bilingual knowledge and en- semble techniques for unsupervised Chinese sen- timent analysis. In Proceedings of EMNLP-2008.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Recognizing Contextual Polarity in Phrase-Level Sentiment Analysis", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of HLT/EMNLP-05", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Wilson, J. Wiebe and P. Hoffmann. 2005. Recog- nizing Contextual Polarity in Phrase-Level Senti- ment Analysis. In Proceedings of HLT/EMNLP-05.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Topicbridged PLSA for cross-domain text classification", |
| "authors": [ |
| { |
| "first": "G.-R", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of SIGIR-08", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G.-R. Xue, W. Dai, Q. Yang, Y. Yu. 2008. Topic- bridged PLSA for cross-domain text classification. In Proceedings of SIGIR-08.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Sentiment classification for movie reviews in Chinese by improved semantic oriented approach", |
| "authors": [ |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Ye", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Q. Ye, W. Shi and Y. Li. 2006. Sentiment classifica- tion for movie reviews in Chinese by improved semantic oriented approach. In Proceedings of 39th", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Hawaii International Conference on System Sciences", |
| "authors": [], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hawaii International Conference on System Sci- ences, 2006.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Framework of the proposed approach", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "Accuracy vs. different (p, n) for co-Influences of feature size", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF0": { |
| "text": "Google Translate 2 , Yahoo Babel Fish 3 and Windows Live Translate 4 .", |
| "content": "<table><tr><td>Labeled</td><td>Machine</td><td/></tr><tr><td>Chinese</td><td>Translation</td><td/></tr><tr><td>Reviews</td><td>(EN-CN)</td><td/></tr><tr><td/><td>Machine Translation (CN-EN)</td><td>Unlabeled English Reviews</td></tr><tr><td>Chinese View</td><td/><td>English View</td></tr><tr><td/><td>Co-Training</td><td/></tr><tr><td/><td/><td>Training Phase</td></tr><tr><td>Test</td><td/><td>Classification Phase</td></tr><tr><td>Chinese</td><td/><td/></tr><tr><td>Review</td><td/><td/></tr><tr><td>Machine Translation (CN-EN)</td><td>Sentiment Classifier</td><td>Pos\\Neg</td></tr><tr><td>Test</td><td/><td/></tr><tr><td>English</td><td/><td/></tr><tr><td>Review</td><td/><td/></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |