{
"paper_id": "R13-1007",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T14:56:24.141066Z"
},
"title": "Improving Sentiment Analysis in Twitter Using Multilingual Machine Translated Data",
"authors": [
{
"first": "Alexandra",
"middle": [],
"last": "Balahur",
"suffix": "",
"affiliation": {},
"email": "alexandra.balahur@jrc.ec.europa.eu"
},
{
"first": "Marco",
"middle": [],
"last": "Turchi",
"suffix": "",
"affiliation": {},
"email": "turchi@fbk.eu"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Sentiment analysis is currently a very dynamic field in Computational Linguistics. Research herein has concentrated on the development of methods and resources for different types of texts and various languages. Nonetheless, the implementation of a multilingual system that is able to classify sentiment expressed in various languages has not been approached so far. The main challenge this paper addresses is sentiment analysis from tweets in a multilingual setting. We first build a simple sentiment analysis system for tweets in English. Subsequently, we translate the data from English to four other languages-Italian, Spanish, French and German-using a standard machine translation system. Further on, we manually correct the test data and create Gold Standards for each of the target languages. Finally, we test the performance of the sentiment analysis classifiers for the different languages concerned and show that the joint use of training data from multiple languages (especially those pertaining to the same family of languages) significantly improves the results of the sentiment classification.",
"pdf_parse": {
"paper_id": "R13-1007",
"_pdf_hash": "",
"abstract": [
{
"text": "Sentiment analysis is currently a very dynamic field in Computational Linguistics. Research herein has concentrated on the development of methods and resources for different types of texts and various languages. Nonetheless, the implementation of a multilingual system that is able to classify sentiment expressed in various languages has not been approached so far. The main challenge this paper addresses is sentiment analysis from tweets in a multilingual setting. We first build a simple sentiment analysis system for tweets in English. Subsequently, we translate the data from English to four other languages-Italian, Spanish, French and German-using a standard machine translation system. Further on, we manually correct the test data and create Gold Standards for each of the target languages. Finally, we test the performance of the sentiment analysis classifiers for the different languages concerned and show that the joint use of training data from multiple languages (especially those pertaining to the same family of languages) significantly improves the results of the sentiment classification.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Sentiment analysis is a task in Natural Language Processing whose aim is to automatically detect and classify sentiments in texts. Generally, the \"positive\", \"negative\" and \"neutral\" classes are considered, although other scales have also been used (e.g. from 1 to 5 \"stars\" -according to the reviewing systems put at the disposal of clients or users by amazon.com, booking.com, etc.; adding the \"very positive\" and \"very negative\" classes, scales from 1 to 10, etc.).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this article, we deal with the issue of sentiment analysis in tweets, in a multilingual setting. We employ machine translation -which was shown to be at a sufficiently high level of performance (Balahur and Turchi, 2012) -to obtain data in four languages. Our goal is to test if the use of multilingual data can help to improve sentiment classification in tweets (as shown to be the case in formal texts - (Banea et al., 2010) ) and if the joint use of data coming from similar languages or languages that are different in structure can influence on the final result.",
"cite_spans": [
{
"start": 197,
"end": 223,
"text": "(Balahur and Turchi, 2012)",
"ref_id": "BIBREF0"
},
{
"start": 409,
"end": 429,
"text": "(Banea et al., 2010)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The main problem when designing automatic methods for the treatment of tweets is that they are highly informal texts, i.e. they contain slang, emoticons, repetitions of letters or punctuation signs, misspellings (done on purpose or due to writing them from mobile devices), entire words in capital letters, etc.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In order to test our hypotheses, we first design a simple tweet sentiment analysis system for English, taking into account the specificity of expressions employed, but without using languagespecific text processing tools. The motivation is related to the fact that: a) such a distinction would require the use of language identifiers and would need the data from the different languages to be separated; b) We would like to apply the same techniques for as many languages as possible and for some of these languages, no freely-available language processing tools exist. We test this system on the SemEval 2013 Task 2 -Sentiment Analysis in Twitter (Wilson et al., 2013) -training data and test on the development data. The choice of this test set was motivated by the fact that it contains approximately 1000 tweets, being large enough to be able to draw relevant conclusions and at the same time small enough to allow manual correction of the translations, to eliminate incorrect translations being present in both training and test data.",
"cite_spans": [
{
"start": 648,
"end": 669,
"text": "(Wilson et al., 2013)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Subsequently, we employ the Google machine translation system 1 to translate the SemEval 2013 training and development tweets in Italian, Spanish, German and French. We manually correct the translated development data (which we use for testing, not for parameter tuning) to produce a reliable Gold Standard.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Finally, we apply the same sentiment classification system to each of these languages and test the manner in which the combined datasets (from pairs of two languages, families of languages and all the languages together) perform. We conclude that the joint use of training data from different languages improves the classification of sentiment and that the use of training data from languages that are similar in structure helps to achieve statistically significant improvements over the results obtained on individual languages and all languages together.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The remainder of this article is structured as follows: Section 2 gives an overview of the related work. In Section 3, we present the motivations and describe the contributions of this work. In the following section, we describe in detail the process followed to pre-process the tweets, build the classification models and obtain tweets for four other languages. In Section 5, we present the results obtained on different languages and combinations thereof. Finally, Section 6 summarizes the main findings of this work and sketches the lines for future work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The work described herein is related to the development of multilingual sentiment analysis systems and sentiment classification from tweets.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "In order to produce multilingual resources for subjectivity analysis, Banea et al. (Banea et al., 2008) apply bootstrapping to build a subjectivity lexicon for Romanian, starting with a set of 60 words which they translate and subsequently filter using a measure of similarity to the original words, based on Latent Semantic Analysis (LSA) (Deerwester et al., 1990) scores. Another approach to mapping subjectivity lexica to other languages is proposed by Wan (2009), who uses co-training to classify un-annotated Chinese reviews using a corpus of annotated English reviews. (Kim et al., 2010 ) create a number of systems consisting of different subsystems, each classifying the subjectivity of texts in a different language. They translate a corpus annotated for subjectivity analysis (MPQA), the subjectivity clues (Opinion Finder) lexicon and re-train a Naive Bayes classifier that is implemented in the Opinion Finder system using the newly generated resources for all the languages considered. (Banea et al., 2010) translate the MPQA corpus into five other languages (some with a similar ethimology, others with a very different structure). Subsequently, they expand the feature space used in a Naive Bayes classifier using the same data translated to 2 or 3 other languages. Finally, (Steinberger et al., 2011a; Steinberger et al., 2011b) create sentiment dictionaries in other languages using a method called \"triangulation\". They translate the data, in parallel, from English and Spanish to other languages and obtain dictionaries from the intersection of these two translations.",
"cite_spans": [
{
"start": 83,
"end": 103,
"text": "(Banea et al., 2008)",
"ref_id": "BIBREF1"
},
{
"start": 340,
"end": 365,
"text": "(Deerwester et al., 1990)",
"ref_id": "BIBREF3"
},
{
"start": 575,
"end": 592,
"text": "(Kim et al., 2010",
"ref_id": "BIBREF6"
},
{
"start": 999,
"end": 1019,
"text": "(Banea et al., 2010)",
"ref_id": "BIBREF2"
},
{
"start": 1290,
"end": 1317,
"text": "(Steinberger et al., 2011a;",
"ref_id": "BIBREF11"
},
{
"start": 1318,
"end": 1344,
"text": "Steinberger et al., 2011b)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Methods for Multilingual Sentiment Analysis",
"sec_num": "2.1"
},
{
"text": "One of the first studies on the classification of polarity in tweets was (Go et al., 2009) . The authors conducted a supervised classification study on tweets in English, using the emoticons (e.g. \":)\", \":(\", etc.) as markers of positive and negative tweets. (Read, 2005) employed this method to generate a corpus of positive tweets, with positive emoticons \":)\", and negative tweets with negative emoticons \":(\". Subsequently, they employ different supervised approaches (SVM, Na\u00efve Bayes and Maximum Entropy) and various sets of features and conclude that the simple use of unigrams leads to good results, but it can be slightly improved by the combination of unigrams and bigrams.",
"cite_spans": [
{
"start": 73,
"end": 90,
"text": "(Go et al., 2009)",
"ref_id": "BIBREF4"
},
{
"start": 259,
"end": 271,
"text": "(Read, 2005)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Sentiment Classification from Tweets",
"sec_num": "2.2"
},
{
"text": "In the same line of thinking, (Pak and Paroubek, 2010) also generated a corpus of tweets for sentiment analysis, by selecting positive and negative tweets based on the presence of specific emoticons. Subsequently, they compare different supervised approaches with n-gram features and obtain the best results using Na\u00efve Bayes with unigrams and part-of-speech tags.",
"cite_spans": [
{
"start": 30,
"end": 54,
"text": "(Pak and Paroubek, 2010)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Sentiment Classification from Tweets",
"sec_num": "2.2"
},
{
"text": "Another approach on sentiment analysis in tweet is that of (Zhang et al., 2011) . Here, the authors employ a hybrid approach, combining super-vised learning with the knowledge on sentimentbearing words, which they extract from the DAL sentiment dictionary (Whissell, 1989) . Their preprocessing stage includes the removal of retweets, translation of abbreviations into original terms and deleting of links, a tokenization process, and partof-speech tagging. They employ various supervised learning algorithms to classify tweets into positive and negative, using n-gram features with SVM and syntactic features with Partial Tree Kernels, combined with the knowledge on the polarity of the words appearing in the tweets. The authors conclude that the most important features are those corresponding to sentiment-bearing words. Finally, (Jiang et al., 2011) classify sentiment expressed on previously-given \"targets\" in tweets. They add information on the context of the tweet to its text (e.g. the event that it is related to). Subsequently, they employ SVM and General Inquirer and perform a three-way classification (positive, negative, neutral).",
"cite_spans": [
{
"start": 59,
"end": 79,
"text": "(Zhang et al., 2011)",
"ref_id": "BIBREF17"
},
{
"start": 256,
"end": 272,
"text": "(Whissell, 1989)",
"ref_id": "BIBREF15"
},
{
"start": 834,
"end": 854,
"text": "(Jiang et al., 2011)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Sentiment Classification from Tweets",
"sec_num": "2.2"
},
{
"text": "The work presented herein is mainly motivated by the need to: a) develop sentiment analysis tools for a high number of languages, while minimizing the effort to create linguistic resources for each of these languages in part; b) study the manner in which the use of machine translation systems to produce multilingual data performs in the context of informal texts such as tweets; and c) evaluate the performance of sentiment classification when data from different languages is combined in the training phase. We would especially like to study the effect of using data from similar languages versus the use of data from structurally and lexically-different languages. The advantage of such an approach would be that if combined classifiers perform better, then the effort of separating tweets in different languages at the time of analysis (which in the case of streaming data is not negligible) can be reduced or eliminated entirely.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation and Contribution",
"sec_num": "3"
},
{
"text": "Unlike approaches we presented in Related Work section, we employ fully-formed machine translation systems.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation and Contribution",
"sec_num": "3"
},
{
"text": "Bearing this in mind, the main contributions we bring in this paper are:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation and Contribution",
"sec_num": "3"
},
{
"text": "1. The creation of a simple tweet sentiment analysis system, that employs a preprocessing stage to normalize the language and generalize the vocabulary employed to express sentiment. At this stage, we take into account the linguistic peculiarities of tweets, regarding spelling, use of slang, punctuation, etc., and also replace the sentiment-bearing words from the training data with a unique label. In this way, the sentence \"I love roses.\" will be equivalent to the sentence \"I like roses.\", because \"like\" and \"love\" are both positive words according to the GI dictionary. If example 1 is contained in the training data and example 2 is contained in the test data, replacing the sentiment-bearing word with a general label increases the chance to have example 2 classified correctly. In the same line of thought, we also replaced modifiers with unique corresponding labels.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation and Contribution",
"sec_num": "3"
},
{
"text": "2. The use of minimal linguistic processing, which makes the approach easily portable to other languages. We employ only tokenization and do not process texts any further. The reason behind this choice is that we would like the final system to work in a similar fashion for as many languages as possible and for some of them, little or no tools are available.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation and Contribution",
"sec_num": "3"
},
{
"text": "3. The use of a standard news translation system to obtain data in four other languages -Italian, Spanish, German and French;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation and Contribution",
"sec_num": "3"
},
{
"text": "4. The evaluation of different combinations of languages in the training phase and the effect of using languages from the same family versus the use of individual or all languages in the training phase on the overall performance of the sentiment classification performance.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation and Contribution",
"sec_num": "3"
},
{
"text": "We show that using the training models generated with the method described we can improve the sentiment classification performance, irrespective of the domain and distribution of the test sets.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation and Contribution",
"sec_num": "3"
},
{
"text": "Our sentiment analysis system is based on a hybrid approach, which employs supervised learning with the Weka (Weka Machine Learning Project, 2008) implementation of the Support Vector Machines Sequential Minimal Optimization (Platt, 1998) linear kernel, on unigram and bigram features, but exploiting as features sentiment dictionaries, emoticon lists, slang lists and other social media-specific features. We do not employ any specific language analysis software. The aim is to be able to apply, in a straightforward manner, the same approach to as many languages as possible. The approach can be extended to other languages by using similar dictionaries that have been created in our team. They were built using the same dictionaries we employ in this work and their corrected translation to Spanish. The new sentiment dictionaries were created by simultaneously translating from these two languages to a third one and considering the intersection of the translations as correct terms. Currently, new such dictionaries have been created for 15 other languages. The sentiment analysis process contains two stages: pre-processing and sentiment classification.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sentiment Analysis in Tweets",
"sec_num": "4"
},
{
"text": "The language employed in Social Media sites is different from the one found in mainstream media and the form of the words employed is sometimes not the one we may find in a dictionary. Further on, users of Social Media platforms employ a special \"slang\" (i.e. informal language, with special expressions, such as \"lol\", \"omg\"), emoticons, and often emphasize words by repeating some of their letters. Additionally, the language employed in Twitter has specific characteristics, such as the markup of tweets that were reposted by other users with \"RT\", the markup of topics using the \"#\" (hash sign) and of the users using the \"@\" sign.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "All these aspects must be considered at the time of processing tweets. As such, before applying supervised learning to classify the sentiment of the tweets, we preprocess them, to normalize the language they contain. The pre-processing stage contains the following steps:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "In the first step of the pre-processing, we detect repetitions of punctuation signs (\".\", \"!\" and \"?\"). Multiple consecutive punctuation signs are replaced with the labels \"multistop\", for the fullstops, \"multiexclamation\" in the case of exclamation sign and \"multiquestion\" for the question mark and spaces before and after.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "In the second step of the pre-processing, we employ the annotated list of emoticons from Sen-tiStrength 2 (Thelwall et al., 2010) and match the content of the tweets against this list. The emoticons found are replaced with their polarity (\"positive\" or \"negative\") and the \"neutral\" ones are deleted.",
"cite_spans": [
{
"start": 106,
"end": 129,
"text": "(Thelwall et al., 2010)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "Subsequently, the tweets are lower cased and split into tokens, based on spaces and punctuation signs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "The next step involves the normalization of the language employed. In order to be able to include the semantics of the expressions frequently used in Social Media, we employed the list of slang from a specialized site 3 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "At this stage, the tokens are compared to entries in Rogets Thesaurus. If no match is found, repeated letters are sequentially reduced to two or one until a match is found in the dictionary (e.g. \"perrrrrrrrrrrrrrrrrrfeeect\" becomes \"perrfeect\", \"perfeect\", \"perrfect\" and subsequently \"perfect\"). The words used in this form are marked as \"stressed\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "Further on, the tokens in the tweet are matched against three different sentiment lexicons: GI, LIWC and MicroWNOp, which were previously split into four different categories (\"positive\", \"high positive\", \"negative\" and \"high negative\"). Matched words are replaced with their sentiment label -i.e. \"positive\", \"negative\", \"hpositive\" and \"hnegative\". A version of the data without these replacements is also maintained, for comparison purposes.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "Similar to the previous step, we employ a list of expressions that negate, intensify or diminish the intensity of the sentiment expressed to detect such words in the tweets. If such a word is matched, it is replaced with \"negator\", \"intensifier\" or \"diminisher\", respectively. As in the case of affective words, a version of the data without these replacements is also maintained, for comparison purposes.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "Finally, the users mentioned in the tweet, which are marked with \"@\", are replaced with \"PER-SON\" and the topics which the tweet refers to (marked with \"#\") are replaced with \"TOPIC\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tweet Pre-processing",
"sec_num": "4.1"
},
{
"text": "Once the tweets are pre-processed, they are passed on to the sentiment classification module. We employed supervised learning using SVM SMO with a linear kernel, based on boolean features -the presence or absence of n-grams (unigrams, bigrams and unigrams plus bigrams) determined from the training data (tweets that were previously pre-processed as described above). Bigrams are used specifically to spot the influence of modifiers (negations, intensifiers, diminishers) on the polarity of the sentiment-bearing words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sentiment Classification of Tweets",
"sec_num": "4.2"
},
{
"text": "Subsequent to the tweet normalization, we translate the Twitter data (the training and development data in the SemEval Task 2 campaign) using the Google machine translation system to four languages -Italian, Spanish, French and German. The reason for choosing the development dataset for testing is that this set is smaller and allows us to manually check and correct it, to obtain a Gold Standard (and ensure that performance results are not biased by the incorrect translation in both the training, as well as the development data). Further on, we extract the same features as in the case of the system working for English -unigrams and bigrams -from these obtained datasets. We employ the features to train an SVM SMO classifier, in the same manner as we did for English.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Obtaining Multilingual Data for Sentiment Analysis in Tweets",
"sec_num": "4.3"
},
{
"text": "Although the different steps included to eliminate the noise in the data and the choice of features have been refined using our in-house gathered Twitter data, in order to evaluate our approach and make it comparable to other methods, we employ the data used in an established competition, allowing subsequent comparisons to be made.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation and Discussion",
"sec_num": "5"
},
{
"text": "The characteristics of the training (T*) and development (test in our case) -t*-datasets employed are described in Table 1 . On the last column, we also include the baseline in terms of accuracy, which is computed as the number of examples of the majoritary class over the total number of examples: Table 1 : Characteristics of the training (T*) and testing (t*) datasets employed.",
"cite_spans": [],
"ref_spans": [
{
"start": 115,
"end": 122,
"text": "Table 1",
"ref_id": null
},
{
"start": 299,
"end": 306,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Data Set",
"sec_num": "5.1"
},
{
"text": "Data #Tweet #",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Set",
"sec_num": "5.1"
},
{
"text": "In order to test our sentiment analysis approach, we employed the datasets described above, for each of the languages individually, all the twolanguages combinations, combinations of languages from the same linguistic family and all languages together.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation and Results",
"sec_num": "5.2"
},
{
"text": "The results are presented in Table 2 . We consider the measure of accuracy and do not compare to the SemEval official results, because in the competition, the results did not take into account the \"neutral\" class. Table 2 : Results obtained classifying each language individually versus on pairs and families of languages, respectively.",
"cite_spans": [],
"ref_spans": [
{
"start": 29,
"end": 36,
"text": "Table 2",
"ref_id": null
},
{
"start": 214,
"end": 221,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Evaluation and Results",
"sec_num": "5.2"
},
{
"text": "From the results obtained, we can draw several conclusions. First of all, we can see that using tweet normalization and employing machine translation, we can obtain high quality training data for sentiment analysis in many languages. The machinetranslated data thus obtained can be reliably employed to build classifiers for sentiment, reaching a performance level that is similar to the results obtained for English and significatly above the baseline.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "5.3"
},
{
"text": "Secondly, seeing the performance of the different pairs of languages compared to individual results, we can: a) on the one hand, see that combining languages with a comparatively high difference in performance results in an increase of the lower-performing one and b) on the other hand, in some cases, the overall performance is improved on both systems, which shows that combining this data helps to disambiguate the contextual use of specific words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "5.3"
},
{
"text": "Finally, the results show that the use of all the languages together improves the overall classification of sentiment in the data. This shows that a multilingual system can simply employ joint training data from different languages in a single classifier, thus making the sentiment classification straightforward, not needing any language detection software or training different classifiers.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "5.3"
},
{
"text": "By manually inspecting some of the examples in the datasets, we could see that the most important causes of incorrect classification were the word orders and faulty translations in context. Another reason for incorrect sentiment classification was the different manner in which negation is constructed in the different languages considered. In order to improve on this aspect, we will include language-specific rules by adding skip-bigrams (bigrams made up of non-consecutive tokens) features in the languages where the place of the negators can vary.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "5.3"
},
{
"text": "In this article, we presented a method to create a simple sentiment analysis system for English and extend it to the multilingual setting, by employing a standard news machine translation system. We showed that using twitter language normalization, we can obtain good results in target languages and that the joint use of training data from different languages helps to increase the overall performance of the classification. Finally, we showed that the joint training using translated data from languages that are similar yield significantly improved results.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "6"
},
{
"text": "In future work, we plan to evaluate the use of higher-order n-grams (3-grams) and skip-grams to extract more complex patterns of sentiment expressions and be able to identify more precisely the scope of the negation. In this sense, we plan to take into account the modifier/negation schemes typical of each of the languages, to consider (further to translation) language-specific schemes of n-grams.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "6"
},
{
"text": "We also plan to test the performance of sentiment classification using translations *to* English and employing classifiers trained on English data. In order to do this, we require lists of slang and digital dictionaries to perform normalization. We would like to study the performance of our approach in the context of tweets related to specific news, in which case these short texts can be contextualized by adding further content from other information sources. In this way, it would be interesting to make a comparative analysis of the tweets written in different languages (from the same or different regions of the globe), on the same topics.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "6"
},
{
"text": "http://translate.google.com/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://sentistrength.wlv.ac.uk/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://www.chatslang.com/terms/social media",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Multilingual sentiment analysis using machine translation?",
"authors": [
{
"first": "Alexandra",
"middle": [],
"last": "Balahur",
"suffix": ""
},
{
"first": "Marco",
"middle": [],
"last": "Turchi",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 3rd Workshop in Computational Approaches to Subjectivity and Sentiment Analysis",
"volume": "",
"issue": "",
"pages": "52--60",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alexandra Balahur and Marco Turchi. 2012. Mul- tilingual sentiment analysis using machine transla- tion? In Proceedings of the 3rd Workshop in Com- putational Approaches to Subjectivity and Sentiment Analysis, pages 52-60, Jeju, Korea, July. Associa- tion for Computational Linguistics.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "A bootstrapping method for building subjectivity lexicons for languages with scarce resources",
"authors": [
{
"first": "C",
"middle": [],
"last": "Banea",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Mihalcea",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Wiebe",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the Conference on Language Resources and Evaluations",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "C. Banea, R. Mihalcea, and J. Wiebe. 2008. A boot- strapping method for building subjectivity lexicons for languages with scarce resources. In Proceedings of the Conference on Language Resources and Eval- uations (LREC 2008), Maraakesh, Marocco.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Multilingual subjectivity: are more languages better?",
"authors": [
{
"first": "C",
"middle": [],
"last": "Banea",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Mihalcea",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Wiebe",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the International Conference on Computational Linguistics (COLING 2010), Beijing",
"volume": "",
"issue": "",
"pages": "28--36",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "C. Banea, R. Mihalcea, and J. Wiebe. 2010. Mul- tilingual subjectivity: are more languages better? In Proceedings of the International Conference on Computational Linguistics (COLING 2010), Bei- jing, China., pages 28-36.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Indexing by latent semantic analysis",
"authors": [
{
"first": "S",
"middle": [],
"last": "Deerwester",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Dumais",
"suffix": ""
},
{
"first": "G",
"middle": [
"W"
],
"last": "Furnas",
"suffix": ""
},
{
"first": "T",
"middle": [
"K"
],
"last": "Landauer",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Harshman",
"suffix": ""
}
],
"year": 1990,
"venue": "Journal of the American Society for Information Science",
"volume": "",
"issue": "41",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "S. Deerwester, S. Dumais, G. W. Furnas, T. K. Lan- dauer, and R. Harshman. 1990. Indexing by latent semantic analysis. Journal of the American Society for Information Science, 3(41).",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Twitter sentiment classification using distant supervision. Processing",
"authors": [
{
"first": "Alec",
"middle": [],
"last": "Go",
"suffix": ""
},
{
"first": "Richa",
"middle": [],
"last": "Bhayani",
"suffix": ""
},
{
"first": "Lei",
"middle": [],
"last": "Huang",
"suffix": ""
}
],
"year": 2009,
"venue": "",
"volume": "",
"issue": "",
"pages": "1--6",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alec Go, Richa Bhayani, and Lei Huang. 2009. Twit- ter sentiment classification using distant supervision. Processing, pages 1-6.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Target-dependent twitter sentiment classification",
"authors": [
{
"first": "Long",
"middle": [],
"last": "Jiang",
"suffix": ""
},
{
"first": "Mo",
"middle": [],
"last": "Yu",
"suffix": ""
},
{
"first": "Ming",
"middle": [],
"last": "Zhou",
"suffix": ""
},
{
"first": "Xiaohua",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Tiejun",
"middle": [],
"last": "Zhao",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
"volume": "",
"issue": "",
"pages": "151--160",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Long Jiang, Mo Yu, Ming Zhou, Xiaohua Liu, and Tiejun Zhao. 2011. Target-dependent twitter sen- timent classification. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies -Vol- ume 1, HLT '11, pages 151-160, Stroudsburg, PA, USA. Association for Computational Linguistics.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Evaluating multilanguage-comparability of subjectivity analysis systems",
"authors": [
{
"first": "J",
"middle": [],
"last": "Kim",
"suffix": ""
},
{
"first": "J.-J",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "J.-H",
"middle": [],
"last": "Lee",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "595--602",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. Kim, J.-J. Li, and J.-H. Lee. 2010. Evaluating multilanguage-comparability of subjectivity analy- sis systems. In Proceedings of the 48th Annual Meeting of the Association for Computational Lin- guistics, pages 595-602.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Twitter as a corpus for sentiment analysis and opinion mining",
"authors": [
{
"first": "Alexander",
"middle": [],
"last": "Pak",
"suffix": ""
},
{
"first": "Patrick",
"middle": [],
"last": "Paroubek",
"suffix": ""
},
{
"first": ";",
"middle": [],
"last": "",
"suffix": ""
},
{
"first": "Khalid",
"middle": [],
"last": "Choukri",
"suffix": ""
},
{
"first": "Bente",
"middle": [],
"last": "Maegaard",
"suffix": ""
},
{
"first": "Joseph",
"middle": [],
"last": "Mariani",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the Seventh conference on International Language Resources and Evaluation (LREC'10)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alexander Pak and Patrick Paroubek. 2010. Twit- ter as a corpus for sentiment analysis and opin- ion mining. In Nicoletta Calzolari (Conference Chair), Khalid Choukri, Bente Maegaard, Joseph Mariani, Jan Odijk, Stelios Piperidis, Mike Ros- ner, and Daniel Tapias, editors, Proceedings of the Seventh conference on International Language Re- sources and Evaluation (LREC'10), Valletta, Malta;",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "European Language Resources Association",
"authors": [
{
"first": "May",
"middle": [],
"last": "Elra",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "19--21",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "ELRA, may. European Language Resources Associ- ation. 19-21.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Sequential minimal optimization: A fast algorithm for training support vector machines",
"authors": [
{
"first": "C",
"middle": [],
"last": "John",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Platt",
"suffix": ""
}
],
"year": 1998,
"venue": "Advances in Kernel Methods -Support Vector Learning",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "John C. Platt. 1998. Sequential minimal optimiza- tion: A fast algorithm for training support vector ma- chines. Technical report, Advances in Kernel Meth- ods -Support Vector Learning.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Using emoticons to reduce dependency in machine learning techniques for sentiment classification",
"authors": [
{
"first": "Jonathon",
"middle": [],
"last": "Read",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the ACL Student Research Workshop, ACLstudent '05",
"volume": "",
"issue": "",
"pages": "43--48",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jonathon Read. 2005. Using emoticons to re- duce dependency in machine learning techniques for sentiment classification. In Proceedings of the ACL Student Research Workshop, ACLstudent '05, pages 43-48, Stroudsburg, PA, USA. Association for Computational Linguistics.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Creating sentiment dictionaries via triangulation",
"authors": [
{
"first": "J",
"middle": [],
"last": "Steinberger",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Lenkova",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Ebrahim",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Ehrman",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Hurriyetoglu",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Kabadjov",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Steinberger",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Tanev",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Zavarella",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Vazquez",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 2nd Workshop on Computational Approaches to Subjectivity and Sentiment Analysis",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. Steinberger, P. Lenkova, M. Ebrahim, M. Ehrman, A. Hurriyetoglu, M. Kabadjov, R. Steinberger, H. Tanev, V. Zavarella, and S. Vazquez. 2011a. Creating sentiment dictionaries via triangulation. In Proceedings of the 2nd Workshop on Computational Approaches to Subjectivity and Sentiment Analysis, Portland, Oregon.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Multilingual entity-centered sentiment analysis evaluated by parallel corpora",
"authors": [
{
"first": "J",
"middle": [],
"last": "Steinberger",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Lenkova",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Kabadjov",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Steinberger",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Van Der Goot",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the Conference on Recent Advancements in Natural Language Processing (RANLP)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. Steinberger, P. Lenkova, M. Kabadjov, R. Stein- berger, and E. van der Goot. 2011b. Multilingual entity-centered sentiment analysis evaluated by par- allel corpora. In Proceedings of the Conference on Recent Advancements in Natural Language Process- ing (RANLP), Hissar, Bulgaria.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Sentiment in short strength detection informal text",
"authors": [
{
"first": "Mike",
"middle": [],
"last": "Thelwall",
"suffix": ""
},
{
"first": "Kevan",
"middle": [],
"last": "Buckley",
"suffix": ""
},
{
"first": "Georgios",
"middle": [],
"last": "Paltoglou",
"suffix": ""
},
{
"first": "Di",
"middle": [],
"last": "Cai",
"suffix": ""
},
{
"first": "Arvid",
"middle": [],
"last": "Kappas",
"suffix": ""
}
],
"year": 2010,
"venue": "J. Am. Soc. Inf. Sci. Technol",
"volume": "61",
"issue": "12",
"pages": "2544--2558",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mike Thelwall, Kevan Buckley, Georgios Paltoglou, Di Cai, and Arvid Kappas. 2010. Sentiment in short strength detection informal text. J. Am. Soc. Inf. Sci. Technol., 61(12):2544-2558, December.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Weka Machine Learning Project",
"authors": [],
"year": 2008,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Weka Machine Learning Project. 2008. Weka. URL http://www.cs.waikato.ac.nz/\u02dcml/weka.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "The Dictionary of Affect in Language",
"authors": [
{
"first": "Cynthia",
"middle": [],
"last": "Whissell",
"suffix": ""
}
],
"year": 1989,
"venue": "Emotion: theory, research and experience",
"volume": "4",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Cynthia Whissell. 1989. The Dictionary of Affect in Language. In Robert Plutchik and Henry Kellerman, editors, Emotion: theory, research and experience, volume 4, The measurement of emotions. Academic Press, London.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "SemEval-2013 task 2: Sentiment analysis in twitter",
"authors": [
{
"first": "Theresa",
"middle": [],
"last": "Wilson",
"suffix": ""
},
{
"first": "Zornitsa",
"middle": [],
"last": "Kozareva",
"suffix": ""
},
{
"first": "Preslav",
"middle": [],
"last": "Nakov",
"suffix": ""
},
{
"first": "Sara",
"middle": [],
"last": "Rosenthal",
"suffix": ""
},
{
"first": "Veselin",
"middle": [],
"last": "Stoyanov",
"suffix": ""
},
{
"first": "Alan",
"middle": [],
"last": "Ritter",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the International Workshop on Semantic Evaluation, SemEval '13",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Theresa Wilson, Zornitsa Kozareva, Preslav Nakov, Sara Rosenthal, Veselin Stoyanov, and Alan Ritter. 2013. SemEval-2013 task 2: Sentiment analysis in twitter. In Proceedings of the International Work- shop on Semantic Evaluation, SemEval '13, June.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Combining lexicon-based and learning-based methods for twitter sentiment analysis",
"authors": [
{
"first": "Ley",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Riddhiman",
"middle": [],
"last": "Ghosh",
"suffix": ""
},
{
"first": "Mohamed",
"middle": [],
"last": "Dekhil",
"suffix": ""
},
{
"first": "Meichun",
"middle": [],
"last": "Hsu",
"suffix": ""
},
{
"first": "Bing",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2011,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ley Zhang, Riddhiman Ghosh, Mohamed Dekhil, Me- ichun Hsu, and Bing Liu. 2011. Combining lexicon-based and learning-based methods for twit- ter sentiment analysis. Technical Report HPL-2011- 89, HP, 21/06/2011.",
"links": null
}
},
"ref_entries": {}
}
} |