| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T05:10:10.159944Z" |
| }, |
| "title": "Offensive Language Detection in Nepali Social Media", |
| "authors": [ |
| { |
| "first": "Nobal", |
| "middle": [ |
| "B" |
| ], |
| "last": "Niraula", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Saurab", |
| "middle": [], |
| "last": "Dulal", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "sdulal@memphis.edu" |
| }, |
| { |
| "first": "Diwa", |
| "middle": [], |
| "last": "Koirala", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Social media texts such as blog posts, comments, and tweets often contain offensive languages including racial hate speech comments, personal attacks, and sexual harassments. Detecting inappropriate use of language is, therefore, of utmost importance for the safety of the users as well as for suppressing hateful conduct and aggression. Existing approaches to this problem are mostly available for resource-rich languages such as English and German. In this paper, we characterize the offensive language in Nepali, a low-resource language, highlighting the challenges that need to be addressed for processing Nepali social media text. We also present experiments for detecting offensive language using supervised machine learning. Besides contributing the first baseline approaches of detecting offensive language in Nepali, we also release human annotated data sets to encourage future research on this crucial topic.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Social media texts such as blog posts, comments, and tweets often contain offensive languages including racial hate speech comments, personal attacks, and sexual harassments. Detecting inappropriate use of language is, therefore, of utmost importance for the safety of the users as well as for suppressing hateful conduct and aggression. Existing approaches to this problem are mostly available for resource-rich languages such as English and German. In this paper, we characterize the offensive language in Nepali, a low-resource language, highlighting the challenges that need to be addressed for processing Nepali social media text. We also present experiments for detecting offensive language using supervised machine learning. Besides contributing the first baseline approaches of detecting offensive language in Nepali, we also release human annotated data sets to encourage future research on this crucial topic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "User-generated content on social media and discussion forums has surged with the advent of technology and the availability of affordable mobile devices. Users interact on these platforms with natural language posts and comments on diverse topics. Such interactions may contain toxic comments or posts that are acutely insulting or harmful to other participants. Such content (foul language) typically consists of racial hate speech, personal attacks, and sexual harassment. Detection of inappropriate use of language is, therefore, of utmost importance. It keeps the discussion healthy by eliminating foul language and also enhances the security of the users by suppressing hateful conduct and aggression. * These authors contributed equally to this work An approach to filter offensive content is to use human experts (e.g. moderators) and manually review the posts or comments as soon as they get posted. However, manual review is almost impractical and cost-prohibitive, especially when the systems having large user bases that generate a stream of content in a short period. In recent years, the computational linguistics and language technology communities are actively working on automating the detection process. Automated effort can prevent foul content from being posted. It can also flag suspicious content so that human experts monitoring the system can initiate corrective actions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we focus on detecting offensive language in Nepali. While numerous studies exist towards automatic detection of offensive content in resource-rich languages such as English (Gitari et al., 2015; Burnap and Williams, 2016; Davidson et al., 2017; Gamb\u00e4ck and Sikdar, 2017; Waseem, 2016) and German (Schneider et al., 2018; Wiedemann et al., 2018; Michele et al., 2018) , to our knowledge, there is no prior work available for a resource-poor language Nepali. Some studies have been found for Hindi (Dalal et al., 2014; Bharti et al., 2017) which is written in the same Devanagari script as Nepali. However, due to the differences in vocabulary, grammar, culture, and ethnicity, systems developed for Hindi do not work for Nepali. Therefore, our novel work presented in this paper lays a foundation for detecting offensive content in Nepali.", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 209, |
| "text": "(Gitari et al., 2015;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 210, |
| "end": 236, |
| "text": "Burnap and Williams, 2016;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 237, |
| "end": 259, |
| "text": "Davidson et al., 2017;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 260, |
| "end": 285, |
| "text": "Gamb\u00e4ck and Sikdar, 2017;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 286, |
| "end": 299, |
| "text": "Waseem, 2016)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 311, |
| "end": 335, |
| "text": "(Schneider et al., 2018;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 336, |
| "end": 359, |
| "text": "Wiedemann et al., 2018;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 360, |
| "end": 381, |
| "text": "Michele et al., 2018)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 511, |
| "end": 531, |
| "text": "(Dalal et al., 2014;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 532, |
| "end": 552, |
| "text": "Bharti et al., 2017)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The key contributions of this paper are listed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We characterize the offensive languages commonly found in Nepali social media.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We release human labeled data sets for offensive language detection in Nepali social media which are available at https://github.com/nowalab/offensive-nepali.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We prescribe novel preprocessing approaches for Nepali social media text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We provide baseline models for coarsegrained and fine-grained classifications of offensive language in Nepali.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Detection of hate speech and offensive language across multiple languages is ramping up in recent years. This task is typically modeled as a supervised learning problem that requires a set of human-labeled training examples corresponding to different target classes. The target classes are the types of hate speech or offensive language under the study. Schmidt and Wiegand (2017) provides a comprehensive survey of the approaches in several aspects such as the features used, classification algorithms, and data sets and annotations. As mentioned previously, majority of studies on hate speech and offensive language detection have been conducted in resource-rich languages such as English and German. Such research is further facilitated by recent competitions and shared tasks that make availability of gold training examples. Toxic Comment Classification Challenge by Kaggle 1 , for example, provides thousands of humanlabeled examples for detecting toxic behaviors in Wikipedia comments. Similarly, First Shared Task on Aggression Identification (Kumar et al., 2018) for Hindi and English, and Germeval (Wiegand et al., 2018) for German provide gold data sets for detecting offensive languages. The former contains 15000 aggression-annotated Facebook posts and comments each in Hindi and English and the latter contains over 8000 human annotated tweets for German.", |
| "cite_spans": [ |
| { |
| "start": 354, |
| "end": 380, |
| "text": "Schmidt and Wiegand (2017)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1051, |
| "end": 1071, |
| "text": "(Kumar et al., 2018)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1108, |
| "end": 1130, |
| "text": "(Wiegand et al., 2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "An example of hate speech detection in English language is by Burnap and Williams (2016) who studied the detection in tweets with different categories: (a) race (ethnicity), (b) disability, (c) religion, and (c) sexual orientation and transgender status. Their data set consisted of 1803 tweets related to sexual orientation with 183 instances of offensive or antagonistic content, 1876 tweets related to race with 70 instances of offensive or antagonistic content, and 1914 tweets related to the disability with 51 instances of offensive or antagonistic content. The authors modeled the hate speech detection as a classification problem, achieving F-measures of 0.77, 0.75, 0.75, and 0.47 for religion, disability, race, and sexual orientation respectively. Davidson et al. (2017) differentiated hate speech from offensive languages. They classified each English tweet into (a) offensive (b) hate speech and (c) None using different classifiers. Thousands of tweets were labeled using CrowdFlower for the training examples. Several classifiers were trained using a one-versusrest framework in which a separate classifier was trained for each class and the class label with the highest predicted probability across all classifiers was assigned to each tweet. Out of the several classifiers, logistic regression and support vector machine performed the best achieving the overall precision and recall as 0.91 and 0.90 respectively. However, the precision and recall scores for the hate class were low ( precision of 0.44 and recall 0.61), suggesting that the classification of hate speech is challenging. Similarly, Gamb\u00e4ck and Sikdar (2017) trained Convolutional Neural Networks using 6655 Twitter hate-speech data-set originally created by Waseem (2016) to classify utterances into (a) Sexism, (b) Racism, (c) Sexism and Racism, and (d) Non-hate speech, achieving an overall precision, recall, and f-measure as 0.7287, 0.7775, and 0.7389, respectively.", |
| "cite_spans": [ |
| { |
| "start": 62, |
| "end": 88, |
| "text": "Burnap and Williams (2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 759, |
| "end": 781, |
| "text": "Davidson et al. (2017)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1615, |
| "end": 1640, |
| "text": "Gamb\u00e4ck and Sikdar (2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1741, |
| "end": 1754, |
| "text": "Waseem (2016)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Like in English, detecting offensive languages in German language has also been increased recently especially due to the shared tasks at Germeval 2018 2 and Germeval 2019 3 . Germeval 2018 provided 5009 categorized tweets as training data sets and 3532 as test data sets. It offered two tasks : (1) a coarse-grained binary classification with the categories OFFENSIVE and OTHER and (2) a fine-grained classification with the four categories PROFANITY, INSULT, ABUSE, and OTHER. The training data set consists of 66.3% tweets as OTHER, 20.4% as ABUSE, and 11.9% as IN-SULT, and only 1.4% as PROFANITY. The best performing system in task 1, TUWienKBS (Montani, 2018), received overall precision, recall, and F-measure of 0.71, 0.65, and 0.68 for OFFENSIVE and 0.82, 0.86, and 0.84 for OTHER respectively. The best performing system, uhhLT (Wiedemann et al., 2018) , for the fine-grained task (task 2) achieved average precision, recall, and f-measure as 0.56, 0.49, and 0.52, respectively.", |
| "cite_spans": [ |
| { |
| "start": 837, |
| "end": 861, |
| "text": "(Wiedemann et al., 2018)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The closest work to ours is the study of linguistic taboos and euphemisms in Nepali by Niraula et al. (2020) . The authors presented how the offensive contents are formed in Nepali and also created a resource containing a list of common offensive terms in Nepali. However, they have not addressed the detection of offensive content itself.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 108, |
| "text": "Niraula et al. (2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Hate speech is a communication that disparages a person or a group based on some characteristic such as race, color, ethnicity, gender, sexual orientation, nationality, religion, or other characteristic (Schmidt and Wiegand, 2017) . Hate speech can have strong cultural implications (Schmidt and Wiegand, 2017) and thus an utterance can be perceived as offensive or not depending on the observer's cultural background. Besides, the distribution of hate speech can be different in different countries. For example, a country with a mix of religions most likely contains more hate speech related to religions than a country having a singly dominant religion. Therefore, in this section, we discuss different kinds of offensive languages that we observed in Nepali social media. We reviewed several social media posts and comments on Twitter, YouTube, Facebook, Blogs, and News Portals and identified the common hate speech types. We listed the common types in Table 1 with two examples for each. RACIST (OR), SEXIST(OS), and Other Offensive (OO) (e.g. attack to an individual or organization) are the most commonly observed offensive language types in Nepali social media posts. RACIST (OR) and SEXIST (OS) both are specific cases of offensive content. We noticed an enormous amount of offensive content (OOs) that is not SEXIST or RACIST.", |
| "cite_spans": [ |
| { |
| "start": 203, |
| "end": 230, |
| "text": "(Schmidt and Wiegand, 2017)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 283, |
| "end": 310, |
| "text": "(Schmidt and Wiegand, 2017)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Language in Nepali Social Media", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We can expect more of RACIST comments because Nepali society is a mix of several ethnic groups, casts and regions (pahade -people live in hilly region; madheshi -people live in the south; ethnic groups -gurung, magar; casts -bahun, chhetri, dalit, etc.). The social tensions among these races and ethnic groups are reflected in the posts and comments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Language in Nepali Social Media", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Hate speeches related to gender and religion are also observed. Interestingly, we observed the hate speech towards females the most when compared with males and the third gender. Targets to Hinduism, Islam, Christianity, and Buddhism are the most common hate speech related to religions. Furthermore, several cases of use of swear words, violent rhetoric, and personal attack towards individuals or organizations are also observed. We categorized them as Other Offensive.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Language in Nepali Social Media", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Social media text in any language is very noisy and contains ad-hoc typos, abbreviations, acronyms, and hashtags that require a significant amount of preprocessing. In addition to these challenges, Nepali natural language processing requires many other issues to be handled. First, the content can be written in four different ways as shown in Table 1: (a) Nepali text in Devanagari script (b) Nepali text in Roman script, pronunciation-based, (c) pure English text, and (d) Mixed script text that contains both Devanagari and Roman scripts. In addition, cases of Neglish in which the user switches between Nepali and English languages are also found. Furthermore, some interesting cases of code-switching were also found, mostly among Hindi, Nepali, Maithili, and English: \"\u0938\u0939 \u092c\u094b\u0932\u093e \u092d\u093e\u0908\" (Translation: rightly said brother), \"\u0917 \u0921 night\" (Translation: good night) Second, even when the script is written in Devanagari (or Roman), there are several orthographic writing issues one has to deal with while processing Nepali natural language text. The same word (such as \u0935\u094b ) can be written in so many different ways in Devanagari (or in Roman) as they are pronounced almost the same (refer to Table 2 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1189, |
| "end": 1196, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Challenges in Processing Nepali Social Media Text", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Third, Nepali is morphologically rich and complex. The same base verb, \u092e\u093e\u0928 (to kill) for instance, have different forms (\u092e\u093e\u091b , \u092e\u093e\u091b \u0938 , \u092e\u093e\u091b , \u092e\u093e\u091b \u0938, \u092e\u093e \u0930 , \u092e\u093e \u0930\u0935 , \u092e\u093e \u0930\u0928 \u091b, \u092e\u093e \u0930\u090f\u0932\u093e, \u092e\u093e\u0930 \u091b, \u092e\u093e \u0930\u091b , \u092e\u093e\u0930 \u091b\u0928 , \u092e\u093e \u0930\u090f\u0915\u094b, \u092e\u0930 \u0915\u094b, \u092e\u093e \u0930 , \u0928\u092e\u093e\u0930, \u0928\u092e\u093e\u0928 , etc.) depending on gender, number, honor and tense, giving diverse forms for the same base token. Handling this issue is very crucial for processing Nepali text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Challenges in Processing Nepali Social Media Text", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Fourth, Nepali is a low-resource language because Nepali natural language processing is in its infancy. There aren't adequate resources available to process the language. For example, there is not even a list of standard vocabulary words available to use. Lemmatization of morphologically rich languages is crucial but currently is not possible for Nepali. There is no reliable public or commercial parts-of-speech tagger available. Fifth, translation of data sets or resources from other languages to Nepali is not straightforward. Commercially available language translation services are poor in translating contents from other languages to Nepali. All of these issues make the processing of Nepali text very challenging.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Challenges in Processing Nepali Social Media Text", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Romanized -1 pagal boksi Romanized -2 pagal bokshi Devanagari -1 \u092a\u093e\u0917\u0932 \u0935\u094b Devanagari -2 \u092a\u093e\u0917\u0932 \u0935\u094b Devanagari -3 \u092a\u093e\u0917\u0932 \u0935\u094b Devanagari -4 \u092a\u093e\u0917\u0932 \u092c\u094b Devanagari -5 \u092a\u093e\u0917\u0932 \u092c\u094b Devanagari -6 \u092a\u093e\u0917\u0932 \u092c\u094b Mixed -1 \u092a\u093e\u0917\u0932 boksi Mixed -2 \u092a\u093e\u0917\u0932 bokshi Mixed -3 pagal \u0935\u094b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S.N Content", |
| "sec_num": null |
| }, |
| { |
| "text": "In this section, we describe the data collection, data annotation, and our system to detect offensive lan-guages in Nepali text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our goal is to create a labeled data set of hate speech of different types and train machine learning models using it. Since hate speech appears relatively less in social media, annotating a large sample gives just a few offensive contents, making the annotation process very laborious and expensive. To address this problem, researchers apply different strategies to improve the distribution of offensive content Zampieri et al. (2019) . Following these strategies, we made a pool of comments and posts from the sources in social media that have higher chances of containing hate speech. Our pool consists of over 15000 comments and posts from diverse social media platforms such as Facebook, Twitter, YouTube, Nepali Blogs, and News Portals.", |
| "cite_spans": [ |
| { |
| "start": 414, |
| "end": 436, |
| "text": "Zampieri et al. (2019)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For Facebook, we first made a list of potentially controversial posts posted to a general audience in open groups and public pages between 2017 and 2019. We then extracted around 7000 comments corresponding to those posts. For Twitter, we followed a bootstrapping approach as done by prior arts (Zampieri et al., 2019) . For this, we first created a small list of Nepali words (in both De-vanagari and Romanized forms) that have higher chances of being used in hate speech. The words themselves are not explicitly offensive but can appear in hate speech depending on the context of their use. For example, the words \"\u092c\u093e \u0928\" (bahunan upper cast in Nepali society) and \"\u092d\u093e\u0932 \" (bhalu -bear) are non-offensive by themselves but can appear in offensive contexts. Offensively, bahun can be used to insult someone racially based on their cast, and bhalu can be used to call someone a prostitute. Using the list of keywords, we performed a targeted search on Twitter and collected about 4000 tweets, approximately 50 tweets per word. These tweets enhanced the pool with diverse and context-sensitive posts. For YouTube, similar to Facebook, we manually created a list of potentially controversial, non-controversial, and neutral videos, and extracted approximately 3500 comments. Video contents are highly engaging. A good length video -especially a controversial one -contained diverse emotions and attributes such as anger, happiness, low and high pitch, etc., and was scrutinized by the viewers. The YouTube video comments also helped to maintain the diversity of data set in the writing form as they were typed in transliterated, mixed, and pure Devanagari font and fulfill our categorical requirements. Besides, they captured the inputs from the diversity of people commenting on the posts. Finally, the rest of the comments, about 500, were gathered from several Nepali blogs and news websites. ", |
| "cite_spans": [ |
| { |
| "start": 295, |
| "end": 318, |
| "text": "(Zampieri et al., 2019)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "After constructing the pool of comments and posts, we randomized the records for annotation. To ensure the quality, we used two annotators and asked them to annotate each record into four categories: SEXIST, RACIST, OTHER-OFFENSIVE, and NON-OFFENSIVE. We computed the inter-rater reliability (IRR) between each pair of ratings using Cohen's kappa (k) (McHugh, 2012) . IRR scores were computed for both fine-grained (considering Train 3562 1950 218 68 5798 Test 896 486 49 19 1450 Table 4 : Training and Testing Data Sets all four labels) and coarse-grained (offensive or non-offensive) cases. For the coarse-grained, we considered the three offensive categories SEXIST, RACIST, and OTHER-OFFENSIVE as offensive. The Cohen's kappa coefficients obtained for finegrained and coarse-grained cases were 0.71 and 0.78, respectively, suggesting substantial agreements between the raters. We observed most of the disagreements between human annotators in borderline cases. For example, Kati milyo Parti bat Dr. Sab lai (How much/many did you get from the party 4 , Dr. Sab? ) was marked as offensive by one while non-offensive by the other. This comment could be a personal attack for corruption in certain contexts while non-offensive in some other e.g. receiving compensation or votes. The disagreements were reviewed by the third annotator and resolved on consensus.", |
| "cite_spans": [ |
| { |
| "start": 351, |
| "end": 365, |
| "text": "(McHugh, 2012)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 428, |
| "end": 494, |
| "text": "Train 3562 1950 218 68 5798 Test 896 486 49 19 1450 Table 4", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Annotation and Data Set", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Additionally, the social media posts and comments often contained personally identifiable information such as person names, organization names, and phone numbers. To anonymize the comments, we replace the person/organization names with unique random yet real person/organization names. Since gender information carries vital linguistic properties in the language, we tried preserving the gender as much as possible during the name replacement process. A name with a known gender (i.e. male or female) is replaced with another random name of the same gender.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NO OO OR OS Total", |
| "sec_num": null |
| }, |
| { |
| "text": "The annotators annotated 7462 records altogether. The distribution of the annotation across different categories is presented in Table 3 . We removed the duplicated examples from the annotated corpus and performed 80-20 split randomly to create the training and test data sets. The statistics of these data sets are shown in Table 4 . To encourage the research community for addressing this important task of offensive language detection in Nepali, we have released these gold data sets at https://github.com/nowalab/offensive-nepali.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 129, |
| "end": 136, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 325, |
| "end": 332, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "NO OO OR OS Total", |
| "sec_num": null |
| }, |
| { |
| "text": "As described in Section 3.1, the social media comments and posts came in different forms: comments purely in Devanagari script, transliteration using Roman letters, pure English, or their combinations. In fact, more than 50% of the comments in our pool are written in transliterated or mixed forms. We speculate, due to the ease of writing, this pattern will continue. These observations reiterate the need for text normalization while processing Nepali social media texts. To this end, we consider two different text normalization schemes: (A) Dirghikaran (Prep_Dir): Because multiple characters have the same sound, inconsistencies appear even for the same word written in Devanagari script. We use the following mappings to normalize the character variants: -> , \u25cc -> \u25cc , \u0938 -> \u0936, \u0937 -> \u0936, \u0935 -> \u092c, \u0909 -> \u090a, \u25cc \u0930 -> \u25cc , \u25cc \u0930 -> \u25cc \u0930 , \u0907 -> \u0908, \u25cc -> \u25cc , \u0928 -> \u0923, \u25cc -> \u0919 . This converts the words with different orthographic forms to a normalized form, e.g., \u0915\u0924\u093e\u0935, and \u0915\u0924\u093e\u092c both map to \u0915 \u0924\u093e\u092c. This approach does not affect the tokens that are already transliterated in Romanized form or written in English. (B) Romanization (Prep_Rom): With this scheme, we convert (transliterate) each Nepali word written in Devanagari script to its Romanized form using a number of rules. This rule-based system takes care of the orthographic variants as well. For instance, it converts all \u0915\u0924\u093e\u0935, \u0915\u0924\u093e\u092c, \u0915 \u0924\u093e\u092c, and \u0915 \u0924\u093e\u0935 to kitab. We could have done the reverse way i.e. converting transliterated text in Romanized form to Devanagari script (e.g. kitab -> \u0915\u0924\u093e\u0935) but we found that converting Devanagari text to Romanized using the rules is relatively easier. After this preprocessing, all the comments will be in Romanized forms. This powerful preprocessing technique has not been employed in any of the prior arts and is one of our novel contributions in this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Nepali, as illustrated in Section 3.1, is a morphologically rich language. A verb, for example, can take different forms depending upon gender, number, honor, tense, and their combinations. Therefore, character-based and sub-word features are expected to be useful in classifying offensive languages. For that reason, we considered both word (Unigrams and Bigrams) and character (Character Trigrams) features for our experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We performed experiments to see the effect of preprocessing scheme and classification model, and coarse and fine-grained classification. In all experiments, we reduced the features down to 10000 using KBest algorithm with chi-squared stat.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Non (A) No Preprocessing (Prep_None) (B) Dirghikaran (Prep_Dir), (C) Romanization (Prep_Rom), and (D) Prep_Dir + Prep_Rom. The first block uses word only, the second block uses character only and the last block uses both word and character features. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prep.", |
| "sec_num": null |
| }, |
| { |
| "text": "We trained a Logistic Regression classifier for binary classification using four different preprocessing schemes: A. No preprocessing (Prep_None), B. Dirghikaran (Prep_Dir), C. Romanization (Prep_Rom), and D. Both Prep_Dir + Prep_Rom, where + means string concatenation. We considered positive examples as the records with OO, OR, and OS from Table 4 . This yielded the train data set with 3562 negative and 2236 positive examples and the test data set with 896 positive and 554 negative examples.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 343, |
| "end": 350, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Preprocessing", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "We reported the results using the test data in Table 5. The top, middle, and bottom blocks contain the results corresponding to word only, char-acter only, and both word and character features, respectively. The results in the middle block are significantly better than the results in the top block, demonstrating that character-based features are extremely useful. It is expected because Nepali is morphologically very rich and the social media text is very noisy. Adding both word and character features further slightly improved the results (the bottom block).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Preprocessing", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "Within each block, i.e. given a feature type, the results are better in the order: D > C > B > A, where A is no preprocessing. The preprocessing technique B, \"Dirghikaran\", improved the performance of the classifier compared to A. But the margin of improvement by C, \"Romanization\", is typically higher than that by B. It is especially significant when the word only features are used. This is because Dirghikaran only normalizes the terms written in the Devanagari script but it does not transliterate the text. Romanization, however, transliterates the text written in Devanagari script and makes it uniform with other already transliterated user posts. Combining texts using both Romanization and Dirghikaran, marked with D, slightly improved the results over C.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Preprocessing", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "For coarse-grained (i.e. binary) classification, we experimented with four machine learning classifiers that are most often used for offensive language detection. Specifically, we used: (A) Logistic Regression (LR): Linear LR with L2 regularization constant 1 and limited-memory BFGS optimization, (B) Support Vector Machine (SVM): Linear SVM with L2 regularization constant 1 and logistic loss function, (C) Random Forests (RF): Averaging probabilistic predictions of 100 randomized decision trees. (D) Multilingual BERT (M-BERT): Current best performing models for offensive language detection utilize BERT (Devlin et al., 2018) based models (Liu et al., 2019; Mozafari et al., 2019; Baruah et al., 2020) . Although there is no BERT model available for Nepali yet, Nepali is included in M-BERT 5 which is trained using the entire Wikipedia dump for each language. We used Hugging Face Transformer library (Wolf et al., 2020) to build the M-BERT classifier.", |
| "cite_spans": [ |
| { |
| "start": 609, |
| "end": 630, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 644, |
| "end": 662, |
| "text": "(Liu et al., 2019;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 663, |
| "end": 685, |
| "text": "Mozafari et al., 2019;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 686, |
| "end": 706, |
| "text": "Baruah et al., 2020)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 907, |
| "end": 926, |
| "text": "(Wolf et al., 2020)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coarse-grained Classification", |
| "sec_num": "4.7" |
| }, |
| { |
| "text": "In addition, we constructed a baseline model using the list of Nepali offensive terms collected by 5 https://github.com/googleresearch/bert/blob/master/multilingual.md Niraula et al. (2020) and is available at GitHub 6 . This data set contains 1078 offensive terms, their transliterated forms, and interestingly their offensiveness scores. The offensiveness score ranges from 1 (slightly offensive) to 5 (absolute offensive e.g. taboo terms). For a given post, our baseline scans for the tokens present in the dictionary and sums the corresponding offensiveness scores. If the sum is 5 or more, it declares the post as offensive.", |
| "cite_spans": [ |
| { |
| "start": 168, |
| "end": 189, |
| "text": "Niraula et al. (2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coarse-grained Classification", |
| "sec_num": "4.7" |
| }, |
| { |
| "text": "For baseline and traditional machine learning models (LR, SVM, and RF), as suggested by the experiments in Section 4.6, we chose the Romanization + Dirghikaran preprocessing strategy and both word and character-based features. In addition, we computed and utilized the indicator features, for each post, by scanning the preprocessed tokens and looking them up in the offensive dictionary. As before, we reduced the features using KBest to 10000 for both train and test data sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coarse-grained Classification", |
| "sec_num": "4.7" |
| }, |
| { |
| "text": "We trained the models and evaluated them using the binary train and test data sets constructed as described in Section 4.6. The evaluation results are presented in Table 6 . The baseline model which is based on a dictionary obtained the F 1 scores of 0.58 and 0.73 for offensive and nonoffensive categories. All machine learning models performed very well compared to the baseline model. Interestingly, M-BERT model did not perform well compared to the traditional models. This could be because M-BERT model is trained using Wikipedia content which is different from the social media text. Also, the size of Wikipedia for lowresource language Nepali is not huge and thus it is under-represented in the M-BERT model. Logistic Regression and Random Forrest models were the top-performing models, with the latter having a slightly higher F 1 score on the offensive category. For this reason, we chose the Random Forrest classifier for the fine-grained classification which we describe next.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 164, |
| "end": 171, |
| "text": "Table 6", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Coarse-grained Classification", |
| "sec_num": "4.7" |
| }, |
| { |
| "text": "Fine-grained classification can be done by directly training a multi-class classifier over the labeled training data set. However, we followed the principle proposed by Park and Fung (2017) that performed better for this specific task. Following this, we trained a Random Forrest classifier for coarsegrained classification as in Section 4.7. We trained another Random Forrest classifier using only the training data set with labels OO (other offensive), OR (offensive racist), and OS (offensive sexist). During testing, we applied the second classifier only to those test records that the first classifier predicted as offensive to get their fine-grained categories. We assigned a non-offensive label (NO) to each test record for which the first classifier predicted as non-offensive. We reported the experiment results in Table 7 . The F 1 scores for Non-Offensive, Other Offensive, Racist, and Sexist were 0.87, 0.71, 0.45, and 0.01 respectively. The lower performance for the sexist category was mainly due to the fewer training examples available for this category compared to the other categories (see Table 4 ). Gathering these fine-grained labels is a major challenge in the field than obtaining labels with simply offensive and non-offensive (Park and Fung, 2017) . This is more evident in the low-resource language like Nepali.", |
| "cite_spans": [ |
| { |
| "start": 1251, |
| "end": 1272, |
| "text": "(Park and Fung, 2017)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 824, |
| "end": 831, |
| "text": "Table 7", |
| "ref_id": "TABREF9" |
| }, |
| { |
| "start": 1108, |
| "end": 1115, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Fine-grained classification", |
| "sec_num": "4.8" |
| }, |
| { |
| "text": "Most of the errors were due to the lack of world and contextual knowledge to the classifier and is always a challenge for offensive language detection in any language. For instance, thamel ma bhalu ko bigbigi (literal translation: Abundant bears in Thamel) is offensive while jungle ma bhalu ko bigbigi (literal meaning: Abundant bears in jungle) is non-offensive although both of these sentences have the same tokens everywhere except one i.e. Thamel vs. Jungle. Thamel is a famous tourist area in Kathmandu that also has a negative connotation as a brothel and bhalu is a contextually offensive term that can mean a bear or a prostitute depending on the context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "4.9" |
| }, |
| { |
| "text": "In this paper, we presented a systematic study of offensive language detection in Nepali, a topic that has not been explored for this low resource language. We collected diverse social media posts and generated a labeled data set by manually annotating 7248 posts with fine-grained labels. The data set is available at https://github.com/ nowalab/offensive-nepali.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We presented different challenges that need to be addressed to process noisy social media posts in Nepali. We proposed three different preprocessing methods and provided detailed evaluations demonstrating their effectiveness on the model performance. We reported detailed experiments for coarse-grained detection of offensive languages using several conventional machine learning and recent deep learning models and features. We also provided a fine-grained classification of offensive comments using a two-step approach for Nepali language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our data set and baseline algorithms provide foundation for future research in this area to fight against cyberbullying and hate speech, which has been widespread in recent days. We would like to caution to those who use our work (e.g. data sets and algorithms) to avoid over-reliance on keywords and machine learning models. We remind everyone to keep the context in the forefront, and encourage using human review to the ones flagged by the machine learning systems as offensive, especially in cases of false positives.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Future work includes detecting the targets of the offensive comments, which could be an individual organization/person or a group. Leveraging offensive language data sets from other languages to Nepali, e.g. by translation and transfer learning as done by Sohn and Lee (2019) , is another interesting future direction.", |
| "cite_spans": [ |
| { |
| "start": 256, |
| "end": 275, |
| "text": "Sohn and Lee (2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://www.kaggle.com/c/jigsaw-toxic-commentclassification-challenge", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://projects.fzai.h-da.de/iggsa/germeval-2018/ 3 https://projects.fzai.h-da.de/iggsa/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Party here specifically refers to political organization", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/nowalab/offensive-nepali", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like acknowledge Ms. Monika Shah, professor Dr. Kumar Prasad Koirala, and Mr. Suraj Subedi for their continued support, helpful discussions and encouragements.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Aggression identification in english, hindi and bangla text using bert, roberta and svm", |
| "authors": [ |
| { |
| "first": "Arup", |
| "middle": [], |
| "last": "Baruah", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaushik", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Ferdous", |
| "middle": [], |
| "last": "Barbhuiya", |
| "suffix": "" |
| }, |
| { |
| "first": "Kuntal", |
| "middle": [], |
| "last": "Dey", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying", |
| "volume": "", |
| "issue": "", |
| "pages": "76--82", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arup Baruah, Kaushik Das, Ferdous Barbhuiya, and Kuntal Dey. 2020. Aggression identification in en- glish, hindi and bangla text using bert, roberta and svm. In Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying, pages 76- 82.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Harnessing online news for sarcasm detection in hindi tweets", |
| "authors": [ |
| { |
| "first": "Korra", |
| "middle": [], |
| "last": "Santosh Kumar Bharti", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjay", |
| "middle": [], |
| "last": "Sathya Babu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jena", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Conference on Pattern Recognition and Machine Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "679--686", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Santosh Kumar Bharti, Korra Sathya Babu, and San- jay Kumar Jena. 2017. Harnessing online news for sarcasm detection in hindi tweets. In International Conference on Pattern Recognition and Machine In- telligence, pages 679-686. Springer.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Us and them: identifying cyber hate on twitter across multiple protected characteristics", |
| "authors": [ |
| { |
| "first": "Pete", |
| "middle": [], |
| "last": "Burnap", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "EPJ Data Science", |
| "volume": "5", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pete Burnap and Matthew L Williams. 2016. Us and them: identifying cyber hate on twitter across mul- tiple protected characteristics. EPJ Data Science, 5(1):11.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Insult detection in hindi", |
| "authors": [ |
| { |
| "first": "Chetan", |
| "middle": [], |
| "last": "Dalal", |
| "suffix": "" |
| }, |
| { |
| "first": "Shivyansh", |
| "middle": [], |
| "last": "Tandon", |
| "suffix": "" |
| }, |
| { |
| "first": "Amitabha", |
| "middle": [], |
| "last": "Mukerjee", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Technical report on Artificial Intelligence", |
| "volume": "18", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chetan Dalal, Shivyansh Tandon, and Amitabha Muker- jee. 2014. Insult detection in hindi. Technical report, Technical report on Artificial Intelligence, 18.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Automated hate speech detection and the problem of offensive language", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Davidson", |
| "suffix": "" |
| }, |
| { |
| "first": "Dana", |
| "middle": [], |
| "last": "Warmsley", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Macy", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingmar", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Conference on Web and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Davidson, Dana Warmsley, Michael Macy, and Ingmar Weber. 2017. Automated hate speech detection and the problem of offensive language. In Proceedings of the 11th International Conference on Web and Social Media.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Using convolutional neural networks to classify hatespeech", |
| "authors": [ |
| { |
| "first": "Bj\u00f6rn", |
| "middle": [], |
| "last": "Gamb\u00e4ck", |
| "suffix": "" |
| }, |
| { |
| "first": "Utpal", |
| "middle": [], |
| "last": "Kumar Sikdar", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "85--90", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bj\u00f6rn Gamb\u00e4ck and Utpal Kumar Sikdar. 2017. Us- ing convolutional neural networks to classify hate- speech. In Proceedings of the First Workshop on Abusive Language Online, pages 85-90.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A lexicon-based approach for hate speech detection", |
| "authors": [ |
| { |
| "first": "Njagi", |
| "middle": [], |
| "last": "Dennis Gitari", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhang", |
| "middle": [], |
| "last": "Zuping", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanyurwimfura", |
| "middle": [], |
| "last": "Damien", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Long", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Journal of Multimedia and Ubiquitous Engineering", |
| "volume": "10", |
| "issue": "4", |
| "pages": "215--230", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Njagi Dennis Gitari, Zhang Zuping, Hanyurwimfura Damien, and Jun Long. 2015. A lexicon-based approach for hate speech detection. International Journal of Multimedia and Ubiquitous Engineering, 10(4):215-230.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Benchmarking aggression identification in social media", |
| "authors": [ |
| { |
| "first": "Ritesh", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Atul", |
| "middle": [], |
| "last": "Kr Ojha", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the First Workshop on Trolling, Aggression and Cyberbullying (TRAC-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--11", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ritesh Kumar, Atul Kr Ojha, Shervin Malmasi, and Marcos Zampieri. 2018. Benchmarking aggression identification in social media. In Proceedings of the First Workshop on Trolling, Aggression and Cyber- bullying (TRAC-2018), pages 1-11.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Nuli at semeval-2019 task 6: Transfer learning for offensive language detection using bidirectional transformers", |
| "authors": [ |
| { |
| "first": "Ping", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Zou", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th international workshop on semantic evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "87--91", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ping Liu, Wen Li, and Liang Zou. 2019. Nuli at semeval-2019 task 6: Transfer learning for offensive language detection using bidirectional transformers. In Proceedings of the 13th international workshop on semantic evaluation, pages 87-91.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Interrater reliability: the kappa statistic", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Mary", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mchugh", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Biochemia medica", |
| "volume": "22", |
| "issue": "3", |
| "pages": "276--282", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mary L McHugh. 2012. Interrater reliability: the kappa statistic. Biochemia medica, 22(3):276-282.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Inriafbk at germeval 2018: Identifying offensive tweets using recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Corazza", |
| "middle": [], |
| "last": "Michele", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Menini", |
| "suffix": "" |
| }, |
| { |
| "first": "Arslan", |
| "middle": [], |
| "last": "Pinar", |
| "suffix": "" |
| }, |
| { |
| "first": "Rachele", |
| "middle": [], |
| "last": "Sprugnoli", |
| "suffix": "" |
| }, |
| { |
| "first": "Cabrio", |
| "middle": [], |
| "last": "Elena", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Tonelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Villata", |
| "middle": [], |
| "last": "Serena", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "80--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Corazza Michele, Stefano Menini, Arslan Pinar, Rachele Sprugnoli, Cabrio Elena, Sara Tonelli, and Villata Serena. 2018. Inriafbk at germeval 2018: Identifying offensive tweets using recurrent neural networks. In GermEval 2018, pages 80-84.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Tuwienkbs at germeval 2018: German abusive tweet detection", |
| "authors": [ |
| { |
| "first": "Joaqu\u0131n", |
| "middle": [], |
| "last": "Padilla Montani", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Austrian Academy of Sciences", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joaqu\u0131n Padilla Montani. 2018. Tuwienkbs at germeval 2018: German abusive tweet detection. Austrian Academy of Sciences, Vienna September 21, 2018.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A bert-based transfer learning approach for hate speech detection in online social media", |
| "authors": [ |
| { |
| "first": "Marzieh", |
| "middle": [], |
| "last": "Mozafari", |
| "suffix": "" |
| }, |
| { |
| "first": "Reza", |
| "middle": [], |
| "last": "Farahbakhsh", |
| "suffix": "" |
| }, |
| { |
| "first": "Noel", |
| "middle": [], |
| "last": "Crespi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Complex Networks and Their Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "928--940", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marzieh Mozafari, Reza Farahbakhsh, and Noel Crespi. 2019. A bert-based transfer learning approach for hate speech detection in online social media. In In- ternational Conference on Complex Networks and Their Applications, pages 928-940. Springer.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Saurab Dulal, and Diwa Koirala. 2020. Linguistic taboos and euphemisms in nepali", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Nobal", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Niraula", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2007.13798" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nobal B Niraula, Saurab Dulal, and Diwa Koirala. 2020. Linguistic taboos and euphemisms in nepali. arXiv preprint arXiv:2007.13798.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "One-step and twostep classification for abusive language detection on twitter", |
| "authors": [ |
| { |
| "first": "Ji", |
| "middle": [], |
| "last": "Ho", |
| "suffix": "" |
| }, |
| { |
| "first": "Park", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.01206" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ji Ho Park and Pascale Fung. 2017. One-step and two- step classification for abusive language detection on twitter. arXiv preprint arXiv:1706.01206.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A survey on hate speech detection using natural language processing", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Schmidt", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Wiegand", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Fifth International Workshop on Natural Language Processing for Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Schmidt and Michael Wiegand. 2017. A survey on hate speech detection using natural language pro- cessing. In Proceedings of the Fifth International Workshop on Natural Language Processing for So- cial Media, pages 1-10.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Towards the automatic classification of offensive language and related phenomena in german tweets", |
| "authors": [ |
| { |
| "first": "Julian Moreno", |
| "middle": [], |
| "last": "Schneider", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Roller", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Bourgonje", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefanie", |
| "middle": [], |
| "last": "Hegele", |
| "suffix": "" |
| }, |
| { |
| "first": "Georg", |
| "middle": [], |
| "last": "Rehm", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Austrian Academy of Sciences", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julian Moreno Schneider, Roland Roller, Peter Bour- gonje, Stefanie Hegele, and Georg Rehm. 2018. To- wards the automatic classification of offensive lan- guage and related phenomena in german tweets. Aus- trian Academy of Sciences, Vienna September 21, 2018.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Mc-bert4hate: Hate speech detection using multi-channel bert for different languages and translations", |
| "authors": [ |
| { |
| "first": "Hajung", |
| "middle": [], |
| "last": "Sohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Hyunju", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "2019 International Conference on Data Mining Workshops (ICDMW)", |
| "volume": "", |
| "issue": "", |
| "pages": "551--559", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hajung Sohn and Hyunju Lee. 2019. Mc-bert4hate: Hate speech detection using multi-channel bert for different languages and translations. In 2019 In- ternational Conference on Data Mining Workshops (ICDMW), pages 551-559. IEEE.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Are you a racist or am i seeing things? annotator influence on hate speech detection on twitter", |
| "authors": [ |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the first workshop on NLP and computational social science", |
| "volume": "", |
| "issue": "", |
| "pages": "138--142", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeerak Waseem. 2016. Are you a racist or am i seeing things? annotator influence on hate speech detection on twitter. In Proceedings of the first workshop on NLP and computational social science, pages 138- 142.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Transfer learning from lda to bilstm-cnn for offensive language detection in twitter", |
| "authors": [ |
| { |
| "first": "Gregor", |
| "middle": [], |
| "last": "Wiedemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugen", |
| "middle": [], |
| "last": "Ruppert", |
| "suffix": "" |
| }, |
| { |
| "first": "Raghav", |
| "middle": [], |
| "last": "Jindal", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Biemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Austrian Academy of Sciences", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gregor Wiedemann, Eugen Ruppert, Raghav Jindal, and Chris Biemann. 2018. Transfer learning from lda to bilstm-cnn for offensive language detection in twitter. Austrian Academy of Sciences, Vienna September 21, 2018.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Overview of the germeval 2018 shared task on the identification of offensive language. Austrian Academy of Sciences", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Wiegand", |
| "suffix": "" |
| }, |
| { |
| "first": "Melanie", |
| "middle": [], |
| "last": "Siegel", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Ruppenhofer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Wiegand, Melanie Siegel, and Josef Ruppen- hofer. 2018. Overview of the germeval 2018 shared task on the identification of offensive language. Aus- trian Academy of Sciences, Vienna September 21, 2018.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Transformers: State-of-theart natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Davison", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Shleifer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "38--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Julien Chaumond, Lysandre Debut, Vic- tor Sanh, Clement Delangue, Anthony Moi, Pier- ric Cistac, Morgan Funtowicz, Joe Davison, Sam Shleifer, et al. 2020. Transformers: State-of-the- art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Nat- ural Language Processing: System Demonstrations, pages 38-45.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Predicting the type and target of offensive posts in social media", |
| "authors": [ |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Noura", |
| "middle": [], |
| "last": "Farra", |
| "suffix": "" |
| }, |
| { |
| "first": "Ritesh", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1415--1420", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019. Predicting the type and target of offensive posts in social media. pages 1415-1420.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "0.74 0.90 0.81 0.75 0.49 0.59", |
| "num": null |
| }, |
| "TABREF0": { |
| "text": "Transliterated): sale khate aphu matra educated thhanndo rahexa Translation: \"sale\" \"khate\" (pejorative term for people living in urban slum dwellers) thinks he is the only educated", |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td/><td/><td>Type</td></tr><tr><td>1</td><td>Nepali (Devanagari): \u092e\u093e\u0938\u093e\u0932\u093e \u092a\u093e\u0917\u0932 \u092d\u090f \u091c \u094b \u091b!</td></tr><tr><td/><td>Translation: \"massala\" it seems he got mad</td><td>OO</td></tr><tr><td>2</td><td colspan=\"2\">Nepali (OO</td></tr><tr><td>3</td><td>Nepali (Devanagari): \u092a\u093e\u0917\u0932 \u092c\u093e \u0928</td></tr><tr><td/><td>Translation: lunatic \"bahun\" (an upper cast)</td><td>OR</td></tr><tr><td>4</td><td>Nepali (Transliterated): Rajako kaam chhodi kamiko dewali</td></tr><tr><td/><td>Translation: Going to kami's festival over king's assignment -a traditionally non-tabooed</td><td>OR</td></tr><tr><td/><td>idiom that is considered racist now</td></tr><tr><td>5</td><td>Nepali (Transliterated): Pothi baseko suhaudaina</td></tr><tr><td/><td>Translation: It does not suit a woman to raise her voice (sexist idiom)</td><td>OS</td></tr><tr><td>6</td><td>Nepali (Mixed): \u092a \u0938\u093e\u092e\u093e \u092c \u091b\u0928 \u0915 \u091f \u0939 sala</td></tr><tr><td/><td>Translation: girls get sold with money sala</td><td>OS</td></tr><tr><td>7</td><td>Nepali (Transliterated): ma pani bahun hu tara tapaaik ko kuro chhita bujhena</td></tr><tr><td/><td>Translation: I am also a bramhin, but I am dissatisfied with your words</td><td>NO</td></tr><tr><td>8</td><td>Nepali (Devanagari): \u092f\u093e \u092d\u093e\u0932 \u0939\u093e \u0938\u0930</td></tr><tr><td/><td>Translation: Sir, this is a bear</td><td>NO</td></tr></table>", |
| "num": null |
| }, |
| "TABREF1": { |
| "text": "Examples of common offensive languages found in Nepali social media.", |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Note that they could be typed in</td></tr></table>", |
| "num": null |
| }, |
| "TABREF2": { |
| "text": "Different orthographic forms of writing the text \"mad witch\"", |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null |
| }, |
| "TABREF4": { |
| "text": "The pool of social media data set.", |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null |
| }, |
| "TABREF6": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td colspan=\"2\">: Effect of preprocessing techniques and</td></tr><tr><td>features on binary classification.</td><td>Preprocessing</td></tr><tr><td>techniques:</td><td/></tr></table>", |
| "num": null |
| }, |
| "TABREF7": { |
| "text": "Binary classification using different machine learning models.", |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null |
| }, |
| "TABREF9": { |
| "text": "Results for detecting different offensive categories", |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null |
| } |
| } |
| } |
| } |