| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T05:10:18.800146Z" |
| }, |
| "title": "Multilingual Resources for Offensive Language Detection", |
| "authors": [ |
| { |
| "first": "Aym\u00e9", |
| "middle": [], |
| "last": "Arango", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Chile", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Jorge", |
| "middle": [], |
| "last": "P\u00e9rez", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Chile", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "B\u00e1rbara", |
| "middle": [], |
| "last": "Poblete", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Chile", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Valentina", |
| "middle": [], |
| "last": "Proust", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pontifical Catholic University of Chile", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Magdalena", |
| "middle": [], |
| "last": "Salda\u00f1a", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pontifical Catholic University of Chile", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Most of the published approaches and resources for offensive language and hate speech detection are tailored for the English language. In consequence, cross-lingual and cross-cultural perspectives lack some essential resources. The lack of diversity of the datasets in Spanish is notable. Variations throughout Spanish-speaking countries make existing datasets not enough to encompass the task in the different Spanish variants. We manually annotated 9834 tweets from Chile to enrich the existing Spanish resources with different words and new targets of hate that have not been considered in previous studies. We conducted several cross-dataset evaluation experiments of the models published in the literature using our Chilean dataset and two others in English and Spanish. We propose a comparative framework for quickly conducting comparative experiments using different previously published models. In addition, we set up a Codalab competition for further comparison of new models in a standard scenario, that is, data partitions and evaluation metrics. All resources can be accessed through a centralized repository for researchers to get a complete picture of the progress on the multilingual hate speech and offensive language detection task.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Most of the published approaches and resources for offensive language and hate speech detection are tailored for the English language. In consequence, cross-lingual and cross-cultural perspectives lack some essential resources. The lack of diversity of the datasets in Spanish is notable. Variations throughout Spanish-speaking countries make existing datasets not enough to encompass the task in the different Spanish variants. We manually annotated 9834 tweets from Chile to enrich the existing Spanish resources with different words and new targets of hate that have not been considered in previous studies. We conducted several cross-dataset evaluation experiments of the models published in the literature using our Chilean dataset and two others in English and Spanish. We propose a comparative framework for quickly conducting comparative experiments using different previously published models. In addition, we set up a Codalab competition for further comparison of new models in a standard scenario, that is, data partitions and evaluation metrics. All resources can be accessed through a centralized repository for researchers to get a complete picture of the progress on the multilingual hate speech and offensive language detection task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Offensive language frequently appears on social network interactions 1 . According to Sigurbergsson and Derczynski (2020) offensive language encompasses a range of expressions from profanities to much more severe types of language among which is hate speech. Hate speech is usually defined as communications of animosity or disparagement of an individual or a group on account of a group characteristic 2 . Offensive language and hate speech bring along the risk of encouraging real hate crimes. Due to the large amount of content generated in social media, automatic moderation is necessary to perform offensive content detection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Machine learning models are used in most of the published approaches for this purpose. The necessary resources are available almost exclusively for the English language Hosseinmardi et al., 2015; Davidson et al., 2017) . On the other hand, the cross-lingual and cross-cultural perspectives have been under-addressed in the related literature. The lack of adequately annotated datasets is one of the limiting factors for developing these subtasks (Yin and Zubiaga, 2021; Fortuna and Nunes, 2018) . In addition, the publicly available resources are accessible through the corresponding description papers. These resources lack centralized repositories for datasets and classification models. This situation makes it difficult for researchers to get a complete picture of the progress on the task.", |
| "cite_spans": [ |
| { |
| "start": 169, |
| "end": 195, |
| "text": "Hosseinmardi et al., 2015;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 196, |
| "end": 218, |
| "text": "Davidson et al., 2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 446, |
| "end": 469, |
| "text": "(Yin and Zubiaga, 2021;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 470, |
| "end": 494, |
| "text": "Fortuna and Nunes, 2018)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most of these existing datasets contain English examples, though we have gathered some datasets in Portuguese (Fortuna et al., 2019) , Arabic (Mulki et al., 2019) , Italian and Spanish (Pereira-Kohatsu et al., 2019) . In the particular case of the Spanish language, only a few datasets can be found. The geographical origin of them is limited to Spain (Pereira-Kohatsu et al., 2019) , M\u00e9xico (\u00c1lvarez-Carmona et al., 2018) , or unknown (Basile et al., 2019) . Since the hate speech phenomenon depends on the socio-cultural context (Sap et al., 2019) , the targets of hate could change depending on the origin of the messages. The specific features of the Spanish language spoken in different countries make models poorly generalizable when training with these existing resources. We propose a manually annotated dataset for offensive language detection. The dataset is composed of 9834 tweets from Chile and is meant to enrich the existing Spanish resources with different words and new targets of hate that have not been considered in previous studies.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 132, |
| "text": "(Fortuna et al., 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 142, |
| "end": 162, |
| "text": "(Mulki et al., 2019)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 185, |
| "end": 215, |
| "text": "(Pereira-Kohatsu et al., 2019)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 352, |
| "end": 382, |
| "text": "(Pereira-Kohatsu et al., 2019)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 392, |
| "end": 422, |
| "text": "(\u00c1lvarez-Carmona et al., 2018)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 436, |
| "end": 457, |
| "text": "(Basile et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 531, |
| "end": 549, |
| "text": "(Sap et al., 2019)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We conducted several evaluation experiments of the models published in the literature using our Chilean dataset and two others in English and Spanish. We propose a comparative framework for quickly conducting comparative experiments. This framework facilitates the application of existing models by including each original implementation as sub-models. In addition, we set up a Codalab competition for further comparison of new models in a standard scenario, that is, data partitions and evaluation metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In summary, we developed the following resources for multilingual hate speech detection:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1. Chilean dataset for offensive language detection: We annotated a Spanish Twitter dataset in several categories related to the phenomenon of offensive language, including a hate speech category. This dataset is composed of 9834 Spanish tweets and is, as far as we know, the first one where the data was originated in South America.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We constructed a library of models using published cross-lingual offensiveness detectors. The library facilitates the use of models by providing a common interface. Moreover, we set up a Codalab competition for further comparison of emergent models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparative framework:", |
| "sec_num": "2." |
| }, |
| { |
| "text": "We organized the existing datasets into a structured repository to facilitate authors finding existing resources in several languages. The repository contains annotations of the main characteristics of the existing datasets and direct links for downloading them. In addition to datasets, it contains tools for using existing multilingual hate speech detection models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Resource repository:", |
| "sec_num": "3." |
| }, |
| { |
| "text": "In Section 2, we describe the existing datasets for offensive language detection and comment on the diversity of existing Spanish resources. Next, in Section 3, we describe the Chilean dataset we constructed for offensive language detection, including a hate speech category. Finally, in Section 4, we describe the tools we created for helping the authors to replicate and compare new approaches with the existing ones in a cross-lingual environment. All resources described in the paper will be integrated in our centralized code repository 3 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Resource repository:", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Ethical Considerations: The annotators inferred only female and male genders of the authors and targets of tweets. The genders were inferred from names and pronouns. Due to the non-binary nature of gender, this label should be used carefully to avoid unfair models. OFFENSIVE CONTENT WARNING. Because of the topic of our research, certain examples are potentially offensive. We minimized as much as possible the number of examples and obfuscated offensive words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Resource repository:", |
| "sec_num": "3." |
| }, |
| { |
| "text": "One of the essential steps for the research in offensive language detection using machine learning is dataset acquisition. Even when several social media platforms exist to get data from them, constructing a balanced labeled dataset is a costly task in time and effort. There is not a dataset considered as standard for this task. Therefore researchers have to search in the related literature for the adequate one for their experiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Most of the existing datasets have been annotated for the English language (Dinakar et al., 2011; Hosseinmardi et al., 2015; Waseem and Hovy, 2016; Founta et al., 2018) though there exist a few in other languages such as Spanish (Basile et al., 2019) , Italian and Arabic (Mubarak et al., 2017) . It is important to mention that even for English, the task is far from being solved (Arango et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 97, |
| "text": "(Dinakar et al., 2011;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 98, |
| "end": 124, |
| "text": "Hosseinmardi et al., 2015;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 125, |
| "end": 147, |
| "text": "Waseem and Hovy, 2016;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 148, |
| "end": 168, |
| "text": "Founta et al., 2018)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 229, |
| "end": 250, |
| "text": "(Basile et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 272, |
| "end": 294, |
| "text": "(Mubarak et al., 2017)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 381, |
| "end": 402, |
| "text": "(Arango et al., 2020)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In most cases, the datasets only contain text messages and not other information regarding authors, localization, or the conversation to which the tweet belongs. The lack of information makes the datasets out of context and limits the use of different features. Regarding the data sources, most of the datasets have been recovered from the Twitter platform, though a few are composed of Facebook messages or Youtube comments (Dinakar et al., 2011) . As far as we know, there exists one data repository 4 for organizing offensive language datasets.", |
| "cite_spans": [ |
| { |
| "start": 425, |
| "end": 447, |
| "text": "(Dinakar et al., 2011)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Problem:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Spanish Datasets and the Multicultural", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "To the best of our knowledge, there are four different datasets (Basile et al., 2019; Pereira-Kohatsu et al., 2019; \u00c1lvarez-Carmona et al., 2018; in the Spanish language, related to the task of offensive language detection, with a total of 26 000 messages labeled for hate speech or aggressive content. One of these datasets contained messages that originated in Spain (Pereira-Kohatsu et al., 2019) (6000 tweets). Two of them from unknown origin: IberEval 2018 ) (4138 tweets) and SemEval 2019 (Basile et al., 2019 ) (5365 tweets). The remaining dataset was constructed with messages from Mexico: MEX-A3T (11 000 tweets) being the only resource related to the hate speech phenomenon built for Latin-American Spanish.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 85, |
| "text": "(Basile et al., 2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 86, |
| "end": 115, |
| "text": "Pereira-Kohatsu et al., 2019;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 116, |
| "end": 145, |
| "text": "\u00c1lvarez-Carmona et al., 2018;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 495, |
| "end": 515, |
| "text": "(Basile et al., 2019", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Spanish Datasets and the Multicultural", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Being the hate speech phenomenon a cultural problem, we consider that a model trained on these datasets would not be able to generalize over different Spanish data from different cultures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Spanish Datasets and the Multicultural", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The research in the Spanish language has been limited, in part, due to the lack of resources. As we described in Section 2, the few Spanish available datasets are composed of examples of the variant of Spanish spoken in specific regions of the world with the cultural background associated with it. We consider it necessary to leverage the first dataset representative of the Spanish spoken in South America, particularly Chile. The examples in this dataset would enrich the understanding of offensive language and hate speech by introducing terms mainly used in this region and targets of hate unconsidered in previous studies. Next, we describe the process of annotation and general features of our dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chilean Dataset for Offensive Language Detection", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For recovering an initial corpus, we followed a strategy commonly used in the related literature (Basile et al., 2019; Waseem and Hovy, 2016) which is identifying words that serve as seeds for querying online platforms. The use of seeds would guarantee a higher probability for hateful content to appear in the crawled data.", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 118, |
| "text": "(Basile et al., 2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 119, |
| "end": 141, |
| "text": "Waseem and Hovy, 2016)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Recovering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Seeds The seeds were gathered by surveying a group of seven Chilean students. The list includes terms (or phrases) used in Chile. Some of these terms are offensive, but others are neutral terms related to polemic subjects such as sexual nature, immigration, and others (e.g. haitianos, ind\u00edgenas, lesbianas). We recovered a total of 132 seeds that can be read in our code repository.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Recovering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Search Parameters Using the pre-defined seeds and with the help of the Twitter API 5 , we downloaded approximately 61 000 tweets. The tweets' language was restricted to Spanish, and the geolocation was prefixed for the Chile area. Along with each tweet, we recovered the conversation (sequence of tweets) that originated them in case of existing. These conversations serve as context for each tweet (Qian et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 399, |
| "end": 418, |
| "text": "(Qian et al., 2019)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Recovering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Sample for Annotation From the 61 000 tweets recovered, we selected 10 000 (one-sixth), taking a proportional amount of tweets originating from each seed. In this way, we maintained a representative sample of all sources.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Recovering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Three external annotators under contract conducted the process of labeling the dataset, all three were native Chileans. First, they went through a training process, where the three of them labeled the same set of tweets to make sure they annotated the content as similarly as possible. They repeated this process with different sets of tweets until achieving an inter-annotator agreement higher than 90% agreement and a Krippendorff's alpha higher than 0.7 in all the pre-defined labels (Neuendorf, 2002) . After the training process, they proceeded to label the final dataset, a portion each. Table 1 contains a summary of this measure obtained during the training process.", |
| "cite_spans": [ |
| { |
| "start": 487, |
| "end": 504, |
| "text": "(Neuendorf, 2002)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 594, |
| "end": 601, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The final dataset contains 9834 tweets annotated with several labels, some of them related to offensive content based on Chen's categorization of uncivil speech (Chen, 2017). In addition, it includes annotations that contextualize the messages, such as the target of offensive speech and the use of irony. As described above, the dataset also contains the conversation that originated each of them. These conversations serve as context for the annotated tweets. Next, we explore the main characteristics of the resulting dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chilean Dataset Description", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Some of the labels in the final dataset encompass different types of offensive content. These labels are hate speech, unintended profanity/vulgarity, insult/appellation, intentional profanity/vulgarity. The other labels are not directly related to the offensive phenomenon, but help contextualize the messages and generalize the dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Labels", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "hate speech The tweet contains hate speech if it includes stereotypical language to offend minority groups such as women, immigrants, sexual or racial minorities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Labels", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "For example, the tweet: \"La mapuche es un asqueroso trapo y los mapuches; cero aporte, son gasto, da\u00f1o y destrucci\u00f3n, tampoco originarios.\" (\"The Mapuche woman is a disgusting rag and the Mapuche people; zero contribution, they are a waste of money, damage, and destruction, not natives either.\") is labeled as hateful because the author is attributing detrimental characteristics to the mapuche people which are a minority group of indigenous people in Chile and Argentina.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Labels", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "Hate against this particular minority is also an example of the dependence of the hate speech phenomenon on socio-cultural factors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Labels", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "insult/appellation A tweet is labeled as positive for insults or name calling if the tweet includes nicknames, phrases, or words that are not profane but are offensive (such as \"s***id\" or \"j**k\").", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Labels", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "For example: \" est\u00e1 \"mujer\" me da verg\u00fcenza ajena.\" (\" This \"woman\" embarrasses me\"), is labeled as containing insulting language because the intention is to offend a person (this woman) without using profane words. On the other hand, the tweet: \"Ma***to flaite hediondo a marihuana.\" (\"D**n marijuana-smelly chav.\") also belongs to this class because of the use of \"flaite\" a pejorative word used in Chile for referring to marginal or uneducated people (Rojas, 2015) .", |
| "cite_spans": [ |
| { |
| "start": 454, |
| "end": 467, |
| "text": "(Rojas, 2015)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Labels", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "unintended profanity/vulgar language Some tweets may contain profane words without the intention of offending anyone, like in: \"Que manera de echar de menos ese estadio por la grand\u00edsima co***a de su madre (\"I really miss that mother f*** stadium\"). This kind of tweet is labeled as containing unintended profanity. In this case, mother f*** is an expression used for making emphasis on how much the author misses the stadium. The column \"Label\" shows each label of the dataset in both English and Spanish. The column \"Positives (%)\" shows the number and percent of tweets labeled as positive for each label. Finally, the column \"K\" shows the Krippendorff's measure obtained during the training stage for each of the labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Labels", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "intentional profanity/vulgar language: A different type of profanity can be found in the tweet: \"Les dije que el \u00e1rbitro era un CO***A DE SU MADRE \" (\"I told you the referee was a MOTHER F*** \"). Even when we have the same words as in previous example, in this case, the annotators marked this tweet as containing intentional profanity, as the author has the intention to insult a person using profane words (the referee).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Labels", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "Other labels are meant to enrich the dataset by spotting linguistic and semantic information of the tweets. In this sense, we can find annotations regarding the content of the tweet. male figure: The tweet labels containing male or female figures are the ones, offensive or not, directed to a particular person identified by annotators as male, for example: \"Tremendo hijo de p**a eres Marcos.\" (\"You are a tremendous son of a b***, Marcos.\") is labeled as male or female figure since the message is directed to Marcos, a male.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tweets Content", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "female figure: Similar to the male figure label, the tweet: \"Y q dice la autodenominada candidata feminista al respecto\" (\"And what does the selfappointed feminist candidate have to say about it?\") is labeled as female figure since the author poses a question to a female (feminist candidate). mention to [topic] There are five labels used to mark when a tweet makes reference to different topics such as immigration, domestic politics, marginalized groups and others. As an example of domestic politics is the tweet \"Vamos a botar a la feminazi, #VOTACIONES2021\" (\"We are going to kick out the feminazi, # ELECTIONS2021\")\". sarcasm/irony/mockery The use of humor or sarcasm was also identified in this label. This label could be helpful to disambiguate the message's intention, that is, the intention of hurting. (e.g. \"Aqu\u00ed llenando la piscina con las l\u00e1grimas de los fachos\" (\"Here filling the pool with fascists' tears\").", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tweets Content", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "evidence This category is based on Chen's (Chen, 2017) definition of deliberative speech, a condition set to foster healthy conversations on social media. The tweets are labeled positive for evidence if they provide statistical evidence, citations, or links with extra information instead of a mere opinion. For example: \"Expulsi\u00f3n de migrantes efectuada este domingo en la RM https://t.co/*** v\u00eda @***\" (\"Expulsion of migrants carried out this Sunday in the Metropolitan Region https://t.co/*** via @***\") is labeled as evidence because it includes a link to a news source.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tweets Content", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "legitimate question Also based on Chen's work (2017), a tweet contains a legitimate question if it poses a non-rhetorical question, for example asking for more information about a particular event, like in the tweet: \"\u00bfA los venezolanos le est\u00e1n solicitando visa para entrar a Peru?\" (\"Are Venezuelans requested to have a visa to enter Peru?\"). ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tweets Content", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "All the tweets contain a label of the authors' gender: 2102 tweets were sent by a female author, 4694 by a male author. The rest of the authors were identified as undetermined-gender since the user name does not suggest either a male or female gender (e.g., \"DVM\"; \"Patria y Libertad\"). The annotators also labeled information about the anonymity of the authors. The tweet is labeled as anonymous if the username is a nickname (e.g. \"DVM\") or a name without last name (e.g. \"patricia\"). There are 5371 unique Twitter users in the dataset. The 50.67% of the tweets in any offensive categories were sent by users labeled as males, 20.22% by females and the rest from undetermined-gender users. Table 1 contains a summary of the dataset columns. A sample of the dataset can be found in our repository 6 and will be completely published soon.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 692, |
| "end": 699, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tweets' Author Information", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "We implemented some baselines for offensive language detection over our dataset. We defined different classification tasks: insult, profanity/vulgarity (intentional or not) and hate speech detection. In addition, we tested baselines to identify if a tweet belongs to any of the offensive classes. Therefore, we set the target offensive if the tweet is labeled as any of the offensive labels (insult or profanity/vulgarity or hate speech. The results were obtained with a 5-Fold cross validation . dummy classifier We predict the values of the classes randomly, making use of the Sklearn 7 dummy classifier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Detection Baselines", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "seed classifier: To verify that there is no seed bias, we conducted a baseline classification method consisting of labeling as positive those tweets containing one of the offensive seeds previously used to recover the dataset (See Section 3.1). Our results show the best performance on the insult detection task showing a higher bias in this category. The list of offensive seeds can be found in our code repository. This result was expected since this category is positive depending on the existence of certain words. On the other hand, the rest of the tasks showed nearly random results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Offensive Content Detection Baselines", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We tested a third baseline using Spanish FastText embeddings 8 and Random Forest classifier. The word embeddings of 100 dimensions were first averaged into one single vector and used as input for a Random Forest Classifier with default parameters. We show the results for 5-fold cross-validation. The results with this approach, compared to the dummy and seed classifiers, showed the best results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EMB + RF", |
| "sec_num": null |
| }, |
| { |
| "text": "The F-Score obtained with the different methods in the four tasks can be shown in Table 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 82, |
| "end": 89, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "EMB + RF", |
| "sec_num": null |
| }, |
| { |
| "text": "In the related literature of offensive language detection, there is a lack of comparative studies. This situation is more noticeable in cross-lingual approaches as a relatively new sub-area. There is no consensus about the best approaches for solving the cross-lingual detection task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparative Framework", |
| "sec_num": "4" |
| }, |
| { |
| "text": "With the purpose of alleviating this situation, we propose two tools for cross-lingual approaches comparison:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparative Framework", |
| "sec_num": "4" |
| }, |
| { |
| "text": "1. A python library that contains published crosslingual hate speech detection models as methods: The library has five published models. Each model consists of the original implementation code as a sub-module, plus a class interface that standardizes all models' input to simplify their use. In addition, the library contains the main class whose attributes are the previously mentioned models and auxiliary tools for evaluation and data management. A brief description of the models can be found in Section 4.1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparative Framework", |
| "sec_num": "4" |
| }, |
| { |
| "text": "2. An open competition in Codalab 9 for further comparison. We set up an open competition in Codalab to promote fair comparison among cross-lingual approaches. Different leaderboards can be found for the different configurations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparative Framework", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We found a few papers describing cross-lingual approaches. We included them in our library using the original companion code.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-lingual Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "ACL19 As a preparation stage for the model proposed by Pamungkas et al. 2021, it is necessary to translate the data into the target language. The model consists of training two different LSTM architectures. The first one is trained with the original training data, and the other is trained using the data translated into the testing language. Finally, the two outputs are concatenated and used as input for a final linear output layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-lingual Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "ECML20 In this paper, Aluru et al. (2020) described different approaches for cross-lingual hate speech detection with different architectures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-lingual Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Those are the multilingual Bert model, the GRU model, and a combination of LASER embeddings and Logistic Regression (LR) classifier. The model that combines LASER embeddings and LR classifier turned to be the best approach. Our library includes three types of models, though in Table 3 we only report the best results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 278, |
| "end": 285, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Cross-lingual Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EMNLP20 Ranasinghe and Zampieri (2020) proposed a transfer learning strategy. First, an XLM-R classification model is trained using data from one language, and the weights are saved. Then, these weights are used to initialize the model and predict labels in a different language. We used our library for reproducing the previously mentioned models in a cross-lingual way using three different languages English, Spanish (Basile et al., 2019) , and our Chilean dataset. In Table 3, we show the results we obtained in different cross-lingual experiments.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 38, |
| "text": "Ranasinghe and Zampieri (2020)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 420, |
| "end": 441, |
| "text": "(Basile et al., 2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-lingual Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For evaluation, we used the Spanish (ES) and English (EN) datasets constructed for the SemEval 2019 competition (Basile et al., 2019 ). As we mentioned in Section 2, the authors of these datasets did not specify any location for recovering the data. Examining the tweets objects of the Spanish dataset, we noticed only a few with geo-location information, some belonging to Spain, M\u00e9xico, though most of them were unknown. We compare the cross-datasets performance with the performance across different variants of Spanish: general Spanish (ES) and the variant of Spanish spoken in Chile (CL). To this end, we add experiments using our previously described Chilean (CL) dataset. We show precision, recall, and F-score metrics, the commonly used metrics, and the ROC metric.", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 132, |
| "text": "(Basile et al., 2019", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Datasets", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In general, the cross-lingual setup, including the Spanish (ES) dataset, performed better than Chilean (CL). One of the reasons for this could be the data used for pre-trained models; for example, ECML20 model is based on LASER representations. These are multilingual sentence embeddings constructed from parallel data. The data used may not encompass some of the words used in South America, though a more profound analysis is needed. Despite presenting a simple structure (LASER + LR), ECML20 model showed the overall best results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-lingual Results", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "We tested the models in monolingual Spanish setups but using datasets from different socio-cultural (Ranasinghe and Zampieri, 2020) ; ECML20 (Aluru et al., 2020); WEBSCi21 (Vitiugin et al., 2021) .", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 131, |
| "text": "(Ranasinghe and Zampieri, 2020)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 172, |
| "end": 195, |
| "text": "(Vitiugin et al., 2021)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-cultural Results", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "One of the datasets is the SemEval Spanish dataset (Basile et al., 2019) with examples originated in Spain. The other is our dataset, also in Spanish, but originated in Chile. The results in terms of F1 and ROC are shown in Table 4 .", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 72, |
| "text": "(Basile et al., 2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 224, |
| "end": 231, |
| "text": "Table 4", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "contexts.", |
| "sec_num": null |
| }, |
| { |
| "text": "The best overall results were obtained using the ECML20 model in the CL \u2192 ES configuration. Despite being datasets from the same language, the knowledge transfer was, in general, poor. All the results were lower than the ones obtained in an inside-dataset experiment shown in Table 2 . These results evidence of the differences between the two Spanish variants, the different hate targets of the two geographical regions, though much more inside in this regard is needed.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 276, |
| "end": 283, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "contexts.", |
| "sec_num": null |
| }, |
| { |
| "text": "To facilitate finding an appropriate dataset, we organized them in a centralized repository. So far, we have listed 39 datasets, 20 of which are in the English language and 19 others in different languages such as Arabic (5), Spanish (4), Italian (3), Portuguese (1), among others.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Repository Description", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In our repository the datasets are separated by languages and have the following structure:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Repository Description", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Datasets (Link to paper): Abbreviated name of the dataset with a link for downloading the paper description.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Repository Description", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Objects: Which are the type of objects (e.g. tweets, images, sentences).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Repository Description", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Size: The number of objects in the dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Repository Description", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Available: A direct link for downloading the dataset is provided if the dataset is publicly available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Repository Description", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Labels: The labels in which the objects are categorized (e.g. (hateful, non-hateful), (racist, sexist, either))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Repository Description", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Approximately, 64% are composed of tweets, but other objects can be found, such as Facebook comments or Twitter users. Although some of the below-listed datasets are not explicitly available, they could be obtained from the authors if requested. ee Our comparative framework (Section 4) facilitates the use of previously published models for cross lingual hate speech detection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Repository Description", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We described three resources for the multilingual offensive language detection task. These resources would be helpful in the development of the multilingual sub-area, which have been under-addressed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We constructed the first Chilean dataset for hate speech and offensive language to alleviate this situation. The dataset contains 9834 tweets in the Spanish language that originated in Chile. The tweets are labeled in several categories related to offensive content. Furthermore, it includes annotations associated with the content of the tweets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Finally, we created a comparative framework (library + competition) to facilitate researchers to compare new models with the existing ones. The library is implemented in python and contains, as submodels, previously published cross-lingual approaches for hate speech detection. The competition is hosted in Codalab and offers a scenario for comparing new models with the existing ones.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The resource repository would facilitate researchers to find, in one place, the datasets that better meet their needs as well as tools for easily comparing their work with previously existing models. From our repository, it is noticeable the lack of available Spanish examples. Moreover, there is a low representation of different types of Spanish spoken worldwide.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://www.channel4.com/news/george-floyd-deathhas-led-to-increasing-online-hate-speech-report-claims 2 https://www.encyclopedia.com/international/encyclopediasalmanacs-transcripts-and-maps/hate-speech", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/aymeam/ Datasets-for-Hate-Speech-Detection 4 https://hatespeechdata.com/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://developer.twitter.com/en/docs/twitter-api", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://anonymous.4open.science/r/ Datasets-for-Hate-Speech-Detection-0D50/Chilean% 20dataset/Dataset_sample_500.csv", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://scikit-learn.org/stable/ 8 https://github.com/dccuchile/spanish-word-embeddings", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://codalab.lisn.upsaclay.fr/competitions/1221? secret_key=c1de3893-de48-4ca1-8071-89e82f189039", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Deep learning models for multilingual hate speech detection", |
| "authors": [ |
| { |
| "first": "Binny", |
| "middle": [], |
| "last": "Sai Saketh Aluru", |
| "suffix": "" |
| }, |
| { |
| "first": "Punyajoy", |
| "middle": [], |
| "last": "Mathew", |
| "suffix": "" |
| }, |
| { |
| "first": "Animesh", |
| "middle": [], |
| "last": "Saha", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sai Saketh Aluru, Binny Mathew, Punyajoy Saha, and Animesh Mukherjee. 2020. Deep learning mod- els for multilingual hate speech detection. CoRR, abs/2004.06465.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Overview of mex-a3t at ibereval 2018: Authorship and aggressiveness analysis in mexican spanish tweets", |
| "authors": [ |
| { |
| "first": "\u00c1", |
| "middle": [], |
| "last": "Miguel", |
| "suffix": "" |
| }, |
| { |
| "first": "Estefan\u0131a", |
| "middle": [], |
| "last": "\u00c1lvarez-Carmona", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuel", |
| "middle": [], |
| "last": "Guzm\u00e1n-Falc\u00f3n", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [ |
| "Jair" |
| ], |
| "last": "Montes-Y G\u00f3mez", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Escalante", |
| "suffix": "" |
| }, |
| { |
| "first": "Ver\u00f3nica", |
| "middle": [], |
| "last": "Villasenor-Pineda", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Reyes-Meza", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rico-Sulayes", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Notebook papers of 3rd sepln workshop on evaluation of human language technologies for iberian languages (ibereval)", |
| "volume": "6", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miguel \u00c1 \u00c1lvarez-Carmona, Estefan\u0131a Guzm\u00e1n- Falc\u00f3n, Manuel Montes-y G\u00f3mez, Hugo Jair Es- calante, Luis Villasenor-Pineda, Ver\u00f3nica Reyes- Meza, and Antonio Rico-Sulayes. 2018. Overview of mex-a3t at ibereval 2018: Authorship and ag- gressiveness analysis in mexican spanish tweets. In Notebook papers of 3rd sepln workshop on evalua- tion of human language technologies for iberian lan- guages (ibereval), seville, spain, volume 6.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Automatic identification and classification of misogynistic language on twitter", |
| "authors": [ |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Anzovino", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Applications of Natural Language to Information Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "57--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maria Anzovino, Elisabetta Fersini, and Paolo Rosso. 2018. Automatic identification and classification of misogynistic language on twitter. In International Conference on Applications of Natural Language to Information Systems, pages 57-64. Springer.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Hate speech detection is not as easy as you may think: A closer look at model validation (extended version). IS", |
| "authors": [ |
| { |
| "first": "Aym\u00e9", |
| "middle": [], |
| "last": "Arango", |
| "suffix": "" |
| }, |
| { |
| "first": "Jorge", |
| "middle": [], |
| "last": "P\u00e9rez", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Poblete", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aym\u00e9 Arango, Jorge P\u00e9rez, and Barbara Poblete. 2020. Hate speech detection is not as easy as you may think: A closer look at model validation (extended version). IS, page 101584.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Shared task on multilingual detection of hate", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Rangel", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valerio Basile, Cristina Bosco, Viviana Patti, Manuela Sanguinetti, Elisabetta Fersini, Debora Nozza, Fran- cisco Rangel, and Paolo Rosso. 2019. Shared task on multilingual detection of hate. SemEval 2019, Task 5.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Overview of the evalita 2018 hate speech detection task", |
| "authors": [ |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Dell'orletta Felice", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Poletto", |
| "suffix": "" |
| }, |
| { |
| "first": "Tesconi", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Maurizio", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "EVALITA 2018-Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian", |
| "volume": "2263", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cristina Bosco, Dell'Orletta Felice, Fabio Poletto, Manuela Sanguinetti, and Tesconi Maurizio. 2018. Overview of the evalita 2018 hate speech detection task. In EVALITA 2018-Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian, volume 2263, pages 1-9. CEUR.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Online incivility and public debate: Nasty talk", |
| "authors": [ |
| { |
| "first": "Gina", |
| "middle": [], |
| "last": "Masullo", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gina Masullo Chen. 2017. Online incivility and public debate: Nasty talk. Springer.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Automated hate speech detection and the problem of offensive language", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Davidson", |
| "suffix": "" |
| }, |
| { |
| "first": "Dana", |
| "middle": [], |
| "last": "Warmsley", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "W" |
| ], |
| "last": "Macy", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingmar", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "512--515", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Davidson, Dana Warmsley, Michael W. Macy, and Ingmar Weber. 2017. Automated hate speech detection and the problem of offensive language. pages 512-515.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Modeling the detection of textual cyberbullying", |
| "authors": [ |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Dinakar", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Henry", |
| "middle": [], |
| "last": "Lieberman", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "The Social Mobile Web, Papers from the 2011 Workshop (ICWSM)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karthik Dinakar, Roi Reichart, and Henry Lieberman. 2011. Modeling the detection of textual cyberbul- lying. In The Social Mobile Web, Papers from the 2011 Workshop (ICWSM).", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Overview of the task on automatic misogyny identification at ibereval 2018", |
| "authors": [ |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Anzovino", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "IberEval@ SEPLN", |
| "volume": "2150", |
| "issue": "", |
| "pages": "214--228", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elisabetta Fersini, Paolo Rosso, and Maria Anzovino. 2018. Overview of the task on automatic misogyny identification at ibereval 2018. IberEval@ SEPLN, 2150:214-228.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A hierarchically-labeled portuguese hate speech dataset", |
| "authors": [ |
| { |
| "first": "Paula", |
| "middle": [], |
| "last": "Fortuna", |
| "suffix": "" |
| }, |
| { |
| "first": "Joao", |
| "middle": [], |
| "last": "Rocha Da", |
| "suffix": "" |
| }, |
| { |
| "first": "Leo", |
| "middle": [], |
| "last": "Silva", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00e9rgio", |
| "middle": [], |
| "last": "Wanner", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nunes", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Third Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "94--104", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paula Fortuna, Joao Rocha da Silva, Leo Wanner, S\u00e9r- gio Nunes, et al. 2019. A hierarchically-labeled por- tuguese hate speech dataset. In Proceedings of the Third Workshop on Abusive Language Online, pages 94-104.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A survey on automatic detection of hate speech in text", |
| "authors": [ |
| { |
| "first": "Paula", |
| "middle": [], |
| "last": "Fortuna", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00e9rgio", |
| "middle": [], |
| "last": "Nunes", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACM Computing Surveys (CSUR)", |
| "volume": "51", |
| "issue": "4", |
| "pages": "1--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paula Fortuna and S\u00e9rgio Nunes. 2018. A survey on au- tomatic detection of hate speech in text. ACM Com- puting Surveys (CSUR), 51(4):1-30.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Large scale crowdsourcing and characterization of twitter abusive behavior", |
| "authors": [ |
| { |
| "first": "Antigoni-Maria", |
| "middle": [], |
| "last": "Founta", |
| "suffix": "" |
| }, |
| { |
| "first": "Constantinos", |
| "middle": [], |
| "last": "Djouvas", |
| "suffix": "" |
| }, |
| { |
| "first": "Despoina", |
| "middle": [], |
| "last": "Chatzakou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilias", |
| "middle": [], |
| "last": "Leontiadis", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Blackburn", |
| "suffix": "" |
| }, |
| { |
| "first": "Gianluca", |
| "middle": [], |
| "last": "Stringhini", |
| "suffix": "" |
| }, |
| { |
| "first": "Athena", |
| "middle": [], |
| "last": "Vakali", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Sirivianos", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Kourtellis", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Twelfth International Conference on Web and Social Media, ICWSM 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "491--500", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antigoni-Maria Founta, Constantinos Djouvas, De- spoina Chatzakou, Ilias Leontiadis, Jeremy Black- burn, Gianluca Stringhini, Athena Vakali, Michael Sirivianos, and Nicolas Kourtellis. 2018. Large scale crowdsourcing and characterization of twitter abusive behavior. In Proceedings of the Twelfth In- ternational Conference on Web and Social Media, ICWSM 2018, Stanford, California, USA, June 25- 28, 2018, pages 491-500. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Analyzing labeled cyberbullying incidents on the instagram social network", |
| "authors": [ |
| { |
| "first": "Homa", |
| "middle": [], |
| "last": "Hosseinmardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabrina", |
| "middle": [ |
| "Arredondo" |
| ], |
| "last": "Mattson", |
| "suffix": "" |
| }, |
| { |
| "first": "Rahat", |
| "middle": [], |
| "last": "Ibn Rafiq", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Social Informatics", |
| "volume": "", |
| "issue": "", |
| "pages": "49--66", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Homa Hosseinmardi, Sabrina Arredondo Mattson, Ra- hat Ibn Rafiq, Richard Han, Qin Lv, and Shivakant Mishra. 2015. Analyzing labeled cyberbullying in- cidents on the instagram social network. In Interna- tional Conference on Social Informatics, pages 49- 66. Springer.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Abusive language detection on arabic social media", |
| "authors": [ |
| { |
| "first": "Hamdy", |
| "middle": [], |
| "last": "Mubarak", |
| "suffix": "" |
| }, |
| { |
| "first": "Kareem", |
| "middle": [], |
| "last": "Darwish", |
| "suffix": "" |
| }, |
| { |
| "first": "Walid", |
| "middle": [], |
| "last": "Magdy", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "52--56", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hamdy Mubarak, Kareem Darwish, and Walid Magdy. 2017. Abusive language detection on arabic social media. In Proceedings of the First Workshop on Abu- sive Language Online, pages 52-56.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "L-HSAB: A Levantine Twitter dataset for hate speech and abusive language", |
| "authors": [ |
| { |
| "first": "Hala", |
| "middle": [], |
| "last": "Mulki", |
| "suffix": "" |
| }, |
| { |
| "first": "Hatem", |
| "middle": [], |
| "last": "Haddad", |
| "suffix": "" |
| }, |
| { |
| "first": "Chedi", |
| "middle": [], |
| "last": "Bechikh Ali", |
| "suffix": "" |
| }, |
| { |
| "first": "Halima", |
| "middle": [], |
| "last": "Alshabani", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Third Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "111--118", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-3512" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hala Mulki, Hatem Haddad, Chedi Bechikh Ali, and Halima Alshabani. 2019. L-HSAB: A Levantine Twitter dataset for hate speech and abusive language. In Proceedings of the Third Workshop on Abusive Language Online, pages 111-118, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Defining content analysis. Content analysis guidebook", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Kimberly", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Neuendorf", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kimberly A Neuendorf. 2002. Defining content anal- ysis. Content analysis guidebook. Thousand Oaks, CA: Sage.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A joint learning approach with knowledge injection for zero-shot cross-lingual hate speech detection", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Endang Wahyu Pamungkas", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Information Processing & Management", |
| "volume": "58", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Endang Wahyu Pamungkas, Valerio Basile, and Vi- viana Patti. 2021. A joint learning approach with knowledge injection for zero-shot cross-lingual hate speech detection. Information Processing & Man- agement, 58(4):102544.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Cross-domain and cross-lingual abusive language detection: A hybrid approach with deep learning and a multilingual lexicon", |
| "authors": [ |
| { |
| "first": "Wahyu", |
| "middle": [], |
| "last": "Endang", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Pamungkas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proc. 57th ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "363--370", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Endang Wahyu Pamungkas and Viviana Patti. 2019. Cross-domain and cross-lingual abusive language detection: A hybrid approach with deep learning and a multilingual lexicon. In Proc. 57th ACL, pages 363-370.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Detecting and monitoring hate speech in twitter", |
| "authors": [ |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Carlos Pereira-Kohatsu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lara", |
| "middle": [ |
| "Quijano" |
| ], |
| "last": "S\u00e1nchez", |
| "suffix": "" |
| }, |
| { |
| "first": "Federico", |
| "middle": [], |
| "last": "Liberatore", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Sensors", |
| "volume": "19", |
| "issue": "21", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3390/s19214654" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juan Carlos Pereira-Kohatsu, Lara Quijano S\u00e1nchez, Federico Liberatore, and Miguel Camacho-Collados. 2019. Detecting and monitoring hate speech in twit- ter. Sensors, 19(21):4654.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A benchmark dataset for learning to intervene in online hate speech", |
| "authors": [ |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Bethke", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinyin", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Belding", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "Yang" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.04251" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jing Qian, Anna Bethke, Yinyin Liu, Elizabeth Beld- ing, and William Yang Wang. 2019. A bench- mark dataset for learning to intervene in online hate speech. arXiv preprint arXiv:1909.04251.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Multilingual offensive language identification with cross-lingual embeddings", |
| "authors": [ |
| { |
| "first": "Tharindu", |
| "middle": [], |
| "last": "Ranasinghe", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.05324" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tharindu Ranasinghe and Marcos Zampieri. 2020. Multilingual offensive language identification with cross-lingual embeddings. arXiv preprint arXiv:2010.05324.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Flaite: algunos apuntes etimol\u00f3gicos", |
| "authors": [ |
| { |
| "first": "Dar\u00edo", |
| "middle": [], |
| "last": "Rojas", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Alpha (Osorno)", |
| "volume": "", |
| "issue": "40", |
| "pages": "193--200", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dar\u00edo Rojas. 2015. Flaite: algunos apuntes etimol\u00f3gi- cos. Alpha (Osorno), (40):193-200.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "An italian twitter corpus of hate speech against immigrants", |
| "authors": [ |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Poletto", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Stranisci", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "2798--2895", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manuela Sanguinetti, Fabio Poletto, Cristina Bosco, Viviana Patti, and Marco Stranisci. 2018. An ital- ian twitter corpus of hate speech against immigrants. pages 2798-2895.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "The risk of racial bias in hate speech detection", |
| "authors": [ |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Sap", |
| "suffix": "" |
| }, |
| { |
| "first": "Dallas", |
| "middle": [], |
| "last": "Card", |
| "suffix": "" |
| }, |
| { |
| "first": "Saadia", |
| "middle": [], |
| "last": "Gabriel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019", |
| "volume": "1", |
| "issue": "", |
| "pages": "1668--1678", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maarten Sap, Dallas Card, Saadia Gabriel, Yejin Choi, and Noah A. Smith. 2019. The risk of racial bias in hate speech detection. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-Au- gust 2, 2019, Volume 1: Long Papers, pages 1668- 1678.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Offensive language and hate speech detection for danish", |
| "authors": [ |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Gudbjartur Ingi Sigurbergsson", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "3498--3508", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gudbjartur Ingi Sigurbergsson and Leon Derczynski. 2020. Offensive language and hate speech detection for danish. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 3498- 3508.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Efficient detection of multilingual hate speech by using interactive attention network with minimal human feedback", |
| "authors": [ |
| { |
| "first": "Fedor", |
| "middle": [], |
| "last": "Vitiugin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yasas", |
| "middle": [], |
| "last": "Senarath", |
| "suffix": "" |
| }, |
| { |
| "first": "Hemant", |
| "middle": [], |
| "last": "Purohit", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "13th ACM Web Science Conference 2021", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fedor Vitiugin, Yasas Senarath, and Hemant Purohit. 2021. Efficient detection of multilingual hate speech by using interactive attention network with minimal human feedback. In 13th ACM Web Science Confer- ence 2021.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Hateful symbols or hateful people? predictive features for hate speech detection on twitter", |
| "authors": [ |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proc. SRW@HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "88--93", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeerak Waseem and Dirk Hovy. 2016. Hateful sym- bols or hateful people? predictive features for hate speech detection on twitter. In Proc. SRW@HLT- NAACL, pages 88-93.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Towards generalisable hate speech detection: a review on obstacles and solutions", |
| "authors": [ |
| { |
| "first": "Wenjie", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| }, |
| { |
| "first": "Arkaitz", |
| "middle": [], |
| "last": "Zubiaga", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "PeerJ Computer Science", |
| "volume": "7", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenjie Yin and Arkaitz Zubiaga. 2021. Towards gener- alisable hate speech detection: a review on obstacles and solutions. PeerJ Computer Science, 7:e598.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "num": null, |
| "text": "" |
| }, |
| "TABREF3": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>: The Table shows the F-score obtained us-ing different baselines in different classification tasks over our dataset. Baselines: dummy = random pre-dictions; seed = all messages containing one of the offensive seeds used for recovering the dataset is pre-dicted as positive; EMB+RF = Spanish Glove Embed-dings and Radom Forest Classifier; Tasks: insult, pro-fanity/vulgarity (prof/vulg); hate and offensive (off) de-tection.</td></tr></table>", |
| "num": null, |
| "text": "" |
| }, |
| "TABREF5": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>: Cross-lingual experiments using there differ-ent datasets: English (Basile et al., 2019) (EN), Spanish (Basile et al., 2019) (ES), and our Spanish dataset re-covered from Chile (CL). Models: ACL19 (Pamungkas and Patti, 2019); EMNLP20 (Ranasinghe and Zampieri, 2020); ECML20 (Aluru et al., 2020); WEBSCi21 (Vi-tiugin et al., 2021).</td></tr></table>", |
| "num": null, |
| "text": "" |
| }, |
| "TABREF7": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>: Cross-cultural experiments using two different datasets: Spanish (Basile et al., 2019) (ES) and our Spanish dataset recovered from Chile (CL). Models: ACL19 (Pamungkas and Patti, 2019); EMNLP20</td></tr></table>", |
| "num": null, |
| "text": "" |
| } |
| } |
| } |
| } |