| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T05:10:38.553929Z" |
| }, |
| "title": "HateBERT: Retraining BERT for Abusive Language Detection in English", |
| "authors": [ |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Caselli", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Groningen University of Turin University of Passau", |
| "location": {} |
| }, |
| "email": "t.caselli@rug.nl" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Groningen University of Turin University of Passau", |
| "location": {} |
| }, |
| "email": "valerio.basile@unito.it" |
| }, |
| { |
| "first": "Jelena", |
| "middle": [], |
| "last": "Mitrovi\u0107", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Groningen University of Turin University of Passau", |
| "location": {} |
| }, |
| "email": "jelena.mitrovic@uni-passau.de" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Granitzer", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Groningen University of Turin University of Passau", |
| "location": {} |
| }, |
| "email": "michael.granitzer@uni-passau.de" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We introduce HateBERT, a retrained BERT model for abusive language detection in English. The model was trained on RAL-E, a large-scale dataset of Reddit comments in English from communities banned for being offensive, abusive, or hateful that we have curated and made available to the public. We present the results of a detailed comparison between a general pre-trained language model and the retrained version on three English datasets for offensive, abusive language and hate speech detection tasks. In all datasets, HateBERT outperforms the corresponding general BERT model. We also discuss a battery of experiments comparing the portability of the fine-tuned models across the datasets, suggesting that portability is affected by compatibility of the annotated phenomena.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We introduce HateBERT, a retrained BERT model for abusive language detection in English. The model was trained on RAL-E, a large-scale dataset of Reddit comments in English from communities banned for being offensive, abusive, or hateful that we have curated and made available to the public. We present the results of a detailed comparison between a general pre-trained language model and the retrained version on three English datasets for offensive, abusive language and hate speech detection tasks. In all datasets, HateBERT outperforms the corresponding general BERT model. We also discuss a battery of experiments comparing the portability of the fine-tuned models across the datasets, suggesting that portability is affected by compatibility of the annotated phenomena.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The development of systems for the automatic identification of abusive language phenomena has followed a common trend in NLP: feature-based linear classifiers (Waseem and Hovy, 2016; Ribeiro et al., 2018; Ibrohim and Budi, 2019) , neural network architectures (e.g., CNN or Bi-LSTM) (Kshirsagar et al., 2018; Mishra et al., 2018; Mitrovi\u0107 et al., 2019; Sigurbergsson and Derczynski, 2020) , and fine-tuning pre-trained language models, e.g., BERT, RoBERTa, a.o., (Liu et al., 2019; Swamy et al., 2019) . Results vary both across datasets and architectures, with linear classifiers qualifying as very competitive, if not better, when compared to neural networks. On the other hand, systems based on pre-trained language models have reached new state-of-the-art results. One issue with these pretrained models is that the training language variety makes them well suited for general-purpose language understanding tasks, and it highlights their limits with more domain-specific language varieties. To address this, there is a growing inter- est in generating domain-specific BERT-like pretrained language models, such as AlBERTo (Polignano et al., 2019) or TweetEval (Barbieri et al., 2020) for Twitter, BioBERT for the biomedical domain in English (Lee et al., 2019) , FinBERT for the financial domain in English (Yang et al., 2020) , and LEGAL-BERT for the legal domain in English (Chalkidis et al., 2020) . We introduce HateBERT, a pre-trained BERT model for abusive language phenomena in social media in English.", |
| "cite_spans": [ |
| { |
| "start": 159, |
| "end": 182, |
| "text": "(Waseem and Hovy, 2016;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 183, |
| "end": 204, |
| "text": "Ribeiro et al., 2018;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 205, |
| "end": 228, |
| "text": "Ibrohim and Budi, 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 283, |
| "end": 308, |
| "text": "(Kshirsagar et al., 2018;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 309, |
| "end": 329, |
| "text": "Mishra et al., 2018;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 330, |
| "end": 352, |
| "text": "Mitrovi\u0107 et al., 2019;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 353, |
| "end": 388, |
| "text": "Sigurbergsson and Derczynski, 2020)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 463, |
| "end": 481, |
| "text": "(Liu et al., 2019;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 482, |
| "end": 501, |
| "text": "Swamy et al., 2019)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1127, |
| "end": 1151, |
| "text": "(Polignano et al., 2019)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1165, |
| "end": 1188, |
| "text": "(Barbieri et al., 2020)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1247, |
| "end": 1265, |
| "text": "(Lee et al., 2019)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1312, |
| "end": 1331, |
| "text": "(Yang et al., 2020)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 1381, |
| "end": 1405, |
| "text": "(Chalkidis et al., 2020)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Abusive language phenomena fall along a wide spectrum including, a.o., microaggression, stereotyping, offense, abuse, hate speech, threats, and doxxing (Jurgens et al., 2019) . Current approaches have focus on a limited range, namely offensive language, abusive language, and hate speech. The connections among these phenomena have only superficially been accounted for, resulting in a fragmented picture, with a variety of definitions, and (in)compatible annotations (Waseem et al., 2017) . Poletto et al. (2020) introduce a graphical visualisation ( Figure 1 ) of the connections among abusive language phenomena according to the definitions in previous work (Waseem and Hovy, 2016; Fortuna and Nunes, 2018; Malmasi and Zampieri, 2018; Zampieri et al., 2019) . When it comes to offensive language, abusive language, and hate speech, the distinguishing factor is their level of specificity. This makes offensive language the most generic form of abusive language phenomena and hate speech the most specific, with abusive language being somewhere in the middle. Such differences are a major issue for the study of portability of models. Previous work (Karan and\u0160najder, 2018; Benk, 2019; Pamungkas and Patti, 2019; Rizoiu et al., 2019) has addressed this task by conflating portability with generalizability, forcing datasets with different phenomena into homogenous annotations by collapsing labels into (binary) macro-categories. In our portability experiments, we show that the behavior of HateBERT can be explained by accounting for these difference in specificity across the abusive language phenomena.", |
| "cite_spans": [ |
| { |
| "start": 152, |
| "end": 174, |
| "text": "(Jurgens et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 468, |
| "end": 489, |
| "text": "(Waseem et al., 2017)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 492, |
| "end": 513, |
| "text": "Poletto et al. (2020)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 661, |
| "end": 684, |
| "text": "(Waseem and Hovy, 2016;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 685, |
| "end": 709, |
| "text": "Fortuna and Nunes, 2018;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 710, |
| "end": 737, |
| "text": "Malmasi and Zampieri, 2018;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 738, |
| "end": 760, |
| "text": "Zampieri et al., 2019)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1151, |
| "end": 1175, |
| "text": "(Karan and\u0160najder, 2018;", |
| "ref_id": null |
| }, |
| { |
| "start": 1176, |
| "end": 1187, |
| "text": "Benk, 2019;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1188, |
| "end": 1214, |
| "text": "Pamungkas and Patti, 2019;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1215, |
| "end": 1235, |
| "text": "Rizoiu et al., 2019)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 552, |
| "end": 560, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our key contributions are: (i.) additional evidence that further pre-training is a viable strategy to obtain domain-specific or language varietyoriented models in a fast and cheap way; (ii.) the release of HateBERT, a pre-trained BERT for abusive language phenomena, intended to boost research in this area; (iii.) the release of a large-scale dataset of social media posts in English from communities banned for being offensive, abusive, or hateful.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Further pre-training of transformer based pretrained language models is becoming more and more popular as a competitive, effective, and fast solution to adapt pre-trained language models to new language varieties or domains (Barbieri et al., 2020; Lee et al., 2019; Yang et al., 2020; Chalkidis et al., 2020) , especially in cases where raw data are scarce to generate a BERT-like model from scratch (Gururangan et al., 2020) . This is the case of abusive language phenomena. However, for these phenomena an additional predicament with respect to previous work is that the options for suitable and representative collections of data are very limited. Directly scraping messages containing profanities would not be the best option as lots of potentially useful data may be missed. Graumas et al. (2019) have used tweets about controversial topics to generate offensive-loaded embeddings, but their approach presents some limits. On the other hand, Merenda et al. (2018) have shown the effectiveness of using messages from potentially abusive-oriented on-line communities to generate so-called hate embeddings. More recently, Papakyriakopoulos et al. (2020) have shown that biased word embeddings can be beneficial. We follow the idea of exploiting biased embeddings by creating them using messages from banned communities in Reddit.", |
| "cite_spans": [ |
| { |
| "start": 224, |
| "end": 247, |
| "text": "(Barbieri et al., 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 248, |
| "end": 265, |
| "text": "Lee et al., 2019;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 266, |
| "end": 284, |
| "text": "Yang et al., 2020;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 285, |
| "end": 308, |
| "text": "Chalkidis et al., 2020)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 400, |
| "end": 425, |
| "text": "(Gururangan et al., 2020)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 780, |
| "end": 801, |
| "text": "Graumas et al. (2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 947, |
| "end": 968, |
| "text": "Merenda et al. (2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1124, |
| "end": 1155, |
| "text": "Papakyriakopoulos et al. (2020)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HateBERT: Re-training BERT with Abusive Online Communities", |
| "sec_num": "2" |
| }, |
| { |
| "text": "RAL-E: the Reddit Abusive Language English dataset Reddit is a popular social media outlet where users share and discuss content. The website is organized into user-created and user-moderated communities known as subreddits, being de facto on-line communities. In 2015, Reddit strengthened its content policies and banned several subreddits (Chandrasekharan et al., 2017) . We retrieved a large list of banned communities in English from different sources including official posts by the Reddit administrators and Wikipedia pages. 1 We then selected only communities that were banned for being deemed to host or promote offensive, abusive, and/or hateful content (e.g., expressing harassment, bullying, inciting/promoting violence, inciting/promoting hate). We collected the posts from these communities by crawling a publicly available collection of Reddit comments. 2 For each post, we kept only the text and the name of the community. The resulting collection comprises 1,492,740 messages from a period between January 2012 and June 2015, for a total of 43,820,621 tokens. The vocabulary of RAL-E is composed of 342,377 types and the average post length is 32.25 tokens. We further check the presence of explicit signals of abusive language phenomena using a list of offensive words. We selected all words with an offensiveness scores equal or higher than 0.75 from Wiegand et al. (2018)'s dictionary. We found that explicit offensive terms represent 1.2% of the tokens and that only 260,815 messages contain at least one offensive term. RAL-E is skewed since not all communities have the same amount of messages. The list of selected communities with their respective number of retrieved messages is reported in Table A .1 and the top 10 offensive terms are illustrated in Table A .2 in Appendix A.", |
| "cite_spans": [ |
| { |
| "start": 341, |
| "end": 371, |
| "text": "(Chandrasekharan et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 531, |
| "end": 532, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1716, |
| "end": 1724, |
| "text": "Table A", |
| "ref_id": null |
| }, |
| { |
| "start": 1778, |
| "end": 1785, |
| "text": "Table A", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "HateBERT: Re-training BERT with Abusive Online Communities", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Creating HateBERT From the RAL-E dataset, we used 1,478,348 messages (for a total of 43,379,350 tokens) to re-train the English BERT base-uncased model 3 by applying the Masked Language Model (MLM) objective. The remaining 149,274 messages (441,271 tokens) have been used as test set. We retrained for 100 epochs (al-most 2 million steps) in batches of 64 samples, including up to 512 sentencepiece tokens. We used Adam with learning rate 5e-5. We trained using the huggingface code 4 on one Nvidia V100 GPU. The result is a shifted BERT model, HateBERT base-uncased, along two dimensions: (i.) language variety (i.e. social media); and (ii.) polarity (i.e., offense-, abuse-, and hate-oriented model). Since our retraining does not change the vocabulary, we verified that HateBERT has shifted towards abusive language phenomena by using the MLM on five template sentences of the form \"[someone] is a(n)/ are [MASK]\". The template has been selected because it can trigger biases in the model's representations. We changed [someone] with any of the following tokens: \"you\", \"she\", \"he\", \"women\", \"men\" Although not exhaustive, HateBERT consistently present profanities or abusive terms as mask fillers, while this very rarely occurs with the generic BERT. Table 1 illustrates the results for \"women\". ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1255, |
| "end": 1262, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "HateBERT: Re-training BERT with Abusive Online Communities", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To verify the usefulness of HateBERT for detecting abusive language phenomena, we run a set of experiments on three English datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "OffensEval 2019 (Zampieri et al., 2019) the dataset contains 14,100 tweets annotated for offensive language. According to the task definition, a message is labelled as offensive if \"it contains any form of non-acceptable language (profanity) or a targeted offense, which can be veiled or direct.\" (Zampieri et al., 2019, pg. 76 AbusEval (Caselli et al., 2020) This dataset has been obtained by adding a layer of abusive lan-guage annotation to OffensEval 2019. Abusive language is defined as a specific case of offensive language, namely \"hurtful language that a speaker uses to insult or offend another individual or a group of individuals based on their personal qualities, appearance, social status, opinions, statements, or actions.\" (Caselli et al., 2020, pg. 6197 ). The main difference with respect to offensive language is the exclusion of isolated profanities or untargeted messages from the positive class. The size of the dataset is the same as OffensEval 2019.The differences concern the distribution of the positive class which results in 2,749 in training and 178 in test.", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 39, |
| "text": "(Zampieri et al., 2019)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 297, |
| "end": 327, |
| "text": "(Zampieri et al., 2019, pg. 76", |
| "ref_id": null |
| }, |
| { |
| "start": 337, |
| "end": 359, |
| "text": "(Caselli et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 738, |
| "end": 769, |
| "text": "(Caselli et al., 2020, pg. 6197", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "HatEval The English portion of the dataset contains 13,000 tweets annotated for hate speech against migrants and women. The authors define hate speech as \"any communication that disparages a person or a group on the basis of some characteristic such as race, color, ethnicity, gender, sexual orientation, nationality, religion, or other characteristics.\" (Basile et al., 2019, pg. 54) . Only hateful messages targeting migrants and women belong to the positive class, leaving any other message (including offensive or abusive against other targets) to the negative class. The training set is composed of 10,000 messages and the test contains 3,000. Both training and test contain an equal amount of messages with respect to the targets, i.e., 5,000 each in training and 1,500 each in test. This does not hold for the distribution of the positive class, where 4,165 messages are present in the training and 1,252 in the test set.", |
| "cite_spans": [ |
| { |
| "start": 355, |
| "end": 384, |
| "text": "(Basile et al., 2019, pg. 54)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "All datasets are imbalanced between positive and negative classes and they target phenomena that vary along the specificity dimension. This allows us to evaluate both the robusteness and the portability of HateBERT. We applied the same pre-processing steps and hyperparameters when fine-tuning both the generic BERT and HateBERT. Pre-processing steps and hyperparameters (Table A. 3) are more closely detailed in the Appendix B. Table 2 illustrates the results on each dataset (in-dataset evaluation), while Table 3 reports on the portability experiments (cross-dataset evaluation). The same evaluation metric from the original tasks, or paper, is applied, i.e., macro-averaged F1 of the positive and negative classes.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 371, |
| "end": 380, |
| "text": "(Table A.", |
| "ref_id": null |
| }, |
| { |
| "start": 429, |
| "end": 436, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 508, |
| "end": 515, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The in-domain results confirm the validity of the re-training approach to generate better models for detection of abusive language phenomena, with HateBERT largely outperforming the corre- sponding generic model. A detailed analysis per class shows that the improvements affect both the positive and the negative classes, suggesting that HateBERT is more robust. The use of data from a different social media platform does not harm the fine-tuning stage of the retrained model, opening up possibilities of cross-fertilization studies across social media platforms. HateBERT beats the stateof-the-art for AbusEval, achieving competitive results on OffensEval and HatEval. In particular, HateBERT would rank #4 on OffensEval and #6 on HatEval, obtaining the second best F1 score on the positive class.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The portability experiments were run using the best model for each of the in-dataset experiments. Our results show that HateBERT ensures better portability than a generic BERT model, especially when going from generic abusive language phenomena (i.e., offensive language) towards more specific ones (i.e., abusive language or hate speech). This behaviour is expected and provides empirical evidence to the differences across the annotated phenomena. We also claim that HateBERT consistently obtains better representations of the targeted phenomena. This is evident when looking at the dif- ferences in False Positives and False Negatives for the positive class, measured by means of Precision and Recall, respectively. As illustrated in Table 4 , HateBERT always obtains a higher Precision score than BERT when fine-tuned on a generic abusive phenomenon and applied to more specific ones, at a very low cost for Recall. The unexpected higher Precision of HateBERT fine-tuned on AbusEval and tested on OffensEval 2019 (i.e., from specific to generic) is due to the datasets sharing same data distribution. Indeed, the results of the same model against HatEval support our analysis.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 737, |
| "end": 744, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "This contribution introduces HateBERT base uncased, 5 a pre-trained language model for abusive language phenomena in English. We confirm that further pre-training is an effective and cheap strategy to port pre-trained language models to other language varieties. The in-dataset evaluation shows that HateBERT consistently outperforms a generic BERT across different abusive language phenomena, such as offensive language (Offen-sEval 2019), abusive language (AbusEval), and hate speech (HatEval). The cross-dataset experiments show that HateBERT obtains robust representations of each abusive language phenomenon against which it has been fine-tuned. In particular, the cross-dataset experiments have provided (i.) further empirical evidence on the relationship among three abusive language phenomena along the dimension of specificity; (ii.) empirical support to the validity of the annotated data; (iii.) a principled explanation for the different performances of HateBERT and BERT.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Directions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A known issue concerning HateBERT is its bias toward the subreddit r/fatpeoplehate. To address this and other balancing issues, we retrieved an additional1.3M messages. This has allowed us to add 712,583 new messages to 12 subreddits listed in Table A .1, and identify three additional ones (r/uncensorednews, r/europeannationalism, and r/farright), for a total of 597,609 messages. This new data is currently used to extend HateBERT.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 244, |
| "end": 251, |
| "text": "Table A", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion and Future Directions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Future work will focus on two directions: (i.) investigating to what extent the embedding representations of HateBERT are actually different from a general BERT pre-trained model, and (ii.) investigating the connections across the various abusive langauge phenomena.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Directions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The project on which this report is based was funded by the German Federal Ministry of Education and Research (BMBF) under the funding code 01-S20049. The author is responsible for the content of this publication.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper, the authors introduce HateBERT, a pre-trained language model for the study of abusive language phenomena in social media in English. HateBERT is unique because (i.) it is based on further pre-training of an existing pre-trained language model (i.e., BERT base-uncased) rather than training it from scratch, thus reducing the environmental impact of its creation; 6 (ii.) it uses a large collection of messages from communities that have been deemed to violate the content policy of a social media platform, namely Reddit, because of expressing harassment, bullying, incitement of violence, hate, offense, and abuse. The judgment on policy violation has been made by the community administrators and moderators. We consider 6 The Nvidia V100 GPU we used is shared and it has a maximum number of continuous reserved time of 72 hours. In total, it took 18 days to complete the 2 million retraining steps.", |
| "cite_spans": [ |
| { |
| "start": 738, |
| "end": 739, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ethical Statement", |
| "sec_num": null |
| }, |
| { |
| "text": "this dataset for further pre-training more ecologically representative of the expressions of different abusive language phenomena in English than the use of manually annotated datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ethical Statement", |
| "sec_num": null |
| }, |
| { |
| "text": "The collection of banned subreddits has been retrieved from a publicly available collection of Reddit, obtained through the Reddit API and in compliance with Reddit's terms of use. From this collection, we generated the RAL-E dataset. RAL-E will be publicly released (it is accessible also at review phase in the Supplementary Materials). While its availability may have an important impact in boosting research on abusive language phenomena, especially by making natural interactions in online communities available, we are also aware of the risks of privacy violations for owners of the messages. This is one of the reasons why at this stage, we only make available in RAL-E the content of the message without metadata such as the screen name of the author and the community where the message was posted. Usernames and subreddit names have not been used to retrain the models. This reduces the risks of privacy leakage from the retrained models. Since the training material comes from banned community it is impossible and impracticable to obtain meaningful consent from the users (or redditers). In compliance with the Association for Internet Researchers Ethical Guidelines 7 , we consider that: not making available the username and the specific community are the only reliable ways to protect users' privacy. We have also manually checked (for a small portion of the messages) whether it is possible to retrieve these messages by actively searching copy-paste the text of the message in Reddit. In none of the cases were we able to obtain a positive result.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ethical Statement", |
| "sec_num": null |
| }, |
| { |
| "text": "There are numerous benefits from using such models to monitor the spread of abusive language phenomena in social media. Among them, we mention the following: (i.) reducing exposure to harmful content in social media; (ii.) contributing to the creation of healthier online interactions; and (iii.) promoting positive contagious behaviors and interactions (Matias, 2019) . Unfortunately, work in this area is not free from potentially negative impacts. The most direct is a risk of promoting misrepresentation. HateBERT is an intrinsically biased pre-trained language model. The fine-tuned models that can be obtained are not overgenerating the positive classes, but they suffer from the biases in the manually annotated data, especially for the offensive language detection task (Sap et al., 2019; Davidson et al., 2019) . Furthermore, we think that such tools must always be used under the supervision of humans. Current datasets are completely lacking the actual context of occurrence of a messsage and the associated meaning nuances that may accompany it, labelling the positive classes only on the basis of superficial linguistic cues. The deployment of models based on HateBERT \"in the wild\" without human supervision requires additional research and suitable datasets for training.", |
| "cite_spans": [ |
| { |
| "start": 354, |
| "end": 368, |
| "text": "(Matias, 2019)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 778, |
| "end": 796, |
| "text": "(Sap et al., 2019;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 797, |
| "end": 819, |
| "text": "Davidson et al., 2019)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ethical Statement", |
| "sec_num": null |
| }, |
| { |
| "text": "We see benefits in the use of HateBERT in research on abusive language phenomena as well as in the availability of RAL-E. Researchers are encouraged to be aware of the intrinsic biased nature of HateBERT and of its impacts in real-world scenarios.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ethical Statement", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/huggingface/ transformers/tree/master/src/ transformers", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "HateBERT, the fine-tuned model, and the RAL-E dataset are available at https://osf.io/tbd58/?view_ only=d90e681c672a494bb555de99fc7ae780", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://aoir.org/reports/ethics3.pdf", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Pre-processing before pre-training\u2022 all users' mentions have been substituted with a placeholder (@USER);\u2022 all URLs have been substituted with a with a placeholder (URL);\u2022 emojis have been replaced with text (e.g. \u2192 :pleading face:) using Python emoji package;\u2022 hashtag symbol has been removed from hasthtags (e.g. #kadiricinadalet \u2192 kadiricinadalet);\u2022 extra blank spaces have been replac \u00a7ed with a single space;\u2022 extra blank new lines have been removed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| }, |
| { |
| "text": "Pre-processing before fine-tuning For each dataset, we have adopted minimal pre-processing steps. In particular:\u2022 all users' mentions have been substituted with a placeholder (@USER);\u2022 all URLs have been substituted with a placeholder (URL);\u2022 emojis have been replaced with text (e.g. \u2192 :pleading face:) using Python emoji package;\u2022 hashtag symbol has been removed from hashtags (e.g. #kadiricinadalet \u2192 kadiricinadalet);\u2022 extra blank spaces have been replaced with a single space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Appendix B", |
| "sec_num": null |
| }, |
| { |
| "text": "Learning rate 1e-5 Training Epoch 5 Adam epsilon 1e-8 Max sequence length 100 Batch size 32 Num. warmup steps 0 A.3: Hyperparameters for fine-tuning BERT and Hate-BERT.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyperparameter Value", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "TweetEval: Unified benchmark and comparative evaluation for tweet classification", |
| "authors": [ |
| { |
| "first": "Francesco", |
| "middle": [], |
| "last": "Barbieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Espinosa Anke", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonardo", |
| "middle": [], |
| "last": "Neves", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "1644--1650", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francesco Barbieri, Jose Camacho-Collados, Luis Es- pinosa Anke, and Leonardo Neves. 2020. TweetE- val: Unified benchmark and comparative evaluation for tweet classification. In Findings of the Associ- ation for Computational Linguistics: EMNLP 2020, pages 1644-1650, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "SemEval-2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco Manuel Rangel", |
| "middle": [], |
| "last": "Pardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "54--63", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valerio Basile, Cristina Bosco, Elisabetta Fersini, Debora Nozza, Viviana Patti, Francisco Manuel Rangel Pardo, Paolo Rosso, and Manuela San- guinetti. 2019. SemEval-2019 task 5: Multilin- gual detection of hate speech against immigrants and women in twitter. In Proceedings of the 13th Inter- national Workshop on Semantic Evaluation, pages 54-63, Minneapolis, Minnesota, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Data Augmentation in Deep Learning for Hate Speech Detection in Lower Resource Settings", |
| "authors": [ |
| { |
| "first": "Michaela", |
| "middle": [], |
| "last": "Benk", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michaela Benk. 2019. Data Augmentation in Deep Learning for Hate Speech Detection in Lower Re- source Settings. Ph.D. thesis, Universit\u00e4t Z\u00fcrich.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "2020. I feel offended, don't be abusive! implicit/explicit messages in offensive and abusive language", |
| "authors": [ |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Caselli", |
| "suffix": "" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Jelena", |
| "middle": [], |
| "last": "Mitrovic", |
| "suffix": "" |
| }, |
| { |
| "first": "Inga", |
| "middle": [], |
| "last": "Kartoziya", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Granitzer", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "6193--6202", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tommaso Caselli, Valerio Basile, Jelena Mitrovic, Inga Kartoziya, and Michael Granitzer. 2020. I feel of- fended, don't be abusive! implicit/explicit messages in offensive and abusive language. In Proceedings of The 12th Language Resources and Evaluation Con- ference, pages 6193-6202, Marseille, France. Euro- pean Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androutsopoulos", |
| "authors": [ |
| { |
| "first": "Ilias", |
| "middle": [], |
| "last": "Chalkidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Manos", |
| "middle": [], |
| "last": "Fergadiotis", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "2898--2904", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilias Chalkidis, Manos Fergadiotis, Prodromos Malaka- siotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. LEGAL-BERT: The muppets straight out of law school. In Findings of the Association for Com- putational Linguistics: EMNLP 2020, pages 2898- 2904, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "You can't stay here: The efficacy of reddit's 2015 ban examined through hate speech", |
| "authors": [ |
| { |
| "first": "Eshwar", |
| "middle": [], |
| "last": "Chandrasekharan", |
| "suffix": "" |
| }, |
| { |
| "first": "Umashanthi", |
| "middle": [], |
| "last": "Pavalanathan", |
| "suffix": "" |
| }, |
| { |
| "first": "Anirudh", |
| "middle": [], |
| "last": "Srinivasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Glynn", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Gilbert", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the ACM on Human-Computer Interaction", |
| "volume": "1", |
| "issue": "", |
| "pages": "1--22", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3134666" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eshwar Chandrasekharan, Umashanthi Pavalanathan, Anirudh Srinivasan, Adam Glynn, Jacob Eisenstein, and Eric Gilbert. 2017. You can't stay here: The efficacy of reddit's 2015 ban examined through hate speech. Proceedings of the ACM on Human- Computer Interaction, 1:1-22.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Racial bias in hate speech and abusive language detection datasets", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Davidson", |
| "suffix": "" |
| }, |
| { |
| "first": "Debasmita", |
| "middle": [], |
| "last": "Bhattacharya", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingmar", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Third Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "25--35", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-3504" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Davidson, Debasmita Bhattacharya, and Ing- mar Weber. 2019. Racial bias in hate speech and abusive language detection datasets. In Proceedings of the Third Workshop on Abusive Language Online, pages 25-35, Florence, Italy. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A survey on automatic detection of hate speech in text", |
| "authors": [ |
| { |
| "first": "Paula", |
| "middle": [], |
| "last": "Fortuna", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00e9rgio", |
| "middle": [], |
| "last": "Nunes", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACM Computing Surveys (CSUR)", |
| "volume": "51", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paula Fortuna and S\u00e9rgio Nunes. 2018. A survey on au- tomatic detection of hate speech in text. ACM Com- puting Surveys (CSUR), 51(4):85.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Twitter-based Polarised Embeddings for Abusive Language Detection", |
| "authors": [ |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Graumas", |
| "suffix": "" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Caselli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "2019 8th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--7", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leon Graumas, Roy David, and Tommaso Caselli. 2019. Twitter-based Polarised Embeddings for Abu- sive Language Detection. In 2019 8th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW), pages 1-7.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Don't stop pretraining: Adapt language models to domains and tasks", |
| "authors": [ |
| { |
| "first": "Ana", |
| "middle": [], |
| "last": "Suchin Gururangan", |
| "suffix": "" |
| }, |
| { |
| "first": "Swabha", |
| "middle": [], |
| "last": "Marasovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Swayamdipta", |
| "suffix": "" |
| }, |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Doug", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Downey", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8342--8360", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.740" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Suchin Gururangan, Ana Marasovi\u0107, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8342-8360, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Multi-label Hate Speech and Abusive Language Detection in Indonesian Twitter", |
| "authors": [ |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Okky Ibrohim", |
| "suffix": "" |
| }, |
| { |
| "first": "Indra", |
| "middle": [], |
| "last": "Budi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Third Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "46--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Okky Ibrohim and Indra Budi. 2019. Multi-label Hate Speech and Abusive Language De- tection in Indonesian Twitter. In Proceedings of the Third Workshop on Abusive Language Online, pages 46-57.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A just and comprehensive strategy for using NLP to address online abuse", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Jurgens", |
| "suffix": "" |
| }, |
| { |
| "first": "Libby", |
| "middle": [], |
| "last": "Hemphill", |
| "suffix": "" |
| }, |
| { |
| "first": "Eshwar", |
| "middle": [], |
| "last": "Chandrasekharan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3658--3666", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1357" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Jurgens, Libby Hemphill, and Eshwar Chan- drasekharan. 2019. A just and comprehensive strat- egy for using NLP to address online abuse. In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 3658- 3666, Florence, Italy. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Cross-domain detection of abusive language online", |
| "authors": [ |
| { |
| "first": "Mladen", |
| "middle": [], |
| "last": "Karan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan\u0161najder", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2nd Workshop on Abusive Language Online (ALW2)", |
| "volume": "", |
| "issue": "", |
| "pages": "132--137", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5117" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mladen Karan and Jan\u0160najder. 2018. Cross-domain detection of abusive language online. In Proceed- ings of the 2nd Workshop on Abusive Language On- line (ALW2), pages 132-137, Brussels, Belgium. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Predictive embeddings for hate speech detection on twitter", |
| "authors": [ |
| { |
| "first": "Rohan", |
| "middle": [], |
| "last": "Kshirsagar", |
| "suffix": "" |
| }, |
| { |
| "first": "Tyrus", |
| "middle": [], |
| "last": "Cukuvac", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathy", |
| "middle": [], |
| "last": "Mckeown", |
| "suffix": "" |
| }, |
| { |
| "first": "Susan", |
| "middle": [], |
| "last": "Mcgregor", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2nd Workshop on Abusive Language Online (ALW2)", |
| "volume": "", |
| "issue": "", |
| "pages": "26--32", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5104" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rohan Kshirsagar, Tyrus Cukuvac, Kathy McKeown, and Susan McGregor. 2018. Predictive embeddings for hate speech detection on twitter. In Proceedings of the 2nd Workshop on Abusive Language Online (ALW2), pages 26-32, Brussels, Belgium. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "BioBERT: a pretrained biomedical language representation model for biomedical text mining", |
| "authors": [ |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Wonjin", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungdong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghyeon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunkyu", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [], |
| "last": "Ho So", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaewoo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Bioinformatics", |
| "volume": "36", |
| "issue": "4", |
| "pages": "1234--1240", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/bioinformatics/btz682" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2019. BioBERT: a pre- trained biomedical language representation model for biomedical text mining. Bioinformatics, 36(4):1234-1240.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "NULI at SemEval-2019 task 6: Transfer learning for offensive language detection using bidirectional transformers", |
| "authors": [ |
| { |
| "first": "Ping", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Zou", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "87--91", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S19-2011" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ping Liu, Wen Li, and Liang Zou. 2019. NULI at SemEval-2019 task 6: Transfer learning for offen- sive language detection using bidirectional trans- formers. In Proceedings of the 13th Interna- tional Workshop on Semantic Evaluation, pages 87- 91, Minneapolis, Minnesota, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Challenges in discriminating profanity from hate speech", |
| "authors": [ |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Journal of Experimental & Theoretical Artificial Intelligence", |
| "volume": "30", |
| "issue": "2", |
| "pages": "187--202", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shervin Malmasi and Marcos Zampieri. 2018. Chal- lenges in discriminating profanity from hate speech. Journal of Experimental & Theoretical Artificial In- telligence, 30(2):187-202.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Preventing harassment and increasing group participation through social norms in 2,190 online science discussions", |
| "authors": [ |
| { |
| "first": "Matias", |
| "middle": [], |
| "last": "J Nathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the National Academy of Sciences", |
| "volume": "116", |
| "issue": "", |
| "pages": "9785--9789", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J Nathan Matias. 2019. Preventing harassment and in- creasing group participation through social norms in 2,190 online science discussions. Proceedings of the National Academy of Sciences, 116(20):9785-9789.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Source-driven Representations for Hate Speech Detection", |
| "authors": [ |
| { |
| "first": "Flavio", |
| "middle": [], |
| "last": "Merenda", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [], |
| "last": "Zaghi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Caselli", |
| "suffix": "" |
| }, |
| { |
| "first": "Malvina", |
| "middle": [], |
| "last": "Nissim", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 5th Italian Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Flavio Merenda, Claudia Zaghi, Tommaso Caselli, and Malvina Nissim. 2018. Source-driven Representa- tions for Hate Speech Detection. In Proceedings of the 5th Italian Conference on Computational Lin- guistics (CLiC-it 2018), Turin, Italy.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Neural character-based composition models for abuse detection", |
| "authors": [ |
| { |
| "first": "Pushkar", |
| "middle": [], |
| "last": "Mishra", |
| "suffix": "" |
| }, |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "Yannakoudakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2nd Workshop on Abusive Language Online (ALW2)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5101" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pushkar Mishra, Helen Yannakoudakis, and Ekaterina Shutova. 2018. Neural character-based composi- tion models for abuse detection. In Proceedings of the 2nd Workshop on Abusive Language Online (ALW2), pages 1-10, Brussels, Belgium. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "nlpUP at SemEval-2019 task 6: A deep neural language model for offensive language detection", |
| "authors": [ |
| { |
| "first": "Jelena", |
| "middle": [], |
| "last": "Mitrovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Bastian", |
| "middle": [], |
| "last": "Birkeneder", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Granitzer", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "722--726", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S19-2127" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jelena Mitrovi\u0107, Bastian Birkeneder, and Michael Granitzer. 2019. nlpUP at SemEval-2019 task 6: A deep neural language model for offensive lan- guage detection. In Proceedings of the 13th Inter- national Workshop on Semantic Evaluation, pages 722-726, Minneapolis, Minnesota, USA. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Cross-domain and cross-lingual abusive language detection: A hybrid approach with deep learning and a multilingual lexicon", |
| "authors": [ |
| { |
| "first": "Wahyu", |
| "middle": [], |
| "last": "Endang", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Pamungkas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "363--370", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Endang Wahyu Pamungkas and Viviana Patti. 2019. Cross-domain and cross-lingual abusive language detection: A hybrid approach with deep learning and a multilingual lexicon. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics: Student Research Workshop, pages 363-370.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Bias in word embeddings", |
| "authors": [ |
| { |
| "first": "Orestis", |
| "middle": [], |
| "last": "Papakyriakopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Hegelich", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan Carlos Medina", |
| "middle": [], |
| "last": "Serrano", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabienne", |
| "middle": [], |
| "last": "Marco", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "FAT* '20: Conference on Fairness, Accountability, and Transparency", |
| "volume": "", |
| "issue": "", |
| "pages": "446--457", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3351095.3372843" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Orestis Papakyriakopoulos, Simon Hegelich, Juan Car- los Medina Serrano, and Fabienne Marco. 2020. Bias in word embeddings. In FAT* '20: Confer- ence on Fairness, Accountability, and Transparency, Barcelona, Spain, January 27-30, 2020, pages 446- 457. ACM.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Resources and Benchmark Corpora for Hate Speech Detection: a Systematic Review", |
| "authors": [ |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Poletto", |
| "suffix": "" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Language Resources and Evaluation", |
| "volume": "54", |
| "issue": "3", |
| "pages": "1--47", |
| "other_ids": { |
| "DOI": [ |
| "https://link.springer.com/article/10.1007/s10579-020-09502-8" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabio Poletto, Valerio Basile, Manuela Sanguinetti, Cristina Bosco, and Viviana Patti. 2020. Resources and Benchmark Corpora for Hate Speech Detection: a Systematic Review. Language Resources and Evaluation, 54(3):1-47.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Hate speech detection through alberto italian language understanding model", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Polignano", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierpaolo", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "De Gemmis", |
| "suffix": "" |
| }, |
| { |
| "first": "Giovanni", |
| "middle": [], |
| "last": "Semeraro", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 3rd Workshop on Natural Language for Artificial Intelligence co-located with the 18th International Conference of the Italian Association for Artificial Intelligence (AIIA 2019)", |
| "volume": "2521", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Polignano, Pierpaolo Basile, Marco de Gem- mis, and Giovanni Semeraro. 2019. Hate speech detection through alberto italian language under- standing model. In Proceedings of the 3rd Work- shop on Natural Language for Artificial Intelligence co-located with the 18th International Conference of the Italian Association for Artificial Intelligence (AIIA 2019), Rende, Italy, November 19th-22nd, 2019, volume 2521 of CEUR Workshop Proceedings. CEUR-WS.org.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Characterizing and detecting hateful users on twitter", |
| "authors": [ |
| { |
| "first": "Pedro", |
| "middle": [ |
| "H" |
| ], |
| "last": "Manoel Horta Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuri", |
| "middle": [ |
| "A" |
| ], |
| "last": "Calais", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Santos", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "F" |
| ], |
| "last": "Virg\u00edlio", |
| "suffix": "" |
| }, |
| { |
| "first": "Wagner", |
| "middle": [], |
| "last": "Almeida", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Meira", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Twelfth International AAAI Conference on Web and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manoel Horta Ribeiro, Pedro H Calais, Yuri A Santos, Virg\u00edlio AF Almeida, and Wagner Meira Jr. 2018. Characterizing and detecting hateful users on twit- ter. In Twelfth International AAAI Conference on Web and Social Media.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Transfer learning for hate speech detection in social media", |
| "authors": [ |
| { |
| "first": "Marian-Andrei", |
| "middle": [], |
| "last": "Rizoiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianyu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabriela", |
| "middle": [], |
| "last": "Ferraro", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanna", |
| "middle": [], |
| "last": "Suominen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.03829" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marian-Andrei Rizoiu, Tianyu Wang, Gabriela Fer- raro, and Hanna Suominen. 2019. Transfer learn- ing for hate speech detection in social media. arXiv preprint arXiv:1906.03829.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "The risk of racial bias in hate speech detection", |
| "authors": [ |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Sap", |
| "suffix": "" |
| }, |
| { |
| "first": "Dallas", |
| "middle": [], |
| "last": "Card", |
| "suffix": "" |
| }, |
| { |
| "first": "Saadia", |
| "middle": [], |
| "last": "Gabriel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1668--1678", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1163" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maarten Sap, Dallas Card, Saadia Gabriel, Yejin Choi, and Noah A. Smith. 2019. The risk of racial bias in hate speech detection. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 1668-1678, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Offensive Language and Hate Speech Detection for Danish", |
| "authors": [ |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Gudbjartur Ingi Sigurbergsson", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gudbjartur Ingi Sigurbergsson and Leon Derczynski. 2020. Offensive Language and Hate Speech De- tection for Danish. In Proceedings of the 12th Language Resources and Evaluation Conference. ELRA.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Studying generalisability across abusive language detection datasets", |
| "authors": [ |
| { |
| "first": "Steve", |
| "middle": [ |
| "Durairaj" |
| ], |
| "last": "Swamy", |
| "suffix": "" |
| }, |
| { |
| "first": "Anupam", |
| "middle": [], |
| "last": "Jamatia", |
| "suffix": "" |
| }, |
| { |
| "first": "Bj\u00f6rn", |
| "middle": [], |
| "last": "Gamb\u00e4ck", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "940--950", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K19-1088" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steve Durairaj Swamy, Anupam Jamatia, and Bj\u00f6rn Gamb\u00e4ck. 2019. Studying generalisability across abusive language detection datasets. In Proceed- ings of the 23rd Conference on Computational Nat- ural Language Learning (CoNLL), pages 940-950, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Understanding abuse: A typology of abusive language detection subtasks", |
| "authors": [ |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Davidson", |
| "suffix": "" |
| }, |
| { |
| "first": "Dana", |
| "middle": [], |
| "last": "Warmsley", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingmar", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "78--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeerak Waseem, Thomas Davidson, Dana Warmsley, and Ingmar Weber. 2017. Understanding abuse: A typology of abusive language detection subtasks. In Proceedings of the First Workshop on Abusive Lan- guage Online, pages 78-84.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Hateful symbols or hateful people? predictive features for hate speech detection on twitter", |
| "authors": [ |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the NAACL Student Research Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "88--93", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeerak Waseem and Dirk Hovy. 2016. Hateful sym- bols or hateful people? predictive features for hate speech detection on twitter. In Proceedings of the NAACL Student Research Workshop, pages 88-93, San Diego, California. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Inducing a lexicon of abusive words-a feature-based approach", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Wiegand", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Ruppenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Schmidt", |
| "suffix": "" |
| }, |
| { |
| "first": "Clayton", |
| "middle": [], |
| "last": "Greenberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1046--1056", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Wiegand, Josef Ruppenhofer, Anna Schmidt, and Clayton Greenberg. 2018. Inducing a lexicon of abusive words-a feature-based approach. In Pro- ceedings of the 2018 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long Papers), pages 1046-1056.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Finbert: A pretrained language model for financial communications", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [ |
| "Christopher" |
| ], |
| "last": "Siy", |
| "suffix": "" |
| }, |
| { |
| "first": "U", |
| "middle": [ |
| "Y" |
| ], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Allen", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Yang, Mark Christopher Siy UY, and Allen Huang. 2020. Finbert: A pretrained language model for fi- nancial communications.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "SemEval-2019 task 6: Identifying and categorizing offensive language in social media (OffensEval)", |
| "authors": [ |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Noura", |
| "middle": [], |
| "last": "Farra", |
| "suffix": "" |
| }, |
| { |
| "first": "Ritesh", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "75--86", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S19-2010" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019. SemEval-2019 task 6: Identifying and catego- rizing offensive language in social media (OffensE- val). In Proceedings of the 13th International Work- shop on Semantic Evaluation, pages 75-86, Min- neapolis, Minnesota, USA. Association for Compu- tational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Abusive language phenomena and their relationships (adapted from Poletto et al. (2020)).", |
| "uris": null |
| }, |
| "TABREF0": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "MLM top 3 candidates for the templates \"Women are [MASK]\"." |
| }, |
| "TABREF1": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "). The dataset is split into training and test, with 13,240 messages in training and 860 in test. The positive class (i.e. messages labeled as offensive) are 4,400 in training and 240 in test. No development data is provided." |
| }, |
| "TABREF3": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Train</td><td>Model</td><td>OffensEval 2019</td><td colspan=\"2\">AbusEval HatEval</td></tr><tr><td>OffensEval</td><td>BERT</td><td>-</td><td>.726</td><td>.545</td></tr><tr><td>2019</td><td>HateBERT</td><td>-</td><td>.750</td><td>.547</td></tr><tr><td>AbusEval</td><td>BERT HateBERT</td><td>.710 .713</td><td>--</td><td>.611 .624</td></tr><tr><td>HatEval</td><td>BERT HateBERT</td><td>.572 .543</td><td>.590 .555</td><td>--</td></tr></table>", |
| "text": "BERT vs. HateBERT: in-dataset. Best scores in bold. For BERT and HateBERT we report the average from 5 runs and its standard deviations. Best corresponds to the best systems in the original shared tasks. Caselli et al. (2020) is the most recent result for AbusEval." |
| }, |
| "TABREF4": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "" |
| }, |
| "TABREF6": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "BERT vs. HateBERT: Portability -Precision and Recall for the positive class. Rows show the dataset used to train the model and columns the dataset used for testing. Best scores are underlined." |
| } |
| } |
| } |
| } |