| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T05:10:40.035891Z" |
| }, |
| "title": "HATE-ITA: Hate Speech Detection in Italian Social Media Text", |
| "authors": [ |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Bocconi University", |
| "location": { |
| "addrLine": "Via Sarfatti 25", |
| "settlement": "Milan", |
| "country": "Italy" |
| } |
| }, |
| "email": "debora.nozza@unibocconi.it" |
| }, |
| { |
| "first": "Federico", |
| "middle": [], |
| "last": "Bianchi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Bocconi University", |
| "location": { |
| "addrLine": "Via Sarfatti 25", |
| "settlement": "Milan", |
| "country": "Italy" |
| } |
| }, |
| "email": "f.bianchi@unibocconi.it" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Attanasio", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Bocconi University", |
| "location": { |
| "addrLine": "Via Sarfatti 25", |
| "settlement": "Milan", |
| "country": "Italy" |
| } |
| }, |
| "email": "giuseppe.attanasio3@unibocconi.it" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Warning: This paper contains examples of language that some people may find offensive. Online hate speech is a dangerous phenomenon that can (and should) be promptly counteracted properly. While Natural Language Processing has been successfully used for the purpose, many of the research efforts are directed toward the English language. This choice severely limits the classification power in non-English languages. In this paper, we test several learning frameworks for identifying hate speech in Italian text. We release HATE-ITA, a set of multilanguage models trained on a large set of English data and available Italian datasets. HATE-ITA performs better than mono-lingual models and seems to adapt well also on languagespecific slurs. We believe our findings will encourage research in other mid-to-low resource communities and provide a valuable benchmarking tool for the Italian community.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Warning: This paper contains examples of language that some people may find offensive. Online hate speech is a dangerous phenomenon that can (and should) be promptly counteracted properly. While Natural Language Processing has been successfully used for the purpose, many of the research efforts are directed toward the English language. This choice severely limits the classification power in non-English languages. In this paper, we test several learning frameworks for identifying hate speech in Italian text. We release HATE-ITA, a set of multilanguage models trained on a large set of English data and available Italian datasets. HATE-ITA performs better than mono-lingual models and seems to adapt well also on languagespecific slurs. We believe our findings will encourage research in other mid-to-low resource communities and provide a valuable benchmarking tool for the Italian community.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Online hate speech is a dangerous phenomenon that can (and should) be promptly counteracted properly. While Natural Language Processing supplies algorithms to achieve that, most research efforts are directed toward the English language. Indeed, there is now a plethora of approaches and corpora (Indurthi et al., 2019; Kennedy et al., 2020b; D'Sa et al., 2020; Mollas et al., 2022; Kiela et al., 2021, inter alia) , that can be adopted for addressing English hate speech detection.", |
| "cite_spans": [ |
| { |
| "start": 295, |
| "end": 318, |
| "text": "(Indurthi et al., 2019;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 319, |
| "end": 341, |
| "text": "Kennedy et al., 2020b;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 342, |
| "end": 360, |
| "text": "D'Sa et al., 2020;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 361, |
| "end": 381, |
| "text": "Mollas et al., 2022;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 382, |
| "end": 413, |
| "text": "Kiela et al., 2021, inter alia)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "However, this choice strongly limits the classification power in other languages where fewer resources are available, like Italian. Researchers have put a great effort into improving Italian models (Fersini et al., 2018; Sanguinetti et al., , 2020 . However, previous work does not address the task systematically, resulting in no clear evidence of the performance of these models. Consider also that a competitive baseline for hate speech detection in Italian", |
| "cite_spans": [ |
| { |
| "start": 198, |
| "end": 220, |
| "text": "(Fersini et al., 2018;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 221, |
| "end": 247, |
| "text": "Sanguinetti et al., , 2020", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "does not yet exist. Current datasets are not broad enough to cover all the protected categories and are generally based on a few thousand samples. Data annotation is a costly process, and annotating hate speech requires tremendous care.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Multi-lingual models give a possible way out of this issue. Nozza (2021) shows that combining multiple languages in training can help overcome the apparent limitations of hate speech detection models. We start from those conclusions to build up our work by collecting a large dataset of English hate speech data that we combine with some data in Italian. We use this new collection to train multi-lingual models and show the performance and examples across different Italian datasets.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 72, |
| "text": "Nozza (2021)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The contribution of this short workshop paper is thus straightforward: we thoroughly evaluate and release to the community a set of models for Italian hate speech detection obtained through fine-tuning of multi-lingual models (HATE-ITA). 1 These models are wrapped in high-level API that will allow the community to access and use these models for future research easily. These models set a new baseline on two state-of-the-art hate speech detection datasets in Italian. To the best of our knowledge, this is the first paper that showcases the use of a large English dataset in combination with a small portion of Italian to create a robust resource for hate speech detection in Italian.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Contribution 1) our experiments show that multi-lingual models can effectively be used to cover missing ground in some mid-to-low resource languages; 2) while providing researchers with strong baselines, our models can also be used to study which areas and targets are still not yet covered, thus guiding directions for future research (see Section 4.4). We release HATE-ITA as an open-source Python library 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we consider the task of hate speech as binary (hate/non-hate). To control the number of samples for each protected group in the training data, we consider the target of the hateful messages. We select six target attributes based on the type of discrimination, namely origin, gender identity, sexual orientation, religious affiliation, and disability. We consider these targets as the superset of classes able to cover the majority of dataset-specific labels. We discarded the other and none class from all the datasets because they might represent other classes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We describe the datasets we included in the training set in this work. The English corpora have been selected by filtering the ones covering our desired targets from a public list 3 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State-of-the-art Corpora", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Italian For Italian, we consider two different corpora proposed for Evalita shared tasks (Caselli et al., 2018) : the automatic misogyny identification challenge (AMI18) (Fersini et al., 2018) for hate speech towards women and the hate speech detection shared task (HaSpeeDe18) for the part related to hate speech towards immigrants proposed in . Both datasets comprise 2,500 instances for training, 500 for validation, and 1,000 for testing.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 111, |
| "text": "(Caselli et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 170, |
| "end": 192, |
| "text": "(Fersini et al., 2018)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State-of-the-art Corpora", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "English Ousidhoum et al. (2019) present MlMa, a multi-lingual multi-aspect hate speech analysis dataset in Arabic, English, and French. The dataset consists of tweets collected by querying languagespecific keywords. Mollas et al. (2022) propose ETHOS, a multilabel English hate speech detection dataset of Reddit posts. They employ an automatic pre-annotation process where the posts are first labeled with a machine learning classifier. Only the uncertain ones (within the [.4, .6] probability range) are manually labeled using a crowdsourcing platform. Following the authors, we binarise the values of each label (if value \u2265 0.5 \u2192 1 else value \u2192 0). The targets are identified only when the post is hateful, so we discard the non-hateful ones. Here, we map the targets national_origin and race to origin.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 31, |
| "text": "Ousidhoum et al. (2019)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 216, |
| "end": 236, |
| "text": "Mollas et al. (2022)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "State-of-the-art Corpora", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Kennedy et al. (2020c) collected a large set of comments from different social media sources (YouTube, Twitter, and Reddit). The annotation process has been performed via a crowdsourcing platform where each comment receives four ratings. The authors further ensured that every annotator received comments across all the hate speech scale. Since the dataset is annotated with a continuous hate score, we used a threshold set to binarise the problem: if value < -1 \u2192 0 and if value > 0.5 \u2192 1. We merged origin and race classes into the origin class. Mathew et al. (2021) collected English posts from the social media platforms Twitter and Gab. Then, they used a crowdsourcing platform for annotating each post as hate, offensive, or normal speech; annotators also have to select the target communities mentioned in the posts. Labels are aggregated, and the final one is obtained through majority voting. We discard the instance when there is no majority (i.e., the three annotators have assigned a different label). Here, we binarise the targets as suggested by the authors into toxic (hatespeech or offensive) and non-toxic (normal). We also map the targets based on the grouping made in the paper (see Table 3 in (Mathew et al., 2021) ), with the only exception of Indigenous and Refugee that we assign to origin class. Kennedy et al. (2020a) presented the Gab Hate Corpus (GHC), a multi-label English corpus of posts from the social network gab.com. Comments were annotated by at least three trained annotators with the following classes: Call for Violence, Assault on Human Dignity, or Not Hateful. Following Kennedy et al. (2020b), we aggregate the first two for obtaining the hateful class. We selected only the targets used in our study (removing political) and merged nationality/regionalism and race or ethnicity classes into the origin class. Kiela et al. (2021) introduced a novel framework for dynamically creating benchmark corpora. The annotators are asked to find adversarial examples, i.e., hard examples that a target model would misclassify. The obtained dataset also provides the target group. 4 Here, we mapped their targets to ours, removing the ones not covered. ", |
| "cite_spans": [ |
| { |
| "start": 548, |
| "end": 568, |
| "text": "Mathew et al. (2021)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1213, |
| "end": 1234, |
| "text": "(Mathew et al., 2021)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1320, |
| "end": 1342, |
| "text": "Kennedy et al. (2020a)", |
| "ref_id": null |
| }, |
| { |
| "start": 1851, |
| "end": 1870, |
| "text": "Kiela et al. (2021)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 2111, |
| "end": 2112, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1202, |
| "end": 1209, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "State-of-the-art Corpora", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Our experimental setup illustrates three aspects: 1) the performance of the different models on a train, validation, and test setup that we construct on our data, 2) the performance on different datasets (also considering two new additional datasets that we take as out-of-domain) and 3) a qualitative evaluation section in which we use explainability methods to assess which words are contributing more to the prediction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this paper, we tested different pretrained language models. As multi-lingual models: the XLM Roberta base and large models from (Conneau et al., 2020 ) (XLM-Base, XLM-Large), multilin-gualBERT 5 (mBERT), and a model pre-trained on multi-lingual twitter data (XLM-Twitter) (Barbieri et al., 2021) . As mono-lingual models for Italian: dbmdz/bert-base-italian-xxl-cased (ITA-Base-XXL) and dbmdz/bert-base-italian-cased (ITA-Base). 6 In addition, we used DeHateBert (Aluru et al., 2020), a fine-tuned mBERT model trained on . For the models we train, we run three different experimental frameworks: 1) mono-lingual (MONO), in which we train our models only on Italian data; 2) multi-lingual (MULTI), in which we combine the Italian and the English data for training; 3) zero shot, cross-lingual (ZERO), in which we train a model only with English data. All the models are tested on the Italian test data (Fersini et al., 2018; .", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 152, |
| "text": "(Conneau et al., 2020", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 275, |
| "end": 298, |
| "text": "(Barbieri et al., 2021)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 432, |
| "end": 433, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 904, |
| "end": 926, |
| "text": "(Fersini et al., 2018;", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We used the splits provided by the associated shared tasks for the Italian dataset. This setup en- 2018, we isolated 500 instances from the training to be used as the validation set. For the combined English data, we isolate 20% with stratified sampling to be used as the validation set. The details of the parameters used to fine-tune the models can be found in the Appendix A. Models are trained for 5 epochs and evaluated every 50 steps, and we select the best checkpoint considering the validation loss. Table 2 shows the results only for the models that we trained by testing on the official splits of each Italian dataset (see Section 2.2). We have found two crucial takeaways. First, the best multi-lingual model (XLM-Large) performs sensibly better than the best model trained only on mono-lingual data (mBERT). Second, models subject to multi-lingual training always outperforms mono-lingual ones. Recent research (Nozza et al., 2020) has shown that language-specific datasets are more effective when used to fine-tune language-specific models; this research suggests that training only on the small set of Italian data is not enough even when using a language-specific model: joint fine-tuning with larger datasets is an effective way of obtaining more accurate hate speech classifiers. This is a very interesting result: considering the small amount of Italian data used by the multi-lingual model, this opens future applications of multi-lingual pipelines to low-resource languages. Finally, the increase in performance of the multi-lingual framework comes directly from the Italian data we added to the training since the performance of the purely zero-shot cross-lingual models is much worse than the monolingual one. ", |
| "cite_spans": [ |
| { |
| "start": 923, |
| "end": 943, |
| "text": "(Nozza et al., 2020)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 508, |
| "end": 515, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "This section shows the results split by datasets for our multi-lingual best models and for DeHate-Bert. We show the results on the test sets of Sanguinetti et al. (2018) and AMI18 (Fersini et al., 2018) . Moreover, we also test on the complete test set of HaSpeeDe18, and the shared task re-runs HaSpeeDe20 (Sanguinetti et al., 2020) and AMI20 (Fersini et al., 2020b) . Unfortunately, DeHateBert was not fine-tuned following the guidelines described in as the authors used different splits. For this reason, we cannot evaluate the performance of this model on HaSpeeDe18 and (some examples of the examples in the test sets are used for training). Table 3 shows the results for each dataset. We do not show results for Italian models as they perform much worse (see Table 2 ). These results show that our models have consistent performance over most categories. Indeed, XLM-Twitter, beats De-HateBert by 39 and 19 points in F1 on AMI18 and AMI20 respectively. This outcome further demonstrates the need for protected group coverage in the training set.", |
| "cite_spans": [ |
| { |
| "start": 180, |
| "end": 202, |
| "text": "(Fersini et al., 2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 307, |
| "end": 333, |
| "text": "(Sanguinetti et al., 2020)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 344, |
| "end": 367, |
| "text": "(Fersini et al., 2020b)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 647, |
| "end": 654, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 765, |
| "end": 772, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results by Dataset", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We also use the recently introduced Multi-Lingual HateCheck (MHC) (R\u00f6ttger et al., 2022) . MHC is a suite of functional tests for multi-lingual hate speech detection models that extend the original English HateCheck (R\u00f6ttger et al., 2021) . MHC tests several functionalities that can affect hate prediction (e.g., counterspeech, spelling variations, use of slurs). Here, we used only the Italian subset. MHC should serve as an external testbed to validate our models.", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 88, |
| "text": "(R\u00f6ttger et al., 2022)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 216, |
| "end": 238, |
| "text": "(R\u00f6ttger et al., 2021)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on Multi-Lingual HateCheck", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Results in Table 4 show the consistent performance of our models. XLM-Twitter and XLM-Large strongly outperform the results of the original baseline proposed by R\u00f6ttger et al. (2022) . Table 4 : Results on different MULTILINGUAL HATE-CHECK. We report F1 score for the hateful and nonhateful cases, and the overall macro-F1 score. Figure 1 reports token contribution explanations of four correct predictions from our multi-lingual XLM-Large. The texts are complex examples in Italian, as standard models usually misclassify them (Nozza, 2021) . We extracted token contributions using the interpretability suite provided in Attanasio et al. (2022b) . The first two examples regard the taboo Italian expression p*rca p*ttana (literally p*rca (pig) + p*ttana (sl*t)). When used separately (porca e puttana (pig and slut)), they should be considered literally; when used together, the two words form taboo expressions that do not have a misogynistic connotation. The latter two examples regard the ambiguous Italian term finocchi. The word means fennels in a food-related context, but can also be translated to f*ggots when referred to individuals.", |
| "cite_spans": [ |
| { |
| "start": 161, |
| "end": 182, |
| "text": "R\u00f6ttger et al. (2022)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 528, |
| "end": 541, |
| "text": "(Nozza, 2021)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 622, |
| "end": 646, |
| "text": "Attanasio et al. (2022b)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 11, |
| "end": 18, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 185, |
| "end": 192, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 330, |
| "end": 338, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results on Multi-Lingual HateCheck", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "National evaluation campaigns and shared tasks played a significant role in releasing non-English corpora for hate speech detection (Wiegand et al., 2018; Mulki and Ghanem, 2021; Basile et al., 2019; Ptaszynski et al., 2019) . Indeed, the research of hate speech detection in Italian in mono-lingual settings mainly revolves around the datasets (Fersini et al., 2018; Sanguinetti et al., 2020; Fersini et al., 2020b) released for shared tasks (Bakarov, 2018; Cimino et al., 2018; Attanasio and Pastor, 2020; Lees et al., 2020; Lavergne et al., 2020; Fersini et al., 2020a; Attanasio et al., 2022a, inter alia) . In NLP, the scarcity of data in languages beyond English has generated an interest in zero-shot learning (Srivastava et al., 2018; Ponti et al., 2019; Pfeiffer et al., 2020; Wu et al., 2020; Bianchi et al., 2021 Bianchi et al., , 2022 and the application of this to hate speech detection methods (Corazza et al., 2020; Stappen et al., 2020; Aluru et al., 2020; Leite et al., 2020; Rodr\u00edguez et al., 2021; Feng et al., 2020; Pelicon et al., 2021) . In particular, Aluru et al. (2020) exploited several deep learning models and multi-lingual embeddings for performing an extensive analysis on 16 datasets in 9 different languages in few-and zero-shot learning settings. Rodr\u00edguez et al. (2021) use the pre-trained Language Agnostic BERT Sentence Embeddings (Feng et al., 2020) obtaining good results. Other research efforts focused on translating English data to enrich data availability in other languages with mixed results: Ibrohim and Budi (2019) shows that translations do not bring good results using traditional machine learning classifiers. However, more sophisticated pipelines of translation and pre-training can indeed provide some improvement over standard benchmarks (Pamungkas et al., 2021; Wang and Banko, 2021) .", |
| "cite_spans": [ |
| { |
| "start": 132, |
| "end": 154, |
| "text": "(Wiegand et al., 2018;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 155, |
| "end": 178, |
| "text": "Mulki and Ghanem, 2021;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 179, |
| "end": 199, |
| "text": "Basile et al., 2019;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 200, |
| "end": 224, |
| "text": "Ptaszynski et al., 2019)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 345, |
| "end": 367, |
| "text": "(Fersini et al., 2018;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 368, |
| "end": 393, |
| "text": "Sanguinetti et al., 2020;", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 394, |
| "end": 416, |
| "text": "Fersini et al., 2020b)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 443, |
| "end": 458, |
| "text": "(Bakarov, 2018;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 459, |
| "end": 479, |
| "text": "Cimino et al., 2018;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 480, |
| "end": 507, |
| "text": "Attanasio and Pastor, 2020;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 508, |
| "end": 526, |
| "text": "Lees et al., 2020;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 527, |
| "end": 549, |
| "text": "Lavergne et al., 2020;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 550, |
| "end": 572, |
| "text": "Fersini et al., 2020a;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 573, |
| "end": 609, |
| "text": "Attanasio et al., 2022a, inter alia)", |
| "ref_id": null |
| }, |
| { |
| "start": 717, |
| "end": 742, |
| "text": "(Srivastava et al., 2018;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 743, |
| "end": 762, |
| "text": "Ponti et al., 2019;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 763, |
| "end": 785, |
| "text": "Pfeiffer et al., 2020;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 786, |
| "end": 802, |
| "text": "Wu et al., 2020;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 803, |
| "end": 823, |
| "text": "Bianchi et al., 2021", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 824, |
| "end": 846, |
| "text": "Bianchi et al., , 2022", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 908, |
| "end": 930, |
| "text": "(Corazza et al., 2020;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 931, |
| "end": 952, |
| "text": "Stappen et al., 2020;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 953, |
| "end": 972, |
| "text": "Aluru et al., 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 973, |
| "end": 992, |
| "text": "Leite et al., 2020;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 993, |
| "end": 1016, |
| "text": "Rodr\u00edguez et al., 2021;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 1017, |
| "end": 1035, |
| "text": "Feng et al., 2020;", |
| "ref_id": null |
| }, |
| { |
| "start": 1036, |
| "end": 1057, |
| "text": "Pelicon et al., 2021)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 1367, |
| "end": 1386, |
| "text": "(Feng et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 1779, |
| "end": 1814, |
| "text": "benchmarks (Pamungkas et al., 2021;", |
| "ref_id": null |
| }, |
| { |
| "start": 1815, |
| "end": 1836, |
| "text": "Wang and Banko, 2021)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This paper presents a novel resource for Italian hate speech detection on social media text, HATE-ITA. Researchers can use this new set of models to assess the quality of new systems by providing a more reliable benchmark. However, this is just the first step. Indeed, we do not claim to have released the final model for Italian hate speech detection; HATE-ITA requires careful benchmarking to understand if it can accurately capture hate speech on other targets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 IT: Come si fa a rompere la lavatrice p*rca p*ttana", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 EN: How the hell can you break the washing machine", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 IT: Sono arrivati i finocchi", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 EN: Here come the f*ggots", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 IT: \u00c8 arrivata l'insalata di finocchi", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 EN: Here it comes the fennel salad", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://huggingface.co/MilaNLProc 2 https://github.com/MilaNLProc/ hate-ita", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://hatespeechdata.com/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/bvidgen/ Dynamically-Generated-Hate-Speech-Dataset", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This project has partially received funding from the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation program (grant agreement No. 949944, INTE-GRATOR), and by Fondazione Cariplo (grant No. 2020-4288, MONICA). Debora Nozza, Federico Bianchi, and Giuseppe Attanasio are members of the MilaNLP group and the Data and Marketing Insights Unit of the Bocconi Institute for Data Science and Analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "While promising, the results in this work should not be interpreted as a definitive assessment of the performance of hate speech detection in Italian. We are unsure if our model can maintain a stable and fair precision across the different targets and categories. HATE-ITA might overlook some sensible details, which practitioners should treat with care.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ethical Statement", |
| "sec_num": null |
| }, |
| { |
| "text": "All the models are trained with the same pipeline. We report the shared parameters in Table 5 . As mentioned in the table we fix the maximum number of epochs, but we evaluate the models during training and eventually select the model with the best validation loss. All the experiments are run 5 times and we take the average of the results. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 86, |
| "end": 93, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Model Training", |
| "sec_num": null |
| }, |
| { |
| "text": "We provide as literal as possible translations.\u2022 IT: Sei una p*rca e p*ttana\u2022 EN: you are pig and sl*t", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Examples Translation", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "A deep dive into multilingual hate speech classification", |
| "authors": [ |
| { |
| "first": "Binny", |
| "middle": [], |
| "last": "Sai Saketh Aluru", |
| "suffix": "" |
| }, |
| { |
| "first": "Punyajoy", |
| "middle": [], |
| "last": "Mathew", |
| "suffix": "" |
| }, |
| { |
| "first": "Animesh", |
| "middle": [], |
| "last": "Saha", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Machine Learning and Knowledge Discovery in Databases. Applied Data Science and Demo Track: European Conference, ECML PKDD 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "423--439", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-030-67670-4_26" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sai Saketh Aluru, Binny Mathew, Punyajoy Saha, and Animesh Mukherjee. 2020. A deep dive into multi- lingual hate speech classification. In Machine Learn- ing and Knowledge Discovery in Databases. Applied Data Science and Demo Track: European Confer- ence, ECML PKDD 2020, Ghent, Belgium, Septem- ber 14-18, 2020, Proceedings, Part V, page 423-439, Berlin, Heidelberg. Springer-Verlag.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Entropy-based attention regularization frees unintended bias mitigation from lists", |
| "authors": [ |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Attanasio", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Baralis", |
| "suffix": "" |
| } |
| ], |
| "year": 2022, |
| "venue": "Findings of the Association for Computational Linguistics: ACL2022 (Forthcoming)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giuseppe Attanasio, Debora Nozza, Dirk Hovy, and Elena Baralis. 2022a. Entropy-based attention regu- larization frees unintended bias mitigation from lists. In Findings of the Association for Computational Lin- guistics: ACL2022 (Forthcoming). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Benchmarking post-hoc interpretability approaches for transformer-based misogyny detection", |
| "authors": [ |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Attanasio", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Eliana", |
| "middle": [], |
| "last": "Pastor", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2022, |
| "venue": "Proceedings of the First Workshop on Efficient Benchmarking in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giuseppe Attanasio, Debora Nozza, Eliana Pastor, and Dirk Hovy. 2022b. Benchmarking post-hoc inter- pretability approaches for transformer-based misog- yny detection. In Proceedings of the First Workshop on Efficient Benchmarking in NLP. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "PoliTeam @ AMI: Improving sentence embedding similarity with misogyny lexicons for automatic misogyny identificationin italian tweets", |
| "authors": [ |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Attanasio", |
| "suffix": "" |
| }, |
| { |
| "first": "Eliana", |
| "middle": [], |
| "last": "Pastor", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "EVALITA Evaluation of NLP and Speech Tools for Italian -December 17th", |
| "volume": "", |
| "issue": "", |
| "pages": "48--54", |
| "other_ids": { |
| "DOI": [ |
| "10.4000/books.aaccademia.6807" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giuseppe Attanasio and Eliana Pastor. 2020. PoliTeam @ AMI: Improving sentence embedding similarity with misogyny lexicons for automatic misogyny iden- tificationin italian tweets. In Valerio Basile, Danilo Croce, Maria Maro, and Lucia C. Passaro, editors, EVALITA Evaluation of NLP and Speech Tools for Italian -December 17th, 2020, pages 48-54. Ac- cademia University Press.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Vector space models for automatic misogyny identification (short paper)", |
| "authors": [ |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Bakarov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop (EVALITA 2018) co-located with the Fifth Italian Conference on Computational Linguistics", |
| "volume": "2263", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir Bakarov. 2018. Vector space models for automatic misogyny identification (short paper). In Proceed- ings of the Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop (EVALITA 2018) co-located with the Fifth Italian Conference on Computational Linguis- tics (CLiC-it 2018), Turin, Italy, December 12-13, 2018, volume 2263 of CEUR Workshop Proceedings. CEUR-WS.org.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Xlm-t: A multilingual language model toolkit for twitter", |
| "authors": [ |
| { |
| "first": "Francesco", |
| "middle": [], |
| "last": "Barbieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Espinosa Anke", |
| "suffix": "" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2104.12250" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francesco Barbieri, Luis Espinosa Anke, and Jose Camacho-Collados. 2021. Xlm-t: A multilingual language model toolkit for twitter. arXiv preprint arXiv:2104.12250.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Semeval-2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Nozza", |
| "middle": [], |
| "last": "Debora", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco Manuel Rangel", |
| "middle": [], |
| "last": "Pardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "54--63", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valerio Basile, Cristina Bosco, Elisabetta Fersini, Nozza Debora, Viviana Patti, Francisco Manuel Rangel Pardo, Paolo Rosso, Manuela Sanguinetti, et al. 2019. Semeval-2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter. In 13th International Workshop on Semantic Evalua- tion, pages 54-63. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "XLM-EMO: Multilingual emotion prediction in social media text", |
| "authors": [ |
| { |
| "first": "Federico", |
| "middle": [], |
| "last": "Bianchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2022, |
| "venue": "Proceedings of the 12th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Federico Bianchi, Debora Nozza, and Dirk Hovy. 2022. XLM-EMO: Multilingual emotion prediction in so- cial media text. In Proceedings of the 12th Workshop on Computational Approaches to Subjectivity, Sen- timent and Social Media Analysis. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Cross-lingual contextualized topic models with zero-shot learning", |
| "authors": [ |
| { |
| "first": "Federico", |
| "middle": [], |
| "last": "Bianchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvia", |
| "middle": [], |
| "last": "Terragni", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume", |
| "volume": "", |
| "issue": "", |
| "pages": "1676--1683", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.eacl-main.143" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Federico Bianchi, Silvia Terragni, Dirk Hovy, Debora Nozza, and Elisabetta Fersini. 2021. Cross-lingual contextualized topic models with zero-shot learning. In Proceedings of the 16th Conference of the Euro- pean Chapter of the Association for Computational Linguistics: Main Volume, pages 1676-1683, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Overview of the EVALITA 2018 hate speech detection task", |
| "authors": [ |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Dell'orletta Felice", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Poletto", |
| "suffix": "" |
| }, |
| { |
| "first": "Tesconi", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Maurizio", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop (EVALITA 2018)", |
| "volume": "2263", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cristina Bosco, Dell'Orletta Felice, Fabio Poletto, Manuela Sanguinetti, and Tesconi Maurizio. 2018. Overview of the EVALITA 2018 hate speech de- tection task. In Proceedings of the Sixth Eval- uation Campaign of Natural Language Process- ing and Speech Tools for Italian. Final Workshop (EVALITA 2018), volume 2263, pages 1-9, Turin, Italy. CEUR.org.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "EVALITA 2018: Overview of the 6th Evaluation Campaign of Natural Language Processing and Speech Tools for Italian", |
| "authors": [ |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Caselli", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicole", |
| "middle": [], |
| "last": "Novielli", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tommaso Caselli, Nicole Novielli, Viviana Patti, and Paolo Rosso. 2018. EVALITA 2018: Overview of the 6th Evaluation Campaign of Natural Language Pro- cessing and Speech Tools for Italian. In Proceedings of Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Work- shop (EVALITA 2018), Turin, Italy. CEUR.org.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Proceedings of the Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop (EVALITA 2018) co-located with the Fifth Italian Conference on Computational Linguistics", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Cimino", |
| "suffix": "" |
| }, |
| { |
| "first": "Lorenzo", |
| "middle": [ |
| "De" |
| ], |
| "last": "Mattei", |
| "suffix": "" |
| }, |
| { |
| "first": "Felice", |
| "middle": [], |
| "last": "Dell'orletta", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "2263", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Cimino, Lorenzo De Mattei, and Felice Dell'Orletta. 2018. Multi-task learning in deep neu- ral networks at EVALITA 2018. In Proceedings of the Sixth Evaluation Campaign of Natural Lan- guage Processing and Speech Tools for Italian. Fi- nal Workshop (EVALITA 2018) co-located with the Fifth Italian Conference on Computational Linguis- tics (CLiC-it 2018), Turin, Italy, December 12-13, 2018, volume 2263 of CEUR Workshop Proceedings. CEUR-WS.org.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Unsupervised cross-lingual representation learning at scale", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartikay", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Wenzek", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8440--8451", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.747" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 8440- 8451, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Hybrid Emoji-Based Masked Language Models for Zero-Shot Abusive Language Detection", |
| "authors": [ |
| { |
| "first": "Michele", |
| "middle": [], |
| "last": "Corazza", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Menini", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Cabrio", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Tonelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Serena", |
| "middle": [], |
| "last": "Villata", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "943--949", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.findings-emnlp.84" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michele Corazza, Stefano Menini, Elena Cabrio, Sara Tonelli, and Serena Villata. 2020. Hybrid Emoji- Based Masked Language Models for Zero-Shot Abu- sive Language Detection. In Findings of the Associ- ation for Computational Linguistics: EMNLP 2020, pages 943-949, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Label propagation-based semi-supervised learning for hate speech classification", |
| "authors": [ |
| { |
| "first": "Ashwin", |
| "middle": [], |
| "last": "Geet", |
| "suffix": "" |
| }, |
| { |
| "first": "D'", |
| "middle": [], |
| "last": "Sa", |
| "suffix": "" |
| }, |
| { |
| "first": "Irina", |
| "middle": [], |
| "last": "Illina", |
| "suffix": "" |
| }, |
| { |
| "first": "Dominique", |
| "middle": [], |
| "last": "Fohr", |
| "suffix": "" |
| }, |
| { |
| "first": "Dietrich", |
| "middle": [], |
| "last": "Klakow", |
| "suffix": "" |
| }, |
| { |
| "first": "Dana", |
| "middle": [], |
| "last": "Ruiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the First Workshop on Insights from Negative Results in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "54--59", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.insights-1.8" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashwin Geet D'Sa, Irina Illina, Dominique Fohr, Di- etrich Klakow, and Dana Ruiter. 2020. Label propagation-based semi-supervised learning for hate speech classification. In Proceedings of the First Workshop on Insights from Negative Results in NLP, pages 54-59, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Naveen Arivazhagan, and Wei Wang. 2020. Language-agnostic bert sentence embedding", |
| "authors": [ |
| { |
| "first": "Fangxiaoyu", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinfei", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.48550/ARXIV.2007.01852" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fangxiaoyu Feng, Yinfei Yang, Daniel Cer, Naveen Ari- vazhagan, and Wei Wang. 2020. Language-agnostic bert sentence embedding.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Profiling Italian misogynist: An empirical study", |
| "authors": [ |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Giulia", |
| "middle": [], |
| "last": "Boifava", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Workshop on Resources and Techniques for User and Author Profiling in Abusive Language", |
| "volume": "", |
| "issue": "", |
| "pages": "9--13", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elisabetta Fersini, Debora Nozza, and Giulia Boifava. 2020a. Profiling Italian misogynist: An empirical study. In Proceedings of the Workshop on Resources and Techniques for User and Author Profiling in Abu- sive Language, pages 9-13, Marseille, France. Euro- pean Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Proceedings of the 6th evaluation campaign of Natural Language Processing and Speech tools for", |
| "authors": [ |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Italian (", |
| "volume": "12", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elisabetta Fersini, Debora Nozza, and Paolo Rosso. 2018. Overview of the EVALITA 2018 task on auto- matic misogyny identification (AMI). Proceedings of the 6th evaluation campaign of Natural Language Processing and Speech tools for Italian (EVALITA 2018), 12:59.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "AMI @ EVALITA2020: Automatic misogyny identification", |
| "authors": [ |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 7th evaluation campaign of Natural Language Processing and Speech tools for Italian", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elisabetta Fersini, Debora Nozza, and Paolo Rosso. 2020b. AMI @ EVALITA2020: Automatic misog- yny identification. In Proceedings of the 7th eval- uation campaign of Natural Language Processing and Speech tools for Italian (EVALITA 2020), Online. CEUR.org.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Translated vs non-translated method for multilingual hate speech identification in twitter", |
| "authors": [ |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Okky Ibrohim", |
| "suffix": "" |
| }, |
| { |
| "first": "Indra", |
| "middle": [], |
| "last": "Budi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Journal on Advanced Science, Engineering and Information Technology", |
| "volume": "9", |
| "issue": "4", |
| "pages": "1116--1123", |
| "other_ids": { |
| "DOI": [ |
| "10.18517/ijaseit.9.4.8123" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Okky Ibrohim and Indra Budi. 2019. Trans- lated vs non-translated method for multilingual hate speech identification in twitter. International Journal on Advanced Science, Engineering and Information Technology, 9(4):1116-1123.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "FERMI at SemEval-2019 task 5: Using sentence embeddings to identify hate speech against immigrants and women in Twitter", |
| "authors": [ |
| { |
| "first": "Vijayasaradhi", |
| "middle": [], |
| "last": "Indurthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bakhtiyar", |
| "middle": [], |
| "last": "Syed", |
| "suffix": "" |
| }, |
| { |
| "first": "Manish", |
| "middle": [], |
| "last": "Shrivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Chakravartula", |
| "suffix": "" |
| }, |
| { |
| "first": "Manish", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasudeva", |
| "middle": [], |
| "last": "Varma", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "70--74", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S19-2009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vijayasaradhi Indurthi, Bakhtiyar Syed, Manish Shri- vastava, Nikhil Chakravartula, Manish Gupta, and Vasudeva Varma. 2019. FERMI at SemEval-2019 task 5: Using sentence embeddings to identify hate speech against immigrants and women in Twitter. In Proceedings of the 13th International Workshop on Semantic Evaluation, pages 70-74, Minneapo- lis, Minnesota, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Beth Meyerowitz, and Morteza Dehghani. 2020a. The Gab Hate Corpus: A Collection of 27k Posts Annotated for Hate Speech", |
| "authors": [ |
| { |
| "first": "Brendan", |
| "middle": [], |
| "last": "Kennedy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Atari", |
| "suffix": "" |
| }, |
| { |
| "first": "Aida", |
| "middle": [ |
| "Mostafazadeh" |
| ], |
| "last": "Davani", |
| "suffix": "" |
| }, |
| { |
| "first": "Leigh", |
| "middle": [], |
| "last": "Yeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Omrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Yehsong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Kris", |
| "middle": [], |
| "last": "Coombs", |
| "suffix": "" |
| }, |
| { |
| "first": "Gwenyth", |
| "middle": [], |
| "last": "Shreya Havaldar", |
| "suffix": "" |
| }, |
| { |
| "first": "Elaine", |
| "middle": [], |
| "last": "Portillo-Wightman", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Gonzalez", |
| "suffix": "" |
| }, |
| { |
| "first": "Aida", |
| "middle": [], |
| "last": "Hoover", |
| "suffix": "" |
| }, |
| { |
| "first": "*", |
| "middle": [], |
| "last": "Azatian", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabriel", |
| "middle": [], |
| "last": "Cardenas", |
| "suffix": "" |
| }, |
| { |
| "first": "*", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Alyzeh", |
| "middle": [], |
| "last": "Hussain", |
| "suffix": "" |
| }, |
| { |
| "first": "*", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Austin", |
| "middle": [], |
| "last": "Lara", |
| "suffix": "" |
| }, |
| { |
| "first": "*", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Omary", |
| "suffix": "" |
| }, |
| { |
| "first": "*", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Christina", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "*", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "*", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Clarisa", |
| "middle": [], |
| "last": "Wijaya", |
| "suffix": "" |
| }, |
| { |
| "first": "*", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.31234/osf.io/hqjxn" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brendan Kennedy, Mohammad Atari, Aida Mostafazadeh Davani, Leigh Yeh, Ali Omrani, Yehsong Kim, Kris Coombs Jr., Shreya Havaldar, Gwenyth Portillo-Wightman, Elaine Gonzalez, Joe Hoover, Aida Azatian*, Gabriel Cardenas*, Alyzeh Hussain*, Austin Lara*, Adam Omary*, Christina Park*, Xin Wang*, Clarisa Wijaya*, Yong Zhang*, Beth Meyerowitz, and Morteza Dehghani. 2020a. The Gab Hate Corpus: A Collection of 27k Posts Annotated for Hate Speech.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Contextualizing hate speech classifiers with post-hoc explanation", |
| "authors": [ |
| { |
| "first": "Brendan", |
| "middle": [], |
| "last": "Kennedy", |
| "suffix": "" |
| }, |
| { |
| "first": "Xisen", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Aida", |
| "middle": [ |
| "Mostafazadeh" |
| ], |
| "last": "Davani", |
| "suffix": "" |
| }, |
| { |
| "first": "Morteza", |
| "middle": [], |
| "last": "Dehghani", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5435--5442", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.483" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brendan Kennedy, Xisen Jin, Aida Mostafazadeh Da- vani, Morteza Dehghani, and Xiang Ren. 2020b. Contextualizing hate speech classifiers with post-hoc explanation. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 5435-5442, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Constructing interval variables via faceted rasch measurement and multitask deep learning: a hate speech application", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chris", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoff", |
| "middle": [], |
| "last": "Kennedy", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Bacon", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [ |
| "Von" |
| ], |
| "last": "Sahn", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vacano", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2009.10277" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris J Kennedy, Geoff Bacon, Alexander Sahn, and Claudia von Vacano. 2020c. Constructing interval variables via faceted rasch measurement and multi- task deep learning: a hate speech application. arXiv preprint arXiv:2009.10277.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Dynabench: Rethinking benchmarking in NLP", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Bartolo", |
| "suffix": "" |
| }, |
| { |
| "first": "Yixin", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Divyansh", |
| "middle": [], |
| "last": "Kaushik", |
| "suffix": "" |
| }, |
| { |
| "first": "Atticus", |
| "middle": [], |
| "last": "Geiger", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengxuan", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bertie", |
| "middle": [], |
| "last": "Vidgen", |
| "suffix": "" |
| }, |
| { |
| "first": "Grusha", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Pratik", |
| "middle": [], |
| "last": "Ringshia", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyi", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Tristan", |
| "middle": [], |
| "last": "Thrush", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Pontus", |
| "middle": [], |
| "last": "Stenetorp", |
| "suffix": "" |
| }, |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "4110--4124", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.naacl-main.324" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela, Max Bartolo, Yixin Nie, Divyansh Kaushik, Atticus Geiger, Zhengxuan Wu, Bertie Vid- gen, Grusha Prasad, Amanpreet Singh, Pratik Ring- shia, Zhiyi Ma, Tristan Thrush, Sebastian Riedel, Zeerak Waseem, Pontus Stenetorp, Robin Jia, Mohit Bansal, Christopher Potts, and Adina Williams. 2021. Dynabench: Rethinking benchmarking in NLP. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 4110-4124, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Thenorth @ haspeede 2: Bertbased language model fine-tuning for italian hate speech detection (short paper)", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Lavergne", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajkumar", |
| "middle": [], |
| "last": "Saini", |
| "suffix": "" |
| }, |
| { |
| "first": "Gy\u00f6rgy", |
| "middle": [], |
| "last": "Kov\u00e1cs", |
| "suffix": "" |
| }, |
| { |
| "first": "Killian", |
| "middle": [], |
| "last": "Murphy", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Seventh Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop", |
| "volume": "2765", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Lavergne, Rajkumar Saini, Gy\u00f6rgy Kov\u00e1cs, and Killian Murphy. 2020. Thenorth @ haspeede 2: Bert- based language model fine-tuning for italian hate speech detection (short paper). In Proceedings of the Seventh Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Work- shop (EVALITA 2020), Online event, December 17th, 2020, volume 2765 of CEUR Workshop Proceedings. CEUR-WS.org.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Jigsaw @ AMI and haspeede2: Fine-tuning a pretrained comment-domain BERT model", |
| "authors": [ |
| { |
| "first": "Alyssa", |
| "middle": [], |
| "last": "Lees", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Sorensen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Kivlichan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Seventh Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop (EVALITA 2020)", |
| "volume": "2765", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alyssa Lees, Jeffrey Sorensen, and Ian Kivlichan. 2020. Jigsaw @ AMI and haspeede2: Fine-tuning a pre- trained comment-domain BERT model. In Proceed- ings of the Seventh Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop (EVALITA 2020), Online event, De- cember 17th, 2020, volume 2765 of CEUR Workshop Proceedings. CEUR-WS.org.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Toxic language detection in social media for Brazilian Portuguese: New dataset and multilingual analysis", |
| "authors": [ |
| { |
| "first": "Diego", |
| "middle": [], |
| "last": "Jo\u00e3o Augusto Leite", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalina", |
| "middle": [], |
| "last": "Silva", |
| "suffix": "" |
| }, |
| { |
| "first": "Carolina", |
| "middle": [], |
| "last": "Bontcheva", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Scarton", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "914--924", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jo\u00e3o Augusto Leite, Diego Silva, Kalina Bontcheva, and Carolina Scarton. 2020. Toxic language detec- tion in social media for Brazilian Portuguese: New dataset and multilingual analysis. In Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Lan- guage Processing, pages 914-924, Suzhou, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A unified approach to interpreting model predictions", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Scott", |
| "suffix": "" |
| }, |
| { |
| "first": "Su-In", |
| "middle": [], |
| "last": "Lundberg", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Scott M Lundberg and Su-In Lee. 2017. A unified approach to interpreting model predictions. In Ad- vances in Neural Information Processing Systems, volume 30. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Hatexplain: A benchmark dataset for explainable hate speech detection", |
| "authors": [ |
| { |
| "first": "Binny", |
| "middle": [], |
| "last": "Mathew", |
| "suffix": "" |
| }, |
| { |
| "first": "Punyajoy", |
| "middle": [], |
| "last": "Saha", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Seid Muhie Yimam", |
| "suffix": "" |
| }, |
| { |
| "first": "Pawan", |
| "middle": [], |
| "last": "Biemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Animesh", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "35", |
| "issue": "", |
| "pages": "14867--14875", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Binny Mathew, Punyajoy Saha, Seid Muhie Yimam, Chris Biemann, Pawan Goyal, and Animesh Mukher- jee. 2021. Hatexplain: A benchmark dataset for ex- plainable hate speech detection. In Proceedings of the AAAI Conference on Artificial Intelligence, vol- ume 35, pages 14867-14875.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Ethos: a multi-label hate speech detection dataset", |
| "authors": [ |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Mollas", |
| "suffix": "" |
| }, |
| { |
| "first": "Zoe", |
| "middle": [], |
| "last": "Chrysopoulou", |
| "suffix": "" |
| } |
| ], |
| "year": 2022, |
| "venue": "Complex & Intelligent Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ioannis Mollas, Zoe Chrysopoulou, Stamatis Karlos, and Grigorios Tsoumakas. 2022. Ethos: a multi-label hate speech detection dataset. Complex & Intelligent Systems, pages 1-16.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Working notes of the workshop arabic misogyny identification (armi-2021)", |
| "authors": [ |
| { |
| "first": "Hala", |
| "middle": [], |
| "last": "Mulki", |
| "suffix": "" |
| }, |
| { |
| "first": "Bilal", |
| "middle": [], |
| "last": "Ghanem", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Forum for Information Retrieval Evaluation, FIRE 2021", |
| "volume": "", |
| "issue": "", |
| "pages": "7--8", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3503162.3503178" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hala Mulki and Bilal Ghanem. 2021. Working notes of the workshop arabic misogyny identification (armi- 2021). In Forum for Information Retrieval Evalu- ation, FIRE 2021, page 7-8, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Exposing the limits of zero-shot cross-lingual hate speech detection", |
| "authors": [ |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "907--914", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.acl-short.114" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Debora Nozza. 2021. Exposing the limits of zero-shot cross-lingual hate speech detection. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 907-914, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "What the [MASK]? Making sense of language-specific BERT models", |
| "authors": [ |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Federico", |
| "middle": [], |
| "last": "Bianchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2003.02912" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Debora Nozza, Federico Bianchi, and Dirk Hovy. 2020. What the [MASK]? Making sense of language-specific BERT models. arXiv preprint arXiv:2003.02912.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Multilingual and multi-aspect hate speech analysis", |
| "authors": [ |
| { |
| "first": "Nedjma", |
| "middle": [], |
| "last": "Ousidhoum", |
| "suffix": "" |
| }, |
| { |
| "first": "Zizheng", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongming", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangqiu", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Dit-Yan", |
| "middle": [], |
| "last": "Yeung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4675--4684", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1474" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nedjma Ousidhoum, Zizheng Lin, Hongming Zhang, Yangqiu Song, and Dit-Yan Yeung. 2019. Multi- lingual and multi-aspect hate speech analysis. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 4675- 4684, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A joint learning approach with knowledge injection for zero-shot cross-lingual hate speech detection", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Endang Wahyu Pamungkas", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Information Processing & Management", |
| "volume": "58", |
| "issue": "4", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.ipm.2021.102544" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Endang Wahyu Pamungkas, Valerio Basile, and Viviana Patti. 2021. A joint learning approach with knowl- edge injection for zero-shot cross-lingual hate speech detection. Information Processing & Management, 58(4):102544.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Investigating crosslingual training for offensive language detection", |
| "authors": [ |
| { |
| "first": "Andra\u017e", |
| "middle": [], |
| "last": "Pelicon", |
| "suffix": "" |
| }, |
| { |
| "first": "Ravi", |
| "middle": [], |
| "last": "Shekhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Bla\u017e", |
| "middle": [], |
| "last": "\u0160krlj", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Purver", |
| "suffix": "" |
| }, |
| { |
| "first": "Senja", |
| "middle": [], |
| "last": "Pollak", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "PeerJ Computer Science", |
| "volume": "7", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.7717/peerj-cs.559" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andra\u017e Pelicon, Ravi Shekhar, Bla\u017e \u0160krlj, Matthew Purver, and Senja Pollak. 2021. Investigating cross- lingual training for offensive language detection. PeerJ Computer Science, 7:e559. Publisher: PeerJ Inc.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "MAD-X: An Adapter-Based Framework for Multi-Task Cross-Lingual Transfer", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Pfeiffer", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "7654--7673", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.617" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Pfeiffer, Ivan Vuli\u0107, Iryna Gurevych, and Se- bastian Ruder. 2020. MAD-X: An Adapter-Based Framework for Multi-Task Cross-Lingual Transfer. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7654-7673, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Towards zeroshot language modeling", |
| "authors": [ |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Edoardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2900--2910", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1288" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edoardo Maria Ponti, Ivan Vuli\u0107, Ryan Cotterell, Roi Reichart, and Anna Korhonen. 2019. Towards zero- shot language modeling. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2900-2910, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Results of the PolEval 2019 shared task 6: First dataset and open shared task for automatic cyberbullying detection in Polish Twitter", |
| "authors": [ |
| { |
| "first": "Michal", |
| "middle": [], |
| "last": "Ptaszynski", |
| "suffix": "" |
| }, |
| { |
| "first": "Agata", |
| "middle": [], |
| "last": "Pieciukiewicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Pawe\u0142", |
| "middle": [], |
| "last": "Dyba\u0142a", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the PolEval 2019 Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michal Ptaszynski, Agata Pieciukiewicz, and Pawe\u0142 Dy- ba\u0142a. 2019. Results of the PolEval 2019 shared task 6: First dataset and open shared task for automatic cy- berbullying detection in Polish Twitter. Proceedings of the PolEval 2019 Workshop, page 89.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Detecting Hate Speech in Cross-Lingual and Multi-lingual Settings Using Language Agnostic Representations", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Sebasti\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "H\u00e9ctor", |
| "middle": [], |
| "last": "Rodr\u00edguez", |
| "suffix": "" |
| }, |
| { |
| "first": "H\u00e9ctor", |
| "middle": [], |
| "last": "Allende-Cid", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Allende", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "77--87", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-030-93420-0_8" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebasti\u00e1n E. Rodr\u00edguez, H\u00e9ctor Allende-Cid, and H\u00e9c- tor Allende. 2021. Detecting Hate Speech in Cross- Lingual and Multi-lingual Settings Using Language Agnostic Representations. In Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications, pages 77-87, Cham. Springer Interna- tional Publishing.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Multilingual Hate-Check: Functional tests for multilingual hate speech detection models", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "R\u00f6ttger", |
| "suffix": "" |
| }, |
| { |
| "first": "Haitham", |
| "middle": [], |
| "last": "Seelawi", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Talat", |
| "suffix": "" |
| }, |
| { |
| "first": "Bertie", |
| "middle": [], |
| "last": "Vidgen", |
| "suffix": "" |
| } |
| ], |
| "year": 2022, |
| "venue": "Proceedings of the 6th Workshop on Online Abuse and Harms (WOAH 2022)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul R\u00f6ttger, Haitham Seelawi, Debora Nozza, Zeerak Talat, and Bertie Vidgen. 2022. Multilingual Hate- Check: Functional tests for multilingual hate speech detection models. In Proceedings of the 6th Work- shop on Online Abuse and Harms (WOAH 2022). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "HateCheck: Functional tests for hate speech detection models", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "R\u00f6ttger", |
| "suffix": "" |
| }, |
| { |
| "first": "Bertie", |
| "middle": [], |
| "last": "Vidgen", |
| "suffix": "" |
| }, |
| { |
| "first": "Dong", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "Margetts", |
| "suffix": "" |
| }, |
| { |
| "first": "Janet", |
| "middle": [], |
| "last": "Pierrehumbert", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "41--58", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.acl-long.4" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul R\u00f6ttger, Bertie Vidgen, Dong Nguyen, Zeerak Waseem, Helen Margetts, and Janet Pierrehumbert. 2021. HateCheck: Functional tests for hate speech detection models. In Proceedings of the 59th An- nual Meeting of the Association for Computational Linguistics and the 11th International Joint Confer- ence on Natural Language Processing (Volume 1: Long Papers), pages 41-58, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Haspeede 2 @ EVALITA2020: overview of the EVALITA 2020 hate speech detection task", |
| "authors": [ |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Gloria", |
| "middle": [], |
| "last": "Comandini", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisa", |
| "middle": [ |
| "Di" |
| ], |
| "last": "Nuovo", |
| "suffix": "" |
| }, |
| { |
| "first": "Simona", |
| "middle": [], |
| "last": "Frenda", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Stranisci", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Caselli", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Irene", |
| "middle": [], |
| "last": "Russo", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Seventh Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop (EVALITA 2020)", |
| "volume": "2765", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manuela Sanguinetti, Gloria Comandini, Elisa Di Nuovo, Simona Frenda, Marco Stranisci, Cristina Bosco, Tommaso Caselli, Viviana Patti, and Irene Russo. 2020. Haspeede 2 @ EVALITA2020: overview of the EVALITA 2020 hate speech detection task. In Proceedings of the Seventh Evaluation Cam- paign of Natural Language Processing and Speech Tools for Italian. Final Workshop (EVALITA 2020), Online event, December 17th, 2020, volume 2765 of CEUR Workshop Proceedings. CEUR-WS.org.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "An Italian Twitter corpus of hate speech against immigrants", |
| "authors": [ |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Poletto", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Stranisci", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manuela Sanguinetti, Fabio Poletto, Cristina Bosco, Vi- viana Patti, and Marco Stranisci. 2018. An Italian Twitter corpus of hate speech against immigrants. In Proceedings of the Eleventh International Confer- ence on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Re- sources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Zero-shot learning of classifiers from natural language quantification", |
| "authors": [ |
| { |
| "first": "Shashank", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Igor", |
| "middle": [], |
| "last": "Labutov", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "306--316", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1029" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shashank Srivastava, Igor Labutov, and Tom Mitchell. 2018. Zero-shot learning of classifiers from natural language quantification. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 306-316, Melbourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Cross-lingual zero-and few-shot hate speech detection utilising frozen transformer language models and AXEL. CoRR, abs", |
| "authors": [ |
| { |
| "first": "Lukas", |
| "middle": [], |
| "last": "Stappen", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabian", |
| "middle": [], |
| "last": "Brunn", |
| "suffix": "" |
| }, |
| { |
| "first": "Bj\u00f6rn", |
| "middle": [ |
| "W" |
| ], |
| "last": "Schuller", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lukas Stappen, Fabian Brunn, and Bj\u00f6rn W. Schuller. 2020. Cross-lingual zero-and few-shot hate speech detection utilising frozen transformer language mod- els and AXEL. CoRR, abs/2004.13850.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Practical transformer-based multilingual text classification", |
| "authors": [ |
| { |
| "first": "Cindy", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michele", |
| "middle": [], |
| "last": "Banko", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Industry Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "121--129", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.naacl-industry.16" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cindy Wang and Michele Banko. 2021. Practical transformer-based multilingual text classification. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies: Industry Papers, pages 121-129, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Overview of the GermEval 2018 Shared Task on the Identification of Offensive Language", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Wiegand", |
| "suffix": "" |
| }, |
| { |
| "first": "Melanie", |
| "middle": [], |
| "last": "Siegel", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Ruppenhofer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of GermEval 2018, 14th Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Wiegand, Melanie Siegel, and Josef Ruppen- hofer. 2018. Overview of the GermEval 2018 Shared Task on the Identification of Offensive Language. In Proceedings of GermEval 2018, 14th Conference on Natural Language Processing (KONVENS 2018).", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Scalable zeroshot entity linking with dense entity retrieval", |
| "authors": [ |
| { |
| "first": "Ledell", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Petroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Josifoski", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "6397--6407", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.519" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ledell Wu, Fabio Petroni, Martin Josifoski, Sebastian Riedel, and Luke Zettlemoyer. 2020. Scalable zero- shot entity linking with dense entity retrieval. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6397-6407, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Examples of predictions with SHAP (Lundberg and Lee, 2017) contributions on a color scale; color scale: blue (not-hate), red (hate). Translation available in Appendix B." |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td/><td colspan=\"2\">Hate Non-hate</td><td>Total</td></tr><tr><td>Disability</td><td>3,128</td><td>1,488</td><td>4,526</td></tr><tr><td>Gender</td><td>22,655</td><td>24,182</td><td>46,829</td></tr><tr><td>Origin</td><td>44,047</td><td>31,211</td><td>75,327</td></tr><tr><td>Religion</td><td>17,010</td><td>10,840</td><td>27,864</td></tr><tr><td>Sex. Orientation</td><td>9,980</td><td>12,312</td><td>22,313</td></tr><tr><td>Total</td><td>97,014</td><td colspan=\"2\">80,729 177,749</td></tr></table>", |
| "num": null, |
| "text": "shows the size of the dataset created by combining all the afore-mentioned English corpora.", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Statistics of the English dataset.", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>: Macro-F1 results. The most frequent class classifier has a Macro-F1 of 36.85.</td></tr><tr><td>sures performance comparability. For Sanguinetti</td></tr><tr><td>et al.</td></tr></table>", |
| "num": null, |
| "text": "", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF5": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Results on different benchmark datasets for the multi-lingual models.", |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |