| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T05:10:34.368318Z" |
| }, |
| "title": "Fine-Grained Fairness Analysis of Abusive Language Detection Systems with CheckList", |
| "authors": [ |
| { |
| "first": "Marta", |
| "middle": [ |
| "Marchiori" |
| ], |
| "last": "Manerba", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Pisa", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Tonelli", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "satonelli@fbk.eu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Current abusive language detection systems have demonstrated unintended bias towards sensitive features such as nationality or gender. This is a crucial issue, which may harm minorities and underrepresented groups if such systems were integrated in real-world applications. In this paper, we create ad hoc tests through the CheckList tool (Ribeiro et al., 2020) to detect biases within abusive language classifiers for English. We compare the behaviour of two BERT-based models, one trained on a generic abusive language dataset and the other on a dataset for misogyny detection. Our evaluation shows that, although BERT-based classifiers achieve high accuracy levels on a variety of natural language processing tasks, they perform very poorly as regards fairness and bias, in particular on samples involving implicit stereotypes, expressions of hate towards minorities and protected attributes such as race or sexual orientation. We release both the notebooks implemented to extend the Fairness tests and the synthetic datasets usable to evaluate systems bias independently of CheckList.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Current abusive language detection systems have demonstrated unintended bias towards sensitive features such as nationality or gender. This is a crucial issue, which may harm minorities and underrepresented groups if such systems were integrated in real-world applications. In this paper, we create ad hoc tests through the CheckList tool (Ribeiro et al., 2020) to detect biases within abusive language classifiers for English. We compare the behaviour of two BERT-based models, one trained on a generic abusive language dataset and the other on a dataset for misogyny detection. Our evaluation shows that, although BERT-based classifiers achieve high accuracy levels on a variety of natural language processing tasks, they perform very poorly as regards fairness and bias, in particular on samples involving implicit stereotypes, expressions of hate towards minorities and protected attributes such as race or sexual orientation. We release both the notebooks implemented to extend the Fairness tests and the synthetic datasets usable to evaluate systems bias independently of CheckList.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "At every stage of a supervised learning process, biases can arise and be introduced in the pipeline, ultimately leading to harm (Suresh and Guttag, 2020; Dixon et al., 2018) . When it comes to systems whose goal is to automatically detect abusive language, this issue becomes particularly serious, since unintended bias towards sensitive attributes such as gender, sexual orientation or nationality can harm underrepresented groups. Sap et al. (2019) , for example, show that annotators tend to label messages in Afro-American English more frequently than when annotating other messages, which could lead to the training of a system reproducing the same kind of bias.", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 153, |
| "text": "(Suresh and Guttag, 2020;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 154, |
| "end": 173, |
| "text": "Dixon et al., 2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 433, |
| "end": 450, |
| "text": "Sap et al. (2019)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The role of the datasets used to train these models is crucial: as pointed out by (Wiegand et al., 2019a) , there may be multiple reasons why a dataset is biased, e.g. due to skewed sampling strategies, prevalence of a specific subject (topic bias) or of content written by a specific author (author bias). Mitigation strategies may involve assessing which terms are frequent in the presence of certain labels and implementing techniques to balance the data by including neutral samples containing those same terms to prevent the model from learning inaccurate correlations (Wiegand et al., 2019a) . Furthermore, it is important to distinguish between different types of hatred, depending on the target group addressed: for example, misogynistic expressions show different linguistic peculiarities than racist ones. It is therefore crucial to create specialised datasets addressing different phenomena of abusive language, so that systems can be tuned to the complex and nuanced scenario of online speech.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 105, |
| "text": "(Wiegand et al., 2019a)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 574, |
| "end": 597, |
| "text": "(Wiegand et al., 2019a)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Given the sensitive context in which abusive language detection systems are deployed, a robust value-oriented evaluation of the model's fairness is necessary, in order to assess unintended biases and avoid, as far as possible, explicit harm or the amplification of pre-existing social biases. However, this bias-assessment process is complicated by the partial effectiveness of proposed methods that only work with certain definitions of bias and fairness, as well as by the limited availability of recognised benchmark datasets (Ntoutsi et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 529, |
| "end": 551, |
| "text": "(Ntoutsi et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Concerning the different definitions of fairness, they have been collected and organised both in (Suresh and Guttag, 2020) and (Mehrabi et al., 2019) , with the awareness that a single definition is not sufficient to address the multi-faceted problem of fairness in its entirety. In this work, we adopt a definition for fairness that is strongly contextual to abusive language detection. We define unfairness as the sensitivity of an abusive language detection classifier with respect to the presence in the record to be classified of entities belonging to protected groups or minorities. Specifically, a classifier is considered unfair or biased if the prediction changes according to the identities present, i.e. in similar sentences, the degree of hate is increased if terms such as white or straight are replaced by adjectives such as black or non-binary, revealing imbalances, possibly resulting from skewed and unrepresentative training data. Fairness, on the other hand, is defined as the behaviour of producing similar predictions for similar protected mentions, i.e. regardless of the specific value assumed by sensitive attributes like race and gender, without disadvantaging minorities or amplifying pre-existing social prejudices.", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 122, |
| "text": "(Suresh and Guttag, 2020)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 127, |
| "end": 149, |
| "text": "(Mehrabi et al., 2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We deploy the CheckList tool (Ribeiro et al., 2020), which was originally created to evaluate general linguistic capabilities of NLP models, extending it to test fairness of abusive language detection systems. Embracing CheckList systematic framework, we create tests from hand-coded templates, reproducing stereotyped opinions and social biases, such as sexism and racism. The aim is to assess the performances of these models identifying the most frequent errors and detecting a range of unintended biases towards sensitive categories and topics. This last objective is motivated by evidence that NLP systems tend, in certain contexts, to rely for the classification on identity terms and sensitive attributes, as well as to generalize misleading correlations learnt from training datasets. As ultimate goal, the analysis of the failures could therefore lead to a general overview of the models' fairness: the ideal outcome would be to establish a proactive pipeline that allows the improvement of the systems, having highlighted the shortages through CheckList ad hoc synthetic testing. To the best of our knowledge, there has not yet been any work carried out with CheckList in this research direction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Several tools and approaches have been proposed to identify the most frequent errors done by NLP tools. For example, Errudite (Wu et al., 2019 ) is a tool that allows interactive error analysis through counterfactuals generation, but it is limited to the tasks of Question Answering and Visual Question Answering.", |
| "cite_spans": [ |
| { |
| "start": 126, |
| "end": 142, |
| "text": "(Wu et al., 2019", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "TextAttack (Morris et al., 2020) -which, among other packages, deploys CheckList -is a modelagnostic framework useful for the expansion of the datasets and the increase of models robustness through adversarial attacks. Compared to Check-List, however, it is more complicated to handle and deploy for users with little NLP skills. An interesting aspect is that TextAttack includes in the package the so-called \"recipes\", i.e. attacks from the literature ready to run, that build a common ground for the assessment and comparison of models' performances.", |
| "cite_spans": [ |
| { |
| "start": 11, |
| "end": 32, |
| "text": "(Morris et al., 2020)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As outlined in (Ribeiro et al., 2020) , some methods to identify errors by NLP systems are taskspecific, such as or (Belinkov and Bisk, 2018) , while others focus on particular NLP components such as word embeddings, as in (Tsvetkov et al., 2016) or (Rogers et al., 2018) . Compared to existing approaches, one of Check-List's major strengths lies in including the testing phase within a comprehensive framework. The evaluation, conducted through adaptable templates and a range of relevant linguistic capabilities, is on one hand more granular than overall measures such as accuracy; on the other hand it is more versatile, because it leaves liberty to the developer to enrich and expand the tests within new and more suitable capabilities, depending on the task and model under consideration.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 37, |
| "text": "(Ribeiro et al., 2020)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 116, |
| "end": 141, |
| "text": "(Belinkov and Bisk, 2018)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 223, |
| "end": 246, |
| "text": "(Tsvetkov et al., 2016)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 250, |
| "end": 271, |
| "text": "(Rogers et al., 2018)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "On the topic of fairness and biases, (Kiritchenko et al., 2020) conduct an in-depth discussion on NLP works dealing with ethical issues and challenges in automatic abusive language detection. Among others, a perspective analyzed is the principle of fairness and non-discrimination throughout every stage of supervised machine learning processes. A recent survey by (Blodgett et al., 2020) also analyzes and criticizes the formalization of bias within NLP systems, revealing inconsistency, lack of normativity and common rationale in several works. Furthermore, the visibility reached by corporate tools, such as IBM AI Fairness 360 or Amazon SageMaker Clarify, which are designed and promoted by large IT companies, raises several questions: is self-regulation right? What would be the advantages and risks of conducting independent external auditing? Several metrics 1 , generic tools and python packages 2 are available. Nevertheless, no consensus related to the above questions has been reached yet among the involved players.", |
| "cite_spans": [ |
| { |
| "start": 37, |
| "end": 63, |
| "text": "(Kiritchenko et al., 2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Concerning existing datasets specifically designed to assess biases within Machine Learning models, (Mehrabi et al., 2019) list several of the widely used ones, which differ according to size, type of records (numerical, images, texts) and tackled domain (e.g. financial, facial recognition, etc.). The only language dataset cited is WiNo-Bias, (Zhao et al., 2018) 3 also used in this work as a lexical resource, which pertains to the field of co-reference resolution. Our contribution instead aims to broaden fairness evaluation, specifically testing biases in abusive language detection systems through CheckList facilities.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 122, |
| "text": "(Mehrabi et al., 2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 345, |
| "end": 364, |
| "text": "(Zhao et al., 2018)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Concerning abusive language detection, a number of approaches has been proposed to perform both coarse-grained (i.e. binary) and fine-grained classification. 87 systems participated in the last Offenseval competition for English (Zampieri et al., 2020) , which included a binary task on offensive language identification, one on offensive language categorization and another on target identification. As reported by the organisers, the majority of teams used some kind of pre-trained embeddings such as contextualized Transformers (Vaswani et al., 2017) and ELMo (Peters et al., 2018) embeddings. The most popular Transformers were BERT (Devlin et al., 2019) and RoBERTa (Liu et al., 2019b) , which showed to achieve state-of-the-art results for English, especially when used in ensemble configurations. For this reason, we use BERT also in the experiments presented in the following sections.", |
| "cite_spans": [ |
| { |
| "start": 229, |
| "end": 252, |
| "text": "(Zampieri et al., 2020)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 531, |
| "end": 553, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 563, |
| "end": 584, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 637, |
| "end": 658, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 671, |
| "end": 690, |
| "text": "(Liu et al., 2019b)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Usually, the generalization capability of NLP models is evaluated based on the performance obtained on a held-out dataset, by measuring F1 or accuracy. This process, although widely adopted by the NLP community as a way to compare systems' performances and approaches, lacks informativeness since it does not provide insights into how to improve the models through the analysis of errors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction to CheckList", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In order to tackle this issue, CheckList (Ribeiro et al., 2020) was developed as a comprehensive task-agnostic framework, inspired by behavioral testing, in order to encourage more robust checking and to facilitate the assessment of models' general linguistic capabilities. The package allows the generation of data through the construction of different ad hoc tests by generalizations from templates and lexicons, general-purpose perturbations, tests expectations on the labels and context-aware suggestions using RoBERTa fill-ins (Liu et al., 2019b) as prompter for specific masked tokens. The tests created can be saved, shared and utilized for different systems.", |
| "cite_spans": [ |
| { |
| "start": 532, |
| "end": 551, |
| "text": "(Liu et al., 2019b)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction to CheckList", |
| "sec_num": "3" |
| }, |
| { |
| "text": "CheckList includes three test types and a number of linguistic capabilities to be tested. The three types of tests are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction to CheckList", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1. Minimum Functionality Test (MFT): the basic type of test, involving the standard classification of records with the corresponding labels. Each group of MFTs is designed to prove and explore how the model handles specific challenges related to a language capability, e.g. vocabulary, negation, etc.;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction to CheckList", |
| "sec_num": "3" |
| }, |
| { |
| "text": "2. Invariance Test (INV): verifies that model predictions do not change significantly with respect to a record and its variants, generated by altering the original sentence through the replacement of specific terms with similar expressions;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction to CheckList", |
| "sec_num": "3" |
| }, |
| { |
| "text": "3. Directional Expectation Test (DIR): verifies that model predictions change as a result of the record perturbation, i.e. the score should raise or fall according to the modification applied.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction to CheckList", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Concerning linguistic capabilities, CheckList covers a number of aspects that are usually relevant when evaluating NLP systems, such as robustness, named entity recognition, temporal awareness of the models and negation. While we also evaluated these aspects, our main focus here is models Fairness, which verifies that systems predictions do not change as a function of protected features. While the Fairness capability already proposed in Check-List involved the perturbation of sensitive attributes, namely expressions referring to gender, sexual orientation, nationality or religion, we first extend it by adding \"professions\" as protected attribute in order to assess whether predictions change if a male or a female assumes a specific job role. We then enrich the capability designing hand-coded templates, belonging to the MFT test type, resulting from the exploration of representative constructions and stereotypes annotated in the Social Bias Inference Corpus (Sap et al., 2020) . The resulting samples exemplify several sexist, racist and ableist comments and opinions: all of them are new aspects compared to the suites released by the authors (Ribeiro et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 970, |
| "end": 988, |
| "text": "(Sap et al., 2020)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1156, |
| "end": 1178, |
| "text": "(Ribeiro et al., 2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction to CheckList", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As described in the introduction, CheckList provides built-in tools to assist users in the creation of tests. Among others, WordNet allows the selection of synonyms, antonyms, hypernyms, etc. for a given expression. CheckList's templates take shape from these sets of semantically related words. We develop a further extension of the tool by integrating SentiWordNet (Baccianella et al., 2010) , a lexical resource in which WordNet synsets have been associated with a sentiment score (negative, neutral or positive). In this way, CheckList can benefit from the sentiment-dimension of SentiWord-Net. Indeed, during the development of templates and the perturbations of the records, SentiWordNet enables the selection of suitable linguistic substitutions for a given term, according to the label of the sentence to be created. An example: seeking a synonym that has a similar connotation as the adjective happy for the phrase \"The girl is happy\", the results returned include glad, with a positive denotations of 0.5. In this case, through SentiWord-Net, it is possible to select a synonym term with a similar polarity, in order to create variants of the original sentence that preserve a similar semantic content and to assess how the model behaves with slightly different terms.", |
| "cite_spans": [ |
| { |
| "start": 367, |
| "end": 393, |
| "text": "(Baccianella et al., 2010)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction to CheckList", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Suites are objects designed by CheckList authors (Ribeiro et al., 2020) that enable users to organise, combine and save sets of tests, in order to reuse them several times and to aggregate results (i.e. failure rates) in a single run. Once a test is designed, it is added to the suite, specifying the test type (MFT, INV or DIR), a name, the language capability within which it is situated and a brief description. The suite will thus be composed of one or more capabilities, each of which is assessed through several tests. After the suite is created, it can be run to evaluate the output of a given classifier, provided that the system has been previously launched to label the records created for each test providing for each record a class and the respective probabilities. The results of the run of the suite are displayed through a visual and interactive summary, which reports misclassified samples and the various failure percentages obtained in each test (see Fig. 1 for an example). The core of our work takes off from the notebooks released by CheckList authors (Ribeiro et al., 2020) , specifically from the suite for the task of Sentiment Analysis 4 , that builds a series of tests consisting in tweets about airline companies. In order to target a different task, which relies on binary decisions, we modify all the templates adjusting them for the task of abusive language detection. Our main contribution is the extension of the Fairness capability, which we enrich with several tests addressing diverse abuse targets and dealing with different types of biases.", |
| "cite_spans": [ |
| { |
| "start": 1073, |
| "end": 1095, |
| "text": "(Ribeiro et al., 2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 969, |
| "end": 975, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Suite for Abusive Language Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The tests developed for analysing Fairness deal with social biases towards specific different targets and topics, such as sexism, racism and ableism. With respect to the three test types available within CheckList, we have only utilized MFT and INV, because the third type, DIR, tests for a change in prediction as a result of sentence modifications, whereas for the definition of Fairness we adopt we intend to assess the opposite, i.e. that the prediction does not change as a function of perturbations of protected attributes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The Fairness tests developed are grouped into categories which we have previously identified through the manual inspection of representative constructions and stereotypes annotated in the Social Bias Inference Corpus (Sap et al., 2020) . The samples chosen are mainly abusive, and the assigned labels are the same as the examples from which we have generalised within the dataset. The groups of biases are not exhaustive, but they are still representative, as resulting also by exploring the most frequently occurring abuse targets in datasets for abusive language detection systems such as (Waseem and Hovy, 2016) , (Golbeck et al., 2017) , (Founta et al., 2018) and (Basile et al., 2019) . We briefly describe the tests designed, grouping them by target and reporting between brackets the test type of each:", |
| "cite_spans": [ |
| { |
| "start": 217, |
| "end": 235, |
| "text": "(Sap et al., 2020)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 591, |
| "end": 614, |
| "text": "(Waseem and Hovy, 2016)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 617, |
| "end": 639, |
| "text": "(Golbeck et al., 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 642, |
| "end": 663, |
| "text": "(Founta et al., 2018)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 668, |
| "end": 689, |
| "text": "(Basile et al., 2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Misogyny, gender and sexual orientation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Perturbing gender and sexual orientation (INV): changing mentions of the protected attributes with similar values, i.e. replacing \"gay\" with \"non-binary\";", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Stereotyped female vs male work roles and Stereotyped male vs. female work roles (INV): perturbing stereotyped professions connected to the \"unconventional\" gender 5 , i.e. \"Jane is a driver\" and \"John is a secretary\";", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Unintended bias in misogyny detection (MFT): extracting random records from the dataset of , testing unintended biases within automatic misogyny identification;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Gender stereotypes (MFT): simulating hateful sexist comments and opinions;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Body image stereotypes (MFT): reporting common biases on body image;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Toxic masculinity stereotypes (MFT): reporting common biases on toxic masculinity;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Neutral statements feminism-related (MFT): generating neutral statements where an individual is identified as feminist, i.e. \"Jane is feminist\" or \"John is feminist\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Race, nationality and religion:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Perturbing race (INV): changing mentions of the protected attributes with similar values, i.e. replacing \"white\" with \"black\";", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Perturbing nationality (INV): changing mentions of the protected attributes with similar values, i.e. replacing \"English\" with \"Italian\";", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Perturbing religion (INV): changing mentions of the protected attributes with similar values, i.e. replacing \"christian\" with \"jew\";", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Racial stereotypes (MFT): simulating hateful racist comments and opinions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness tests", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Ableist stereotypes (MFT): simulating hateful ableist comments and opinions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Disability:", |
| "sec_num": null |
| }, |
| { |
| "text": "The hand-coded templates about body image and toxic masculinity, belonging to the MFT test type, do not always use explicitly hateful terms: occasionally prejudices are expressed in a more subtle way that models are often not able to handle. Sample records for the body image 5 The list used to identify the \"swapped\" professions is https://github.com/uclanlp/corefBias/ tree/master/WinoBias/wino. topic are \"Janet is blonde, that's why she is not smart\" and \"Never let a [offensive woman expression] with no make-up on tell you what to [verb]\". Examples of toxic masculinity are found in \"Harry, you're not a real man if you don't have muscles\" and \"John, you're not a real man if you don't have a job\". Regarding examples of insults toward a specific nationality or religion, the examples crafted consist in sentences like \"Nobody really like [nationality] The INVariance test type involves assessing whether changing the value of sensitive attributes causes also a change in the label predicted or in the probabilities (i.e. without reason, revealing biases). Race, gender, sexual orientation, religion and nationality are examples of sensitive features. From simple templates like \"[name] is [nationality]\" are generated records as \"Jane is Chinese\", \"Jane is Egyptian\" and similar (for each test, it is possible to specify the exact number of instances to produce). Using instead the keys gender and sexual orientation will result in \"Jane is asexual\", \"Jane is queer\" and more. Applying this same procedure for the other sensitive keys will result in simplified (because the sentences are very similar) but very targeted synthetic data (see Section 4.2). \nThe tests involving the perturbation of race, nationality, religion, gender and sexual orientation are those developed by CheckList's authors; we extend them by adding \"professions\" as protected attribute, in order to assess whether predictions change if a male or a female assumes a specific job role.", |
| "cite_spans": [ |
| { |
| "start": 276, |
| "end": 277, |
| "text": "5", |
| "ref_id": null |
| }, |
| { |
| "start": 845, |
| "end": 858, |
| "text": "[nationality]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Disability:", |
| "sec_num": null |
| }, |
| { |
| "text": "After constructing the tests 6 , we export the records created through the templates to make them available and usable independently of CheckList framework: in fact, this additional step, i.e. creating datasets, is separate from the standard CheckList process, which instead requires the creation of data within the tests, framed in the capabilities and executed during the suite run. Specifically, we export the test records together with their corresponding labels, when applicable. In fact, only the MFT test type features a precise label, whereas the other two types (INV and DIR) involve an expectation of whether or not the probabilities will change and therefore cannot be conceptually formalised in a dataset, where labels are required.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic datasets generation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The exported data results in the creation of three synthetic datasets covering different types of bias grouped by target (listed in 4.1), namely sexism, racism and ableism. The reason for distinguishing the records by abuse targets is due to the need for specialised datasets addressing different phenomena of abusive language with a fine-grained approach. The resulting data do not contain samples from datasets under license: the contents we release are therefore freely available 7 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic datasets generation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Briefly, the first dataset on sexism contains 1,200 non-hateful and 4,423 hateful samples; the second one on racism contains 400 non-hateful and 1,500 hateful records; the last one on ableism contains 220 hateful sentences. The label distribution is radically different from traditional abusive language datasets, where the prevalent class is non-hateful. This choice is motivated by the fact that we want to mainly focus on the phenomena surrounding social prejudices providing realistic and diverse examples, with the aim of exploring in depth the language used to convey biases. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic datasets generation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We run our evaluation using a standard BERT-based classifier for English, a language representation model developed by Google Research (Devlin et al., 2019) , whose deep learning architecture obtained state-of-the-art results in several natural language processing tasks including sentiment analysis, natural language inference, textual entailment (Devlin et al., 2019) and hate speech detection (Liu et al., 2019a) . BERT can be fine-tuned and adapted to specific tasks by adding just one additional output layer to the neural network. We use this approach because language models like BERT, or variants like ALBERT and RoBERTa (Wiedemann et al., 2020) , have been used by the vast majority of participants in the last Offenseval campaign (Zampieri et al., 2020) , yielding a very good performance on English (> 0.90 F1). For our experiments, we use the base model of BERT for English 8 , trained on 3.3 billion words, which is made available on the project website (https:// github.com/google-research/bert). We train two different classifiers in order to compare their behaviour w.r.t. biases. The first one is for generic abusive language detection, and is obtained by finetuning BERT on the (Founta et al., 2018) corpus. This dataset includes around 100K tweets annotated with four labels: hateful, abusive, spam or none. Differently from the other datasets, this was not created starting from a set of predefined offensive terms or hashtags to reduce bias, which is a main issue in abusive language datasets (Wiegand et al., 2019a) . This should make this dataset more challenging for classification. For our experiments, we removed the spam class, and we mapped both hateful and abusive tweets to the abusive class, based on the assumption that hateful messages are the most serious form of abusive language and that the term 'abusive' is more appropriate to cover the cases of interest for our study (Caselli et al., 2020 ). 
The second model is trained with the AMI 2018 dataset (Fersini et al., 2018) , which contains 4,000 tweets manually annotated as misogynistic or not. The purpose of this comparison is to assess potential changes in bias recognition, once a system has been specifically exposed to data dealing with these sensitive issues. Although BERT and similar language models may already encode biases (Bender et al., 2021) , fine-tuning on different datasets may indeed lead to a change in classification behaviour and therefore in its implicit biases.", |
| "cite_spans": [ |
| { |
| "start": 135, |
| "end": 156, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 348, |
| "end": 369, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 396, |
| "end": 415, |
| "text": "(Liu et al., 2019a)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 629, |
| "end": 653, |
| "text": "(Wiedemann et al., 2020)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 740, |
| "end": 763, |
| "text": "(Zampieri et al., 2020)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 1196, |
| "end": 1217, |
| "text": "(Founta et al., 2018)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1514, |
| "end": 1537, |
| "text": "(Wiegand et al., 2019a)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1908, |
| "end": 1929, |
| "text": "(Caselli et al., 2020", |
| "ref_id": null |
| }, |
| { |
| "start": 1987, |
| "end": 2009, |
| "text": "(Fersini et al., 2018)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 2323, |
| "end": 2344, |
| "text": "(Bender et al., 2021)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System description", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In Table 1 , we report a general overview of the performance of the two trained models on fairness tests. Each test involves 500 records randomly extracted from a larger subset, except for neutral statements feminism-related (200) and ableist stereotypes (220): the total number of records, considering all tests, amounts to 5,920. The metric computed by CheckList framework and reported in the table is the failure rate, i.e. the percentage of the records misclassified over the total number of records for that specific test 9 . Unlike metrics such as accuracy, the lower the failure rate (i.e. the closer to 0%) the better the model performs. In general, we notice that the overall failures are extremely high.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Using the generic classifier trained on the dataset by (Founta et al., 2018) , we observe that the handcoded templates about body image and toxic masculinity, belonging to the MFT test type, are the most misclassified (respectively 92.8% and 99.2%).", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 76, |
| "text": "(Founta et al., 2018)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness in Abusive Language Detection", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Regarding examples of insults toward a specific nationality or religion, the failure rate is of 30.2%. On stereotypes about disability, homeless people and old people, the model performs worse, reaching a failure rate of 43.2%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness in Abusive Language Detection", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "9 Other significant metrics could be computed to strengthen the statistics obtained. Since this work is deeply rooted in CheckList framework, we focus our analysis on the options provided by the tool.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness in Abusive Language Detection", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "With respect to the samples related to the perturbation of stereotyped professions connected to the \"unconventional\" gender, verified with the INVariance test type, the model shows zero failure. The issues arise when the sensitive features involved are race, gender, sexual orientation and religion (respectively 94%, 100% and 90.8% failures). This result means that overall the model is sensitive to alterations in these categories: probably this is caused by skewed training data, where e.g. the words \"asexual\" or \"jew\" in neutral, non-offensive contexts are not frequently attested. In addition, some sensitivity is demonstrated in changing the value of the protected attribute nationality (33.2% failure).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness in Abusive Language Detection", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Using the model trained on the AMI dataset (Fersini et al., 2018) , we observe some differences with respect to the generic abusive language model, as reported in in Table 1 . The case where the change is most notable concerns stereotypes related to body image, for which the error drops from 92.8% to 8.6%. Analysing the perturbations of race, gender, sexual orientation and religion, we report a large decrease in errors: respectively from 94.0%, 100% and 90.8% for the first model to 14.8%, 54.0% and 1.6% for the second one. Surprisingly, comparing to the zero failures of the original model with respect to the perturbation of stereotyped professions, this last model reports 62% failures for stereotyped female work roles changed with \"traditional\" male positions. The same outcome is obtained for neutral identification statements related to feminism, where the first model reports zero failures, while the second one achieves 76.5% failure.", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 65, |
| "text": "(Fersini et al., 2018)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 166, |
| "end": 173, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Fairness in Misogyny Detection", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "This could be partially motivated by the fact that the Misogyny Classifier could have generalized a stereotyped conception of reality from skewed data on Misogyny Detection, e.g. learning to associate a high degree of toxicity with neutral posts containing terms such as feminist or negative correlation about women in positions of responsibility, since we can hypothesise that most of the examples the system was trained on contained references to these identities in offensive context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fairness in Misogyny Detection", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "The approach that CheckList proposes should complement the evaluation of NLP models carried out by applying standard metrics such as F1 and accuracy. Indeed, in addition to the traditional held-out datasets, the creation of ad hoc examples, from the most basic ones to the most complex, contribute to highlight weaknesses that cannot be easily detected through large existing datasets. Furthermore, CheckList provides a way to explore the models' dynamics: through the analysis of the errors, we can infer which linguistic phenomena the system has not yet acquired from the data. However, in order to enable this fine-grained evaluation, several specific tests and templates should be created that, like in our case, may contain a small amount of examples because of the difficulty to create or retrieve a varied sample of records covering specific phenomena, e.g. feminist and ableist stereotypes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "A significant drawback, closely related to Check-List deployment on abusive language detection systems, concerns the difficulty of including and dealing with contextual information (Menini et al., 2021) . Sensitive real-world statements often acquire a different connotation w.r.t. the degree of hatred if a certain race, gender, or nationality is present, due to historical or social references (Sap et al., 2019) . In our work, we temporarily avoid such risks using synthetic templates strongly polarized on the one hand towards offensiveness, on the other towards neutrality. Perturbing real-world data would seriously require taking into account these nuances by implementing a more flexible and accurate inspection of prediction variations.", |
| "cite_spans": [ |
| { |
| "start": 181, |
| "end": 202, |
| "text": "(Menini et al., 2021)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 396, |
| "end": 414, |
| "text": "(Sap et al., 2019)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Although state-of-the-art models such as BERTbased models achieve high accuracy levels on a variety of natural language processing tasks, including abusive language detection, we have shown through diverse tests that these systems perform very poorly concerning bias on samples involving implicit stereotypes and sensitive features such as gender or sexual orientation. Whether these biases in BERT-based systems emerge from the classification algorithm, the pretraining phase or the training data will have to be investigated and further explored in the future. As a preliminary analysis, our results show that training sets play a relevant role in this, as already highlighted in previous works (Wiegand et al., 2019b) . For some phenomena, such as body image stereotypes or feminism-related statements, different training sets make the classifier behave very differently, in a way that we were able to quantify through our approach. Moreover, the notebooks through which we built the suite are made available and the tests are easily editable and adaptable to specific data or linguistic aspects to be investigated.", |
| "cite_spans": [ |
| { |
| "start": 697, |
| "end": 720, |
| "text": "(Wiegand et al., 2019b)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "A future direction of this work might be to expand the package integrating other linguistic resources, such as emotion or sentiment lexica. Concerning linguistic capabilities, for Fairness other stereotypes from a wider range of datasets could be more thoroughly explored and formalised into templates. It would be also interesting to analyse whether classification that takes into account the broader discourse context (Menini et al., 2021) is less prone to biases. Suites for other languages could be built as well, given that datasets for abusive language detection are available in many languages beyond English (Corazza et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 420, |
| "end": 441, |
| "text": "(Menini et al., 2021)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 616, |
| "end": 638, |
| "text": "(Corazza et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "As suggested in (Dobbe et al., 2018) , proposing a contribution within the Machine Learning domain responsibly and consciously means foremost acknowledging our own biases. In particular, we are referring to the implementation of hand-coded templates, that we generalized within the Check-List framework starting from real-user examples. The selection and the way in which the tests have been built certainly shaped the results.", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 36, |
| "text": "(Dobbe et al., 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Surely, this paper is not a complete or comprehensive work: for example, a direct interaction with the targeted users and the different stake-holders affected could have enriched the perspective and the insights retrieved. Furthermore, it is important to be aware that any solely technological solution will be partial, as not considering the broader social issue that is the source of these biases means simplifying and \"fixing\" only on the surface (Ntoutsi et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 450, |
| "end": 472, |
| "text": "(Ntoutsi et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Regardless, we strongly believe that abusive language classifiers need a robust value-sensitive evaluation, in order to assess unintended biases and avoid, as far as possible, explicit harm or the amplification of pre-existing social biases, trying to ultimately build systems that contributes in a beneficial way to the society and all its citizens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Among others: Equal Accuracy, Equal Opportunity(Hardt et al., 2016), Demographic Parity.2 Fairlearn, Dalex, InterpretML, FAT Forensics, Captum.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/uclanlp/corefBias/ tree/master/WinoBias/wino", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For reference, the notebook on Sentiment Analysis at https://github.com/marcotcr/checklist/ blob/master/notebooks/Sentiment.ipynb.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Data Statement(Bender and Friedman, 2018): templates and related labels were manually defined by the first author, a non-native English speaker.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Uncased, 12-layer, 768-hidden, 12-heads, 110M parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Part of this work has been funded by the KID ACTIONS REC-AG project (n. 101005518) on \"Kick-off preventIng and responDing to children and AdolesCenT cyberbullyIng through innovative mOnitoring and educatioNal technologieS\", https://www.kidactions.eu/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "SentiWordNet 3.0: An enhanced lexical resource for sentiment analysis and opinion mining", |
| "authors": [ |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Baccianella", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Esuli", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabrizio", |
| "middle": [], |
| "last": "Sebastiani", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefano Baccianella, Andrea Esuli, and Fabrizio Sebas- tiani. 2010. SentiWordNet 3.0: An enhanced lexi- cal resource for sentiment analysis and opinion min- ing. In Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10), Valletta, Malta. European Language Re- sources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "SemEval-2019 task 5: Multilingual detection of hate speech against immigrants and women in Twitter", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco Manuel Rangel", |
| "middle": [], |
| "last": "Pardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "54--63", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S19-2007" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valerio Basile, Cristina Bosco, Elisabetta Fersini, Debora Nozza, Viviana Patti, Francisco Manuel Rangel Pardo, Paolo Rosso, and Manuela San- guinetti. 2019. SemEval-2019 task 5: Multilin- gual detection of hate speech against immigrants and women in Twitter. In Proceedings of the 13th Inter- national Workshop on Semantic Evaluation, pages 54-63, Minneapolis, Minnesota, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Synthetic and natural noise both break neural machine translation", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov and Yonatan Bisk. 2018. Synthetic and natural noise both break neural machine transla- tion.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Data statements for natural language processing: Toward mitigating system bias and enabling better science", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Emily", |
| "suffix": "" |
| }, |
| { |
| "first": "Batya", |
| "middle": [], |
| "last": "Bender", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Friedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "587--604", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily M Bender and Batya Friedman. 2018. Data statements for natural language processing: Toward mitigating system bias and enabling better science. Transactions of the Association for Computational Linguistics, 6:587-604.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "On the dangers of stochastic parrots: Can language models be too big?", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bender", |
| "suffix": "" |
| }, |
| { |
| "first": "Timnit", |
| "middle": [], |
| "last": "Gebru", |
| "suffix": "" |
| }, |
| { |
| "first": "Angelina", |
| "middle": [], |
| "last": "Mcmillan-Major", |
| "suffix": "" |
| }, |
| { |
| "first": "Shmargaret", |
| "middle": [], |
| "last": "Shmitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT '21", |
| "volume": "", |
| "issue": "", |
| "pages": "610--623", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3442188.3445922" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily M. Bender, Timnit Gebru, Angelina McMillan- Major, and Shmargaret Shmitchell. 2021. On the dangers of stochastic parrots: Can language models be too big? In Proceedings of the 2021 ACM Confer- ence on Fairness, Accountability, and Transparency, FAccT '21, page 610-623, New York, NY, USA. As- sociation for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Language (technology) is power: A critical survey of bias in nlp", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Su Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Solon", |
| "middle": [], |
| "last": "Blodgett", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Barocas", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanna", |
| "middle": [], |
| "last": "Wallach", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.14050" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Su Lin Blodgett, Solon Barocas, Hal Daum\u00e9 III, and Hanna Wallach. 2020. Language (technology) is power: A critical survey of bias in nlp. arXiv preprint arXiv:2005.14050.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "2020. I feel offended, don't be abusive! implicit/explicit messages in offensive and abusive language", |
| "authors": [ |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Caselli", |
| "suffix": "" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Jelena", |
| "middle": [], |
| "last": "Mitrovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Inga", |
| "middle": [], |
| "last": "Kartoziya", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Granitzer", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "6193--6202", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tommaso Caselli, Valerio Basile, Jelena Mitrovi\u0107, Inga Kartoziya, and Michael Granitzer. 2020. I feel of- fended, don't be abusive! implicit/explicit messages in offensive and abusive language. In Proceedings of the 12th Language Resources and Evaluation Con- ference, pages 6193-6202, Marseille, France. Euro- pean Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A multilingual evaluation for online hate speech detection", |
| "authors": [ |
| { |
| "first": "Michele", |
| "middle": [], |
| "last": "Corazza", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Menini", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Cabrio", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Tonelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Serena", |
| "middle": [], |
| "last": "Villata", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ACM Trans. Internet Techn", |
| "volume": "20", |
| "issue": "2", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3377323" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michele Corazza, Stefano Menini, Elena Cabrio, Sara Tonelli, and Serena Villata. 2020. A multilingual evaluation for online hate speech detection. ACM Trans. Internet Techn., 20(2):10:1-10:22.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Measuring and mitigating unintended bias in text classification", |
| "authors": [ |
| { |
| "first": "Lucas", |
| "middle": [], |
| "last": "Dixon", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Sorensen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nithum", |
| "middle": [], |
| "last": "Thain", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucy", |
| "middle": [], |
| "last": "Vasserman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, AIES '18", |
| "volume": "", |
| "issue": "", |
| "pages": "67--73", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3278721.3278729" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucas Dixon, John Li, Jeffrey Sorensen, Nithum Thain, and Lucy Vasserman. 2018. Measuring and mitigat- ing unintended bias in text classification. In Pro- ceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, AIES '18, page 67-73, New York, NY, USA. Association for Computing Machin- ery.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A broader view on bias in automated decisionmaking: Reflecting on epistemology and dynamics", |
| "authors": [ |
| { |
| "first": "Roel", |
| "middle": [], |
| "last": "Dobbe", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Gilbert", |
| "suffix": "" |
| }, |
| { |
| "first": "Nitin", |
| "middle": [], |
| "last": "Kohli", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roel Dobbe, Sarah Dean, T. Gilbert, and Nitin Kohli. 2018. A broader view on bias in automated decision- making: Reflecting on epistemology and dynamics. ArXiv, abs/1807.00553.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Overview of the evalita 2018 task on automatic misogyny identification (ami)", |
| "authors": [ |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "12", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elisabetta Fersini, Debora Nozza, and Paolo Rosso. 2018. Overview of the evalita 2018 task on auto- matic misogyny identification (ami). EVALITA Eval- uation of NLP and Speech Tools for Italian, 12:59.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Large scale crowdsourcing and characterization of twitter abusive behavior", |
| "authors": [ |
| { |
| "first": "Antigoni-Maria", |
| "middle": [], |
| "last": "Founta", |
| "suffix": "" |
| }, |
| { |
| "first": "Constantinos", |
| "middle": [], |
| "last": "Djouvas", |
| "suffix": "" |
| }, |
| { |
| "first": "Despoina", |
| "middle": [], |
| "last": "Chatzakou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilias", |
| "middle": [], |
| "last": "Leontiadis", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Blackburn", |
| "suffix": "" |
| }, |
| { |
| "first": "Gianluca", |
| "middle": [], |
| "last": "Stringhini", |
| "suffix": "" |
| }, |
| { |
| "first": "Athena", |
| "middle": [], |
| "last": "Vakali", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Sirivianos", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Kourtellis", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "11th International Conference on Web and Social Media, ICWSM 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antigoni-Maria Founta, Constantinos Djouvas, De- spoina Chatzakou, Ilias Leontiadis, Jeremy Black- burn, Gianluca Stringhini, Athena Vakali, Michael Sirivianos, and Nicolas Kourtellis. 2018. Large scale crowdsourcing and characterization of twitter abusive behavior. In 11th International Conference on Web and Social Media, ICWSM 2018. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A large labeled corpus for online harassment research", |
| "authors": [ |
| { |
| "first": "Jennifer", |
| "middle": [], |
| "last": "Golbeck", |
| "suffix": "" |
| }, |
| { |
| "first": "Zahra", |
| "middle": [], |
| "last": "Ashktorab", |
| "suffix": "" |
| }, |
| { |
| "first": "Rashad", |
| "middle": [ |
| "O" |
| ], |
| "last": "Banjo", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Berlinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Siddharth", |
| "middle": [], |
| "last": "Bhagwan", |
| "suffix": "" |
| }, |
| { |
| "first": "Cody", |
| "middle": [], |
| "last": "Buntain", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Cheakalos", |
| "suffix": "" |
| }, |
| { |
| "first": "Alicia", |
| "middle": [ |
| "A" |
| ], |
| "last": "Geller", |
| "suffix": "" |
| }, |
| { |
| "first": "Quint", |
| "middle": [], |
| "last": "Gergory", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajesh", |
| "middle": [], |
| "last": "Kumar Gnanasekaran", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 ACM on Web Science Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "229--233", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jennifer Golbeck, Zahra Ashktorab, Rashad O Banjo, Alexandra Berlinger, Siddharth Bhagwan, Cody Buntain, Paul Cheakalos, Alicia A Geller, Quint Ger- gory, Rajesh Kumar Gnanasekaran, et al. 2017. A large labeled corpus for online harassment research. In Proceedings of the 2017 ACM on Web Science Conference, pages 229-233. ACM.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Equality of opportunity in supervised learning", |
| "authors": [ |
| { |
| "first": "Moritz", |
| "middle": [], |
| "last": "Hardt", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Price", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Srebro", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1610.02413" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moritz Hardt, Eric Price, and Nathan Srebro. 2016. Equality of opportunity in supervised learning. arXiv preprint arXiv:1610.02413.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Confronting abusive language online: A survey from the ethical and human rights perspective", |
| "authors": [ |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Isar", |
| "middle": [], |
| "last": "Nejadgholi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen C", |
| "middle": [], |
| "last": "Fraser", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2012.12305" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Svetlana Kiritchenko, Isar Nejadgholi, and Kathleen C Fraser. 2020. Confronting abusive language online: A survey from the ethical and human rights perspec- tive. arXiv preprint arXiv:2012.12305.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "NULI at SemEval-2019 task 6: Transfer learning for offensive language detection using bidirectional transformers", |
| "authors": [ |
| { |
| "first": "Ping", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Zou", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "87--91", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S19-2011" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ping Liu, Wen Li, and Liang Zou. 2019a. NULI at SemEval-2019 task 6: Transfer learning for of- fensive language detection using bidirectional trans- formers. In Proceedings of the 13th Interna- tional Workshop on Semantic Evaluation, pages 87- 91, Minneapolis, Minnesota, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Roberta: A robustly optimized BERT pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019b. Roberta: A robustly optimized BERT pretraining ap- proach. CoRR, abs/1907.11692.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A survey on bias and fairness in machine learning", |
| "authors": [ |
| { |
| "first": "Ninareh", |
| "middle": [], |
| "last": "Mehrabi", |
| "suffix": "" |
| }, |
| { |
| "first": "Fred", |
| "middle": [], |
| "last": "Morstatter", |
| "suffix": "" |
| }, |
| { |
| "first": "Nripsuta", |
| "middle": [], |
| "last": "Saxena", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Lerman", |
| "suffix": "" |
| }, |
| { |
| "first": "Aram", |
| "middle": [], |
| "last": "Galstyan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ninareh Mehrabi, Fred Morstatter, Nripsuta Saxena, Kristina Lerman, and Aram Galstyan. 2019. A sur- vey on bias and fairness in machine learning.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Abuse is contextual, what about nlp? the role of context in abusive language annotation and detection", |
| "authors": [ |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Menini", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessio", |
| "middle": [], |
| "last": "Palmero Aprosio", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Tonelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefano Menini, Alessio Palmero Aprosio, and Sara Tonelli. 2021. Abuse is contextual, what about nlp? the role of context in abusive language annotation and detection. CoRR, abs/2103.14916.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Textattack: A framework for adversarial attacks, data augmentation", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "X" |
| ], |
| "last": "Morris", |
| "suffix": "" |
| }, |
| { |
| "first": "Eli", |
| "middle": [], |
| "last": "Lifland", |
| "suffix": "" |
| }, |
| { |
| "first": "Jin", |
| "middle": [ |
| "Yong" |
| ], |
| "last": "Yoo", |
| "suffix": "" |
| }, |
| { |
| "first": "Jake", |
| "middle": [], |
| "last": "Grigsby", |
| "suffix": "" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanjun", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John X. Morris, Eli Lifland, Jin Yong Yoo, Jake Grigsby, Di Jin, and Yanjun Qi. 2020. Textattack: A framework for adversarial attacks, data augmenta- tion, and adversarial training in nlp.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Unintended bias in misogyny detection", |
| "authors": [ |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [], |
| "last": "Volpetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "IEEE/WIC/ACM International Conference on Web Intelligence, WI '19", |
| "volume": "", |
| "issue": "", |
| "pages": "149--155", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3350546.3352512" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Debora Nozza, Claudia Volpetti, and Elisabetta Fersini. 2019. Unintended bias in misogyny detection. In IEEE/WIC/ACM International Conference on Web Intelligence, WI '19, page 149-155, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Bias in data-driven artificial intelligence systems-An introductory survey", |
| "authors": [ |
| { |
| "first": "Eirini", |
| "middle": [], |
| "last": "Ntoutsi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavlos", |
| "middle": [], |
| "last": "Fafalios", |
| "suffix": "" |
| }, |
| { |
| "first": "Ujwal", |
| "middle": [], |
| "last": "Gadiraju", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasileios", |
| "middle": [], |
| "last": "Iosifidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Nejdl", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria-Esther", |
| "middle": [], |
| "last": "Vidal", |
| "suffix": "" |
| }, |
| { |
| "first": "Salvatore", |
| "middle": [], |
| "last": "Ruggieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Franco", |
| "middle": [], |
| "last": "Turini", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "WIREs Data Mining and Knowledge Discovery", |
| "volume": "10", |
| "issue": "3", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1002/widm.1356" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eirini Ntoutsi, Pavlos Fafalios, Ujwal Gadiraju, Vasileios Iosifidis, Wolfgang Nejdl, Maria-Esther Vi- dal, Salvatore Ruggieri, Franco Turini, Symeon Pa- padopoulos, Emmanouil Krasanakis, Ioannis Kom- patsiaris, Katharina Kinder-Kurlanda, Claudia Wag- ner, Fariba Karimi, Miriam Fernandez, Harith Alani, Bettina Berendt, Tina Kruegel, Christian Heinze, Klaus Broelemann, Gjergji Kasneci, Thanassis Tiropanis, and Steffen Staab. 2020. Bias in data- driven artificial intelligence systems-An introduc- tory survey. WIREs Data Mining and Knowledge Discovery, 10(3):e1356.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 2227- 2237.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Are red roses red? evaluating consistency of question-answering models", |
| "authors": [ |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Marco Tulio Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6174--6184", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1621" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Tulio Ribeiro, Carlos Guestrin, and Sameer Singh. 2019. Are red roses red? evaluating con- sistency of question-answering models. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6174-6184, Florence, Italy. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Beyond accuracy: Behavioral testing of NLP models with CheckList", |
| "authors": [ |
| { |
| "first": "Tongshuang", |
| "middle": [], |
| "last": "Marco Tulio Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4902--4912", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.442" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Tulio Ribeiro, Tongshuang Wu, Carlos Guestrin, and Sameer Singh. 2020. Beyond accuracy: Be- havioral testing of NLP models with CheckList. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4902- 4912, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "What's in your embedding, and how it predicts task performance", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Shashwath Hosur Ananthakrishna", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2690--2703", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Rogers, Shashwath Hosur Ananthakrishna, and Anna Rumshisky. 2018. What's in your embedding, and how it predicts task performance. In Proceed- ings of the 27th International Conference on Com- putational Linguistics, pages 2690-2703, Santa Fe, New Mexico, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "The risk of racial bias in hate speech detection", |
| "authors": [ |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Sap", |
| "suffix": "" |
| }, |
| { |
| "first": "Dallas", |
| "middle": [], |
| "last": "Card", |
| "suffix": "" |
| }, |
| { |
| "first": "Saadia", |
| "middle": [], |
| "last": "Gabriel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1668--1678", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1163" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maarten Sap, Dallas Card, Saadia Gabriel, Yejin Choi, and Noah A. Smith. 2019. The risk of racial bias in hate speech detection. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 1668-1678, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Social bias frames: Reasoning about social and power implications of language", |
| "authors": [ |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Sap", |
| "suffix": "" |
| }, |
| { |
| "first": "Saadia", |
| "middle": [], |
| "last": "Gabriel", |
| "suffix": "" |
| }, |
| { |
| "first": "Lianhui", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Noah", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maarten Sap, Saadia Gabriel, Lianhui Qin, Dan Juraf- sky, Noah A Smith, and Yejin Choi. 2020. Social bias frames: Reasoning about social and power im- plications of language. In ACL.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "A framework for understanding unintended consequences of machine learning", |
| "authors": [ |
| { |
| "first": "Harini", |
| "middle": [], |
| "last": "Suresh", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "V" |
| ], |
| "last": "Guttag", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Harini Suresh and John V. Guttag. 2020. A framework for understanding unintended consequences of ma- chine learning.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Correlation-based intrinsic evaluation of word vector representations", |
| "authors": [ |
| { |
| "first": "Yulia", |
| "middle": [], |
| "last": "Tsvetkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 1st Workshop on Evaluating Vector-Space Representations for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "111--115", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-2520" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yulia Tsvetkov, Manaal Faruqui, and Chris Dyer. 2016. Correlation-based intrinsic evaluation of word vec- tor representations. In Proceedings of the 1st Work- shop on Evaluating Vector-Space Representations for NLP, pages 111-115, Berlin, Germany. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaiser", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS'17", |
| "volume": "", |
| "issue": "", |
| "pages": "6000--6010", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of the 31st Interna- tional Conference on Neural Information Processing Systems, NIPS'17, page 6000-6010, Red Hook, NY, USA. Curran Associates Inc.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Hateful Symbols or Hateful People? Predictive Features for Hate Speech Detection on Twitter", |
| "authors": [ |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the NAACL Student Research Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "88--93", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeerak Waseem and Dirk Hovy. 2016. Hateful Sym- bols or Hateful People? Predictive Features for Hate Speech Detection on Twitter. In Proceedings of the NAACL Student Research Workshop, pages 88-93, San Diego, California.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "UHH-LT at SemEval-2020 task 12: Fine-tuning of pre-trained transformer networks for offensive language detection", |
| "authors": [ |
| { |
| "first": "Gregor", |
| "middle": [], |
| "last": "Wiedemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Seid Muhie Yimam", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Biemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fourteenth Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1638--1644", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gregor Wiedemann, Seid Muhie Yimam, and Chris Biemann. 2020. UHH-LT at SemEval-2020 task 12: Fine-tuning of pre-trained transformer networks for offensive language detection. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 1638-1644, Barcelona (online). International Com- mittee for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Detection of Abusive Language: the Problem of Biased Datasets", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Wiegand", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Ruppenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Kleinbauer", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "602--608", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1060" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Wiegand, Josef Ruppenhofer, and Thomas Kleinbauer. 2019a. Detection of Abusive Language: the Problem of Biased Datasets. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 602-608, Minneapolis, Minnesota. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Detecting Derogatory Compounds -An Unsupervised Approach", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Wiegand", |
| "suffix": "" |
| }, |
| { |
| "first": "Maximilian", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Ruppenhofer", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2076--2081", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1211" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Wiegand, Maximilian Wolf, and Josef Rup- penhofer. 2019b. Detecting Derogatory Compounds -An Unsupervised Approach. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long and Short Papers), pages 2076-2081, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Errudite: Scalable, reproducible, and testable error analysis", |
| "authors": [ |
| { |
| "first": "Tongshuang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [ |
| "Tulio" |
| ], |
| "last": "Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Heer", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Weld", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "747--763", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1073" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tongshuang Wu, Marco Tulio Ribeiro, Jeffrey Heer, and Daniel Weld. 2019. Errudite: Scalable, repro- ducible, and testable error analysis. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 747-763, Flo- rence, Italy. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "SemEval-2020 task 12: Multilingual offensive language identification in social media (OffensEval 2020)", |
| "authors": [ |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Pepa", |
| "middle": [], |
| "last": "Atanasova", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgi", |
| "middle": [], |
| "last": "Karadzhov", |
| "suffix": "" |
| }, |
| { |
| "first": "Hamdy", |
| "middle": [], |
| "last": "Mubarak", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fourteenth Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1425--1447", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcos Zampieri, Preslav Nakov, Sara Rosenthal, Pepa Atanasova, Georgi Karadzhov, Hamdy Mubarak, Leon Derczynski, Zeses Pitenis, and \u00c7a\u011fr\u0131 \u00c7\u00f6ltekin. 2020. SemEval-2020 task 12: Multilingual offen- sive language identification in social media (Offen- sEval 2020). In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 1425- 1447, Barcelona (online). International Committee for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Gender bias in coreference resolution: Evaluation and debiasing methods", |
| "authors": [ |
| { |
| "first": "Jieyu", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianlu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Yatskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Vicente", |
| "middle": [], |
| "last": "Ordonez", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "15--20", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-2003" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jieyu Zhao, Tianlu Wang, Mark Yatskar, Vicente Or- donez, and Kai-Wei Chang. 2018. Gender bias in coreference resolution: Evaluation and debiasing methods. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 15-20, New Orleans, Louisiana. Association for Computa- tional Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "text": "CheckList visual summary of the performances obtained by the generic Abusive Language classifier on the INVariance tests within Fairness capability7 All the data and the Jupyter notebooks implemented to run the tests are available at https://github.com/MartaMarchiori/ Fairness-Analysis-with-CheckList", |
| "uris": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "text": "Performance of Abusive Language classifier and Misogyny Detection classifier on Fairness tests. Each cell contains the failure rate expressed in percentage for each test type. Each test involves 500 records randomly extracted from a larger subset, except for neutral statements feminism-related (200) and ableist stereotypes (220).", |
| "html": null, |
| "content": "<table/>", |
| "num": null |
| } |
| } |
| } |
| } |