| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T05:10:36.350033Z" |
| }, |
| "title": "Counter-TWIT: An Italian Corpus for Online Counterspeech in Ecological Contexts", |
| "authors": [ |
| { |
| "first": "Pierpaolo", |
| "middle": [], |
| "last": "Goffredo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e9 C\u00f4te d'Azur", |
| "location": { |
| "country": "France" |
| } |
| }, |
| "email": "pierpaolo.goffredo@inria.fr" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e0 degli Studi di Torino", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Bianca", |
| "middle": [], |
| "last": "Cepollaro", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e0 Vita-Salute San Raffaele", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "cepollaro.biancamaria@hsr.it" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e0 degli Studi di Torino", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This work describes the process of creating a corpus of Twitter conversations annotated for the presence of counterspeech in response to toxic speech related to axes of discrimination linked to sexism, racism and homophobia. The main novelty is an annotated dataset comprising relevant tweets in their context of occurrence. The corpus is made up of tweets and responses captured by different profiles replying to discriminatory content or objectionably couched news. An annotation scheme was created to illustrate the relevant dimensions of toxic speech and counterspeech. An analysis of the collected and annotated data and of the Inter-Annotator Agreement (IAA) that emerged during the annotation process is included. Moreover, we report about preliminary experiments on automatic counterspeech detection, based on supervised automatic learning models trained on the new dataset. The results highlight the fundamental role played by the context in this detection task, confirming our intuitions about the importance to collect tweets in their context of occurrence.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This work describes the process of creating a corpus of Twitter conversations annotated for the presence of counterspeech in response to toxic speech related to axes of discrimination linked to sexism, racism and homophobia. The main novelty is an annotated dataset comprising relevant tweets in their context of occurrence. The corpus is made up of tweets and responses captured by different profiles replying to discriminatory content or objectionably couched news. An annotation scheme was created to illustrate the relevant dimensions of toxic speech and counterspeech. An analysis of the collected and annotated data and of the Inter-Annotator Agreement (IAA) that emerged during the annotation process is included. Moreover, we report about preliminary experiments on automatic counterspeech detection, based on supervised automatic learning models trained on the new dataset. The results highlight the fundamental role played by the context in this detection task, confirming our intuitions about the importance to collect tweets in their context of occurrence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Billions of users are active every day on the main social media platforms and they are regularly exposed to toxic discourse, i.e. speech that inflicts psychological or emotional harm and/or incites people to participate in bigoted practices ranging from sexism to homophobia, to racism. To protect users from online toxicity, social media providers have been increasingly implementing censorship-based measures. Such measures are highly controversial and only targeted to the most extreme and explicit forms of toxic speech. Implicit toxic contents are particularly dangerous because they can go under the radar, they are hard to question, and may end up being accepted without conversation participants fully realizing it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The question arises: how can we counter online toxic speech? Recent studies in social philosophy of language investigated the strategy that consists in engaging in interventions aimed at avoiding that toxic contents get (wittingly or unwittingly) accepted by the conversation participants. Such strategy is often dubbed counterspeech and has been mostly analyzed by taking into account face-to-face exchanges. Philosophers of language (Lepoutre, 2017; Langton, 2018) have focused on how counterspeech could work in idealized conversational models. In particular, they have focused on speech that counters implicit toxic contents by (i) spelling out, unpacking, articulating the objectionable contents implicitly conveyed by a given utterance and then (ii) challenging, questioning, rejecting, disputing, confronting it. This counterspeech strategy seems very costly. The first move is cognitively costly: it's hard to unpack implicit content on the spot. The second move is about social cost: it may be tough to go and take a confrontational attitude.", |
| "cite_spans": [ |
| { |
| "start": 435, |
| "end": 451, |
| "text": "(Lepoutre, 2017;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 452, |
| "end": 466, |
| "text": "Langton, 2018)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Interestingly, certain features of how communication works on social networks make social media particularly interesting venues to easily observe real instances of counterspeech in ecological contexts. For counterspeech to succeed in face-to-face interactions, the counterspeaker needs to be ready to intervene saying the right thing, in the right place, at the right moment. On social networks, on the other hand, counterspeech can well be asynchronous: this may lighten its cognitive load. As for the social cost of counterspeech, note that social network users enjoy a bit of anonymity in their online intervention and online interactions follow a different etiquette than face-to-face exchanges in terms of interruption of the \"conversation\". This may possibly lighten the social cost associated with counterspeech. A further interesting aspect is that online counterspeech can reach many more people than offline interventions. In fact, users often challenge offline contents (newspapers articles, pieces of public speeches, reported conversations, passages of textbooks, and so on) on social networks, in order to give their conversational moves more attention.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Studying counterspeech online comes with the added benefit of enabling the researcher to build computational models of language interactions involving toxic speech and counterspeech. By leveraging the most recent Natural Language Processing techniques, a corpus of counterspeech represents the first step towards automated systems to detect, support or even generate effective responses to toxic speech online.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The exploratory theoretical investigation conducted in philosophy raises many empirical questions. In our work, we address a few ones. For instance: do people on social networks ever employ such an idealized model where in order to reject implicit toxic content one has to first make explicit what was wrong with it? Or do users prefer less sophisticated strategy, like insulting and attacking bigoted contributions? Does the use of irony make the counterspeaker sound more or less aggressive? Do users support counterspeakers with reactions and comments or is it a solitary enterprise? Many more questions are still left unanswered, but this work paves the way for illuminating further the nature and working of online counterspeech.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The contributions of this article can be summarized as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 A novel corpus of toxic speech and counterspeech in a conversational context from Italian social media, covering different target groups.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 A novel annotation schema encoding a fine-grained classification of toxic speech and argumentative relations between utterances.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 A pilot experiment on automatic counterspeech detection, showing the importance of taking the conversational context into account rather than modeling single utterances in isolation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There is a growing concern among the ICT (Information and Communication Technologies) companies leading the development of Social Networks about toxic speech: as it can undermine the image of such social environments as \"safe\" place, they must implement methods to cut off this phenomenon (Mathew et al., 2019) . Some countries started to considered hate speech as a crime and sentencing it as such 1 . In other cases, institutions invited the ICT companies to subscribe codes of conduct concerning hate speech moderation and censorship on their platforms. This is the case of the Code of Conduct issued by the EU Commission in 2016 (EU Commission, 2016) . Moreover, Social Networks regulated hateful conduct, publishing guidelines to avoid harmful behaviors subscribed by users as part of their terms of service 2 . However, such measures don't seem to suffice to effectively combat the phenomenon (Gagliardone, 2015) . Approaches to counterspeech have been investigated by the Computational Linguistics community, suggesting that counterspeech can reduce or limit the hateful content on the Web, especially in Social Networks (Mathew et al., 2018) . However, especially from a computational point of view, the development of corpora and models for the automatic detection and generation of counterspeech is still underdeveloped, while most of the efforts have been devoted to the detection of various forms of toxic speech, hate speech included (Poletto et al., 2021; Jurgens et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 289, |
| "end": 310, |
| "text": "(Mathew et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 633, |
| "end": 654, |
| "text": "(EU Commission, 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 899, |
| "end": 918, |
| "text": "(Gagliardone, 2015)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1128, |
| "end": 1149, |
| "text": "(Mathew et al., 2018)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1447, |
| "end": 1469, |
| "text": "(Poletto et al., 2021;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1470, |
| "end": 1491, |
| "text": "Jurgens et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Most literature focuses on English language and considers toxic speech data collected from specific templates, which limits the coverage of explicit toxic speech and leaves out implicit toxic speech altogether. Chung et al. (2019) recently created a large multilingual corpus of short texts in English, French and Italian, called CONAN, consisting of <hate speech (HS) -counterspeech (CS)> pairs created ad hoc in the context of the HateMeter project 3 , with the effort of more than 100 operators from NGOs and with a special focus on Anti-Muslim hatred online in different European countries. Annotated corpora like CONAN enable a systematic study of Counter-Narratives (CNs), a study which is still in its beginnings, but differs from the one we presented here. In particular, counterspeech in CONAN is not observed in an ecological setting, which is the perspective we hold in the current study.", |
| "cite_spans": [ |
| { |
| "start": 211, |
| "end": 230, |
| "text": "Chung et al. (2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A similar work to Chung et al. (2019) is realized by , where off-the-shelf 1 https://en.wikipedia.org/wiki/Hate_ speech_laws_by_country 2 Twitter's measures: https://help. twitter.com/it/rules-and-policies/ hateful-conduct-policy and Facebook's measure:", |
| "cite_spans": [ |
| { |
| "start": 18, |
| "end": 37, |
| "text": "Chung et al. (2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "https://www.facebook.com/ communitystandards/hate_speech NMT models are used to synthesize silver data from other languages using the CONAN dataset as kick-start for generation to overcome the scarcity of gold standard data for training and the lack of huge datasets made of counter narratives in Italian language. The accomplishment is done under different resource conditions, testing the effect of using (i) silver data, (ii) gold standard data, and (iii) their combination. Tekiroglu et al. (2020) investigate methods to obtain high quality Counter-Narratives while reducing efforts from experts trained by some Non-Governmental Organizations (NGOs) to intervene in online hateful conversations. Orbach et al. (2020) created benchmark data for training and evaluating the performance of an automatic detection system of counterspeech debates in order to introduce a novel NLU task. Mathew et al. (2019) propose a study to understand how the counterspeech phenomenon is related to statistics of comments collected from YouTube. Menini et al. (2021) present experimental results obtained considering different methods with and without context referring to abusive vs. not abusive tweets.", |
| "cite_spans": [ |
| { |
| "start": 700, |
| "end": 720, |
| "text": "Orbach et al. (2020)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 886, |
| "end": 906, |
| "text": "Mathew et al. (2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1031, |
| "end": 1051, |
| "text": "Menini et al. (2021)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Unlike the related works presented in this section, the contribution of this work in Automatic Counterspeech Detection is the development of a multi-layer corpus of Italian Twitter data in the context of their conversation thread.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We developed a novel corpus, called Counter-TWIT, to study counterspeech online in an ecological setting, based on Twitter conversation threads in the Italian language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Counter-TWIT corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We collected a new dataset of tweets. Counter speech is rare across all of social media, and we considered several strategies for ensuring there were sufficient instances in our dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Collecting Counterspeech Twitter Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We chose Twitter as the source platform, in particular collecting tweets and their replies, because of the accessibility of its API.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Collecting Counterspeech Twitter Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Collecting counterspeech in an ecological setting is a very challenging task, since there are not obvious keyword-based strategies to filter out the relevant tweets from the ones that are posted everyday and that can be collected by relying on the Twitter API. Let us recall that the creation of the novel corpus was a stage, necessary to the following preliminary experimental phase, where the corpus will be exploited for training a machine learning model able to recognize automatically counterspeech discourse on misogyny, homophobia and racism. We initially selected the profiles of activists, organizations, or pages especially devoted to calling out common instances of bigotry. Users interacting in such contexts are likely to comment on hate speech and thus engage in counterspeech. Such profiles are not as popular as those of public figures such as actresses and politicians. In some cases, however, a few comments are enough to start an interesting conversation thread. In such pages users often highlight how certain news are presented in troublesome ways implicitly conveying discriminatory contents. In addition, these profiles allow their followers to reply in order to share their personal opinion giving rise to counterspeech as a collective enterprise, which is an interesting trait.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Collecting Counterspeech Twitter Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For collecting data different tools for Python language have been used in favor of rebuilding the conversation tree.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Collecting Counterspeech Twitter Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To annotate the tweets we developed a custom annotation platform. Expert annotators were selected among bachelor's, master's and PhD students and university researchers, within disciplines related to Humanities and Social Sciences such as philosophy and psychology, with some specific background in the study of hate speech and counterspeech.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The annotators were trained in various areas of language sciences, ranging from philosophy of language to computational linguistics. Therefore, they were trained to be sensitive to the relevant distinctions at play in the annotation, e.g., between explicit and implicit communication, irony, and so on. The annotation scheme was applied by seven annotators to a collection of 624 messages, including 344 root tweets and their replies (280 posts). The annotators were provided clear and detailed guidelines 4 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "At first, the annotators tested a preliminary version of the platform on a small sample of tweets and replies, sharing comments and discussing doubts and controversial issues that needed explanation or modification. This process led to settling on the final version of the annotation scheme and guidelines. The annotation process was based on two layers: firstly, annotators were called to judge whether a tweet or reply could be considered as (Yes/No): TOXIC SPEECH, COUNTERSPEECH, SUPPORT TO COUNTERSPEECH. All of these are binary questions and not mutually exclusive. Figure 1 shows a screenshot of the annotation interface.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 571, |
| "end": 579, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In case a tweet or reply is marked as \"counterspeech\", the annotator is asked to annotate the type of counterspeech and the target group considered (Misogyny, Homophobia, Racism and Other 5 ), as a second annotation layer. Counterspeech often denounces the nature of the discriminatory content it aims to counter. There are several possible labels that can be used for marking different classes of counterspeech, also based on previous studies (Mathew et al., 2019) . After a careful discussion and inspired by the reflections in (Cepollaro, 2021), we decided to select four labels associated to the different type of counterspeech: EX-PLICITATION, HOSTILITY, IRONY/HUMOR, AL-TERNATIVE. In the second-level each label is bi- 5 We did not constrain the definition of the main axes of discrimination in place, because we wanted annotators to be aligned with the folk understanding of such notions. We introduced the category \"Other\" to collect any other targets, with the idea of qualitatively analyzing any choices on this item. The small number of such selections (only 33 within the entire corpus) seems to confirm that the choice of targets was reasonable. nary and they are not mutually exclusive, except for hostility that is rated on a scale from 1 to 10. In the following all the layers included in our annotation scheme are described.", |
| "cite_spans": [ |
| { |
| "start": 444, |
| "end": 465, |
| "text": "(Mathew et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 725, |
| "end": 726, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Toxic Speech Toxic speech promotes discrimination or deprives people of important powers of self-determination and social and civic participation. Racist, sexist and homophobic slurs count as systemic toxic discourse that generally worsens its targets' well being. Furthermore, note that toxic speech is not about impolite language or vulgar expressions: speech can be toxic and damage people's dignity without employing \"bad\" words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Therefore, we call toxic speech the discourse that explicitly or implicitly expresses or promotes unjust discrimination on the basis of gender, ethnicity, geographical origin, sexual orientation, the presence of disabilities, and so on. The toxic speech label applies both to explicit and obvious cases, and to implicit and more difficult to grasp cases. What distinguishes toxic speech is that it implicitly or explicitly conveys content that contributes to extant social injustice, e.g., those due to sexism, homophobia, and racism. This could be in principle performed via aggressive as well as nonaggressive speech. Take for instance a scenario where one attacks their interlocutor with a racial insult: this is aggressive toxic speech. Then take a scenario where one claims that the members of a given group should not benefit from certain rights: this is toxic speech too because of its content, but it is not aggressive in the sense of the former. In other words, the feature of aggressiveness or hostility does not primarily concern the content but the form of a contribution. This said, it appears clear how a counterspeech intervention can also display a different degree of aggressiveness or hostility in its form. Counterspeech in general (at least of the kind we considered in this study) is confrontational in character, for it challenges a piece of discriminatory content. But confrontation can be carried out in more or less aggressive ways. What's the difference between toxic speech and counterspeech hostility? Possibly none, but this does not blur the divide between the two notions: while the former conveys discriminatory content, the latter challenges it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Counterspeech Counterspeech is a second-round speech expressing disagreement with a content or attitude. The type of counterspeech we are interested in is the one that tries to combat discriminatory or stereotyped contents (e.g., sexist, homophobic, racist, etc.) occurring in another post, comment, newspaper article, song, film, etc. expressed using a toxic language. In our framework, counterspeech is meant to be used to address toxic speech, rather than merely false speech. It is particularly interesting when it is exploited to address implicit rather than explicit toxic speech (speech conveying toxic contents via implications, presupposition, and the like): \"implicit toxic contents are particularly dangerous: they can go under radar, they are hard to question, and may end up being accepted in the common ground without conversation participants fully realizing it. They may be immune to censorship, slipping through it\" (Cepollaro, 2021).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Support to counterspeech Support consists in giving resonance and visibility to a certain counterspeech intervention (inside or outside the Twitter thread), in expressing approval and support for another user's intervention. For example, in this exchange 6 :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "-\"Miley Cyrus video reveals all the sexualization of lesbians.\" -\"Quite right!\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The answer expresses approval and support for the counterspeech intervention, therefore it counts as support for the counterspeech.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Explicitation The explicitation of the implicit meaning unpacks, articulates and brings out what was implicit in a message (Sbis\u00e0, 1999) . This typology is particularly interesting because discriminatory contents are often conveyed. Social media users sometimes employ explicitation to point out how certain apparently harmless interventions actually communicated discriminatory contents. Explicitation, by articulating what is implicit, opens up the possibility that implicit content will be criticized or questioned.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 136, |
| "text": "(Sbis\u00e0, 1999)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The practice of explicitation highlights implicitly transmitted information, and monitors and filters the influence that the implicit meaning can have on the conversation. Here is an example of what the practice of explicitation looks like:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "-\"Emma Watson is beautiful but smart\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "6 The main tweet is in bold, while the reply is in italic, the tweets are translated into English by the authors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "-\"What does 'but' mean, that a beautiful woman is not smart?!\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this case the second speaker challenges the first's assumption that there would be a contrast for a woman between being beautiful and being smart.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Hostility In engaging in counterspeech, users can express various degrees of hostility and antagonism. This is often carried out through (but is not limited to) the use of aggressive and insulting language. For instance:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\"Good giant? What a bunch of morons\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The speaker in the example gets angry at the newspaper that called \"good giant\" a man who murdered a lesbian woman for rejecting him. To conceptualize and then measure the efficacy of counterspeech is still an open question. Among the most promising candidates, we find its capabilities to change people's minds and raise awareness about discrimination in the toxic speaker and in the audience. It is also an open question what modulates counterspeech efficacy. It may well be that hostility backfires, and that less confrontational counterspeech styles obtain better effects, but it is not said. This could easily depend on the context and the kind of content that counterspeech aims to reject. For this reason, our study is not yet concerned with counterspeech efficacy, but rather on the ways in which it is performed and perceived. A further step in this research is then to conceptualize and measure its efficacy, relying on a classification of its most salient features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Alternative In engaging in counterspeech, users can propose an alternative to the main topic being discussed: they may for instance object to the way a newspaper title an article and come up with an alternative that in their view would avoid the troublesome contents conveyed by the actual one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "This kind of correcting interventions typically targets the wording of the text or some aspects of its content, suggesting a more \"fair\" point of view or providing a more detailed description of the facts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The news to report is not that there are baby prostitutes in Parioli, but that there are pedophile customers in Parioli. Stop blaming the victims!", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The speaker in the example suggests that newspaper shouldn't talk about \"baby prostitutes\" but \"pedophile clients\" since their way of couching the news implicitly blames victims.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Irony/Humor Irony detection consists in reporting if a text contains traces of irony. In this context we call \"irony\" a plethora of phenomena, such as humor, something witty, black humor, sarcasm, etc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Irony can be expressed in many ways and there is no single definition of what is ironic and what is not. In this task users are asked, expanding as much as possible the definition of irony, to note as ironic any humorous, sarcastic, ironic intent, be it positive or negative.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\"And thank goodness he's a good giant. If he was bad that he did, would he eat it?\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
"text": "This tweet ironically remarks how ridiculous it is to call \"good\" someone who murdered a woman for rejecting him. Note that the labels on this layer are not mutually exclusive: more than one typology label could be selected during the annotation.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For each tweet, the gold label was obtained by aggregating the results of the individual judgments, by applying simple mathematical operations: majority vote for binary labels and arithmetic mean for labels with numeric values (only Hostility in our scheme). Figure 2 shows the distribution of the gold standard labels. 3.04% of tweets were labeled as both Counterspeech and Support, while no overlap was found between Toxic and the other labels. The labels are not evenly distributed between tweets and replies. It is possible to observe in Figure 3 that TOXIC SPEECH is more present in replies (3.5%) than in tweets (1.7%), as well as SUPPORT (17.5% in replies and 7.2% in tweets). The opposite is true for the COUNTERSPEECH label, present in 16.2% of the tweets and 8.9% of the replies.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 259, |
| "end": 267, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 542, |
| "end": 548, |
| "text": "Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Results", |
| "sec_num": "3.3" |
| }, |
| { |
"text": "Interestingly, the presence of counterspeech at the root tweet level is significant. This indicates that tweets classified as counterspeech have led users to comment to support counterspeech. These first analysis results confirm that collecting data from target profiles is effective for the purpose of filtering samples of counterspeech in the wild, given that the phenomenon is very sparse and a simple keyword-based or hashtag approach is harder to be applied. We can also see that in the debate generated around these profiles there is often an attempt of countering toxic speech generated elsewhere (news, TV, etc) . This is interesting because it allows us to analyze the phenomenon of toxic speech in social media (and its reactions) in more comprehensive way such as by investigating cross-references between various media, and framing the overall debate in the context of a media ecosystem. This latter includes social media but also other toxic information sources to be countered. As a consequence, the support label among annotated replies is also significant. Figure 4 shows the distribution of the gold standard labels for the second level of annotation considering the whole corpus made of 642 tweets. Also in this case it is possible to notice that a tweet or a reply can be considered belonging to different type of counterspeech rather than a single one as illustrated in the Figure 5 .",
| "cite_spans": [ |
| { |
| "start": 604, |
| "end": 619, |
| "text": "(news, TV, etc)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1073, |
| "end": 1081, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 1394, |
| "end": 1402, |
| "text": "Figure 5", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Results", |
| "sec_num": "3.3" |
| }, |
| { |
"text": "Regarding the neutral class, this is represented by all those tweets and replies that are not classified as toxic, counterspeech, or support to counterspeech. It includes 472 tweets and replies. This imbalance in the data highlights once again how difficult it can be to collect these types of tweets and replies and subsequently categorize them. ",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Results", |
| "sec_num": "3.3" |
| }, |
| { |
"text": "The quality of the gold standard is evaluated in terms of inter-annotator agreement using Krippendorff's \u03b1 , a generalization of Cohen's Kappa to an arbitrary number of annotators applicable to incomplete question-answer matrices, which was suitable to our case (Artstein and Poesio, 2008) . The analysis is limited to the binary labels. Table 1 shows that the annotation of the replies in particular is controversial and the issue deserves a deeper investigation. One possible reason could be that different annotators interpret the main tweet differently, and then, with a cascade effect, diverge more in assigning the label to the reply tweets. The agreement on the root tweets is, instead, generally higher, in particular on the core label COUNTER-",
| "cite_spans": [ |
| { |
| "start": 262, |
| "end": 289, |
| "text": "(Artstein and Poesio, 2008)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 338, |
| "end": 345, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Inter-Annotator Agreement", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In addition, the label which created disagreement the most has been EXPLICITATION. The annotators reported that during the annotation task it was very difficult to understand when a tweet or a reply could be marked with this tag, which highlighted a difficulty in reaching a common understanding of the meaning of the label. Recent literature postulates how disagreement stems from different sources. We hypothesize that in the case of this work, the disagreement on the main level of annotation (toxic/counterspeech) is dependent on the highly subjective nature of the annotation task. However, the disagreement on the finer-grained level may be due to the more difficult, ambiguous nature of the task, which needs greater knowledge of linguistic phenomena under observation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SPEECH.", |
| "sec_num": null |
| }, |
| { |
"text": "Furthermore, a deeper analysis on those tweets (25) and replies (4) which have been considered as counterspeech by all three annotators reveals confusion in agreeing on EXPLICITATION as shown in Table 2 . Thus, the label which created a visible disagreement has been the explicitation. The annotators reported that during the annotation task it was very difficult to understand when a tweet or a reply could be marked with this tag, which highlighted a difficulty in reaching a common understanding of the meaning of the label.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 195, |
| "end": 202, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "SPEECH.", |
| "sec_num": null |
| }, |
| { |
| "text": "However, the disagreement on the finer-grained level may be due to the more difficult, ambiguous nature of the task, which needs greater knowledge of linguistic phenomena under observation. The IAA results reflect the problems described.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SPEECH.", |
| "sec_num": null |
| }, |
| { |
"text": "We carried out a battery of experiments in order to perform three independent binary classifications: toxic vs. non-toxic speech, counterspeech vs. not counterspeech, and support to counterspeech vs. not support to counterspeech. We employ a supervised classifier based on BERT (Devlin et al., 2019) pre-trained on a large corpus of Italian tweets named AlBERTo (Polignano et al., 2019) .",
| "cite_spans": [ |
| { |
| "start": 278, |
| "end": 299, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 362, |
| "end": 386, |
| "text": "(Polignano et al., 2019)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The metrics used to evaluate AlBERTo's performance are Precision, Recall, and F1-Score for the individual labels, and their macro-average.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4" |
| }, |
| { |
"text": "The three experiments are 5-fold cross-validation experiments with 9 fine-tuning epochs and a learning rate of 10 \u22125 . The results are shown in Table 3 : Model performance over three binary classification using reply text as dataset for training. (0), (1), and (avg) refer respectively to positive class, negative class, and their macro-average.",
"cite_spans": [],
"ref_spans": [
{
"start": 144,
"end": 151,
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Prec.(0) Rec.(0) F1 (0) Prec.(1) Rec. 1 in Table 3 . Despite the small size of the corpus and the representative items for each class, the classifiers for COUNTERSPEECH and SUPPORT perform reasonably well, while the classification of TOXIC SPEECH turned out to be a challenge, in particular for detecting the positive class.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 43, |
| "end": 50, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Label", |
| "sec_num": null |
| }, |
| { |
| "text": "The results are obtained with the model finetuned only with the tweet or reply text in isolation. We performed an additional experiment taking into account the root of the conversations where the replies belong. We do so by concatenating the text of the reply to the text of the original tweet it replies to, with the goal of observing how the performance of the model changes when considering the context of the reply. The results of this second experiment are shown in Table 4 . The experiment is performed with the same hyperparameters of the previous experiment, in order to provide a consistent comparison.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 471, |
| "end": 478, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Label", |
| "sec_num": null |
| }, |
| { |
| "text": "Including context in the training improves the classification of counterspeech. This is due mainly to a higher recall on the positive class. This is true for all labels, and particularly for COUNTER-SPEECH, which is about 65% higher. However, the extra training data seem to confuse the classifiers for the other two labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Label", |
| "sec_num": null |
| }, |
| { |
"text": "In order to get some deeper insight about the difficulties in classifying a counterspeech content, we selected False Positives (FP), i.e., counterspeech tweets that have not been classified as such by the model, and exploited the information included in the finer-grained annotation layer regarding counterspeech categories, namely EXPLIC-ITATION, HOSTILITY, IRONY/HUMOR, ALTER-",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We considered all the FPs for the first annotation layer, counting all the data (tweets or reply) that were labeled as belonging to the counterspeech category from humans but not from the model. Thus, for those tweets we checked the values attached to the counterspeech typology labels in order to find a meaning among the classification errors and the counterspeech typologies' relation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NATIVE.", |
| "sec_num": null |
| }, |
| { |
| "text": "The proportion of False Positives over all the predictions obtained from the language model is the following: false positives represent about 7% of the total. Of these, the vast majority are Ironic (\u223c34%) and Hostile (\u223c76%), also considering that the labels are not mutually exclusive.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NATIVE.", |
| "sec_num": null |
| }, |
| { |
"text": "This qualitative analysis can lead us to affirm that the model tends to confuse hostile and ironic content more than explicit content and suggestions of alternatives, probably due to a higher cost from a cognitive and social point of view.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NATIVE.", |
| "sec_num": null |
| }, |
| { |
| "text": "There are two layers of complexity that give rise to disagreement in classifying correctly the tweets. Detecting toxic speech depends on how each subject is sensitive to detecting each axis of discrimination (which often varies along demographic and psychological factors). A further source of disagreement stems from the relative unconstrained character of the notions deployed (toxic speech and counterspeech) (Basile et al., 2021) .", |
| "cite_spans": [ |
| { |
| "start": 412, |
| "end": 433, |
| "text": "(Basile et al., 2021)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NATIVE.", |
| "sec_num": null |
| }, |
| { |
| "text": "Finally, we analyzed the False Positive Rate by counterspeech category. Irony and Hostility are by far the most difficult categories to predict, with a FP ratio of about 60% and 70% respectively, while next to no FPs are predicted for Explicit and Alternative.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NATIVE.", |
| "sec_num": null |
| }, |
| { |
| "text": "In this work we studied hate speech in online environments. To address the dangers of toxic speech, Social Networks defined policies that regulate speech inciting hatred, while some countries started to introduce norms to treat this phenomenon as a crime and sentenced as such. This way to address the problem showed some limitations as the main approaches consist in blocking or suspending the problematic content or the user account itself. Therefore several involved parties, such as institutions and organizations, started to consider counterspeech as an alternative to blocking (Gagliardone, 2015) . Thus, adding \"more speech\" has been considered as a valid alternative to counter hate speech.", |
| "cite_spans": [ |
| { |
| "start": 583, |
| "end": 602, |
| "text": "(Gagliardone, 2015)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We collected and annotated data from Twitter in order to create the Counter-TWIT Italian corpus to study counterspeech in an ecological setting. The corpus includes content that is considered to unleash hate speech and to receive replies in the form of counterspeech. Specifically, data were collected with the aim of observing counterspeech within the context of occurrence, i.e. collecting not only tweets in isolation, but conversation threads including a root tweet and the corresponding replies. Finally, we validated the corpus with cross-validation experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "6" |
| }, |
| { |
"text": "We developed the Counter-TWIT corpus made of tweets and replies collected from accounts that have been selected after in-depth research based on shared contents. All the data collected have been annotated, by exploiting a web-based annotation platform developed largely from scratch and published online 7 , where a group of expert annotators were applying a novel multi-layer annotation scheme devoted to mark whether the tweets or replies were counterspeech, toxic speech or in support of counterspeech (Layer 1). In case counterspeech was marked as present, users were asked to label the text as belonging to some typology of counterspeech for the sake of a deeper understanding of the phenomenon (Layer 2).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "6" |
| }, |
| { |
"text": "Thus, the annotated corpus has been used for training the AlBERTo neural language model for performing a battery of binary classification tasks related to the detection of toxic, counterspeech, and support to counterspeech. We used this language model since it has been trained and developed using an Italian vocabulary instead of using other multilingual models that presented limitations to the type of language learned and the size of vocabulary (Polignano et al., 2019) .",
"cite_spans": [
{
"start": 449,
"end": 473,
| "text": "(Polignano et al., 2019)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "6" |
| }, |
| { |
"text": "We executed two types of experiments: one using only the replies of the conversation tree and the second also with the \"main\" tweet. This approach has been designed in order to go deep into the intuition that this classification task needs the context. Results show that performance, Recall in particular, improves when conversation context data are provided, and this supports the original hypothesis that counterspeech must be studied in a context, which is intuitive given the definition of counterspeech as second-turn intervention aimed to contrast a previous contribution (Cepollaro, 2021) , taken as reference definition in this work.",
"cite_spans": [
{
"start": 578,
"end": 595,
| "text": "(Cepollaro, 2021)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "6" |
| }, |
| { |
"text": "Finally, we performed a statistical and qualitative evaluation of the results obtained from the neural language model, evaluating the number of data classified as not belonging to the counterspeech class rather than being considered as such (False Positives data). We discovered that the model tends to confuse most with the Irony and Hostility labels rather than the Explicitation and Alternative ones.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "6" |
| }, |
| { |
"text": "Given the promising preliminary results, we plan to expand the corpus in our future research. Furthermore, other qualitative analyses could be run by considering the correlation of types of counterspeech and the predictions made with a language model in order to understand in greater detail how the model behaves towards a specific counterspeech category. Indeed, annotating content as counterspeech is not an easy task, due to different shapes of the textual meaning based on the context and the language used. There is not a unique pattern to individuate and mark the tweet as belonging to a specific category. A large annotated corpus will provide a more solid base for training the model in detecting counterspeech and, in possible future developments, for automatically generating counterspeech content in order to fight hate speech, which is another very interesting direction (Tekiroglu et al., 2020).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "6" |
| }, |
| { |
"text": "Counter-TWIT 8 is made available online to further study the phenomenon described and other issues related to counterspeech classification in Italian Twitter.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "http://hatemeter.eu/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Guidelines are available at https://github. com/pierpaologoffredo/Counter-TWIT/blob/ main/Readme.md (in Italian).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://thesiscounterspeech.altervista. org/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/pierpaologoffredo/ Counter-TWIT", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Survey article: Inter-coder agreement for computational linguistics", |
| "authors": [ |
| { |
| "first": "Ron", |
| "middle": [], |
| "last": "Artstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Computational Linguistics", |
| "volume": "34", |
| "issue": "4", |
| "pages": "555--596", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/coli.07-034-R2" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ron Artstein and Massimo Poesio. 2008. Survey article: Inter-coder agreement for computational linguistics. Computational Linguistics, 34(4):555-596.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Massimo Poesio, and Alexandra Uma. 2021. We need to consider disagreement in evaluation", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Fell", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Fornaciari", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Silviu", |
| "middle": [], |
| "last": "Paun", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the 1st Workshop on Benchmarking: Past, Present and Future", |
| "volume": "", |
| "issue": "", |
| "pages": "15--21", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.bppf-1.3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valerio Basile, Michael Fell, Tommaso Fornaciari, Dirk Hovy, Silviu Paun, Barbara Plank, Massimo Poesio, and Alexandra Uma. 2021. We need to consider disagreement in evaluation. In Proceedings of the 1st Workshop on Benchmarking: Past, Present and Future, pages 15-21, Online. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Remedies to discriminatory contents: On and offline counterspeech. Talk at HaLO Workshop", |
| "authors": [ |
| { |
| "first": "Bianca", |
| "middle": [], |
| "last": "Cepollaro", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bianca Cepollaro. 2021. Remedies to discriminatory contents: On and offline counterspeech. Talk at HaLO Workshop.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "CONAN -COunter NArratives through nichesourcing: a multilingual dataset of responses to fight online hate speech", |
| "authors": [ |
| { |
| "first": "Yi-Ling", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizaveta", |
| "middle": [], |
| "last": "Kuzmenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Serra Sinem Tekiroglu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Guerini", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2819--2829", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1271" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi-Ling Chung, Elizaveta Kuzmenko, Serra Sinem Tekiroglu, and Marco Guerini. 2019. CONAN - COunter NArratives through nichesourcing: a mul- tilingual dataset of responses to fight online hate speech. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 2819-2829, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Italian counter narrative generation to fight online hate speech", |
| "authors": [ |
| { |
| "first": "Yi-Ling", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "Serra", |
| "middle": [], |
| "last": "Sinem Tekiroglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Guerini", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Seventh Italian Conference on Computational Linguistics, CLiC-it 2020", |
| "volume": "2769", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi-Ling Chung, Serra Sinem Tekiroglu, and Marco Guerini. 2020. Italian counter narrative generation to fight online hate speech. In Proceedings of the Seventh Italian Conference on Computational Lin- guistics, CLiC-it 2020, Bologna, Italy, March 1-3, 2021, volume 2769 of CEUR Workshop Proceedings. CEUR-WS.org.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Code of conduct on countering illegal hate speech online", |
| "authors": [], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "EU Commission. 2016. Code of conduct on countering illegal hate speech online.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Countering Online Hate Speech -UNESCO", |
| "authors": [ |
| { |
| "first": "Iginio", |
| "middle": [], |
| "last": "Gagliardone", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iginio Gagliardone. 2015. Countering Online Hate Speech -UNESCO. UNESCO Publishing.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A just and comprehensive strategy for using NLP to address online abuse", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Jurgens", |
| "suffix": "" |
| }, |
| { |
| "first": "Libby", |
| "middle": [], |
| "last": "Hemphill", |
| "suffix": "" |
| }, |
| { |
| "first": "Eshwar", |
| "middle": [], |
| "last": "Chandrasekharan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3658--3666", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1357" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Jurgens, Libby Hemphill, and Eshwar Chan- drasekharan. 2019. A just and comprehensive strat- egy for using NLP to address online abuse. In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 3658- 3666, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Blocking as counter-speech", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Langton", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "144--164", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/oso/9780198738831.003.0006" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Langton. 2018. Blocking as counter-speech, pages 144-164. Oxford Scholarship Online.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Hate speech in public discourse: A pessimistic defense of counterspeech", |
| "authors": [ |
| { |
| "first": "Maxime", |
| "middle": [], |
| "last": "Lepoutre", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Social Theory and Practice", |
| "volume": "43", |
| "issue": "4", |
| "pages": "851--883", |
| "other_ids": { |
| "DOI": [ |
| "10.5840/soctheorpract201711125" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maxime Lepoutre. 2017. Hate speech in public dis- course: A pessimistic defense of counterspeech. So- cial Theory and Practice, 43(4):851-883.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Analyzing the hate and counter speech accounts on twitter", |
| "authors": [ |
| { |
| "first": "Binny", |
| "middle": [], |
| "last": "Mathew", |
| "suffix": "" |
| }, |
| { |
| "first": "Navish", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Pawan", |
| "middle": [], |
| "last": "Ravina", |
| "suffix": "" |
| }, |
| { |
| "first": "Animesh", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Binny Mathew, Navish Kumar, Ravina, Pawan Goyal, and Animesh Mukherjee. 2018. Analyzing the hate and counter speech accounts on twitter. CoRR, abs/1812.02712.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Thou shalt not hate: Countering online hate speech", |
| "authors": [ |
| { |
| "first": "Binny", |
| "middle": [], |
| "last": "Mathew", |
| "suffix": "" |
| }, |
| { |
| "first": "Punyajoy", |
| "middle": [], |
| "last": "Saha", |
| "suffix": "" |
| }, |
| { |
| "first": "Hardik", |
| "middle": [], |
| "last": "Tharad", |
| "suffix": "" |
| }, |
| { |
| "first": "Subham", |
| "middle": [], |
| "last": "Rajgaria", |
| "suffix": "" |
| }, |
| { |
| "first": "Prajwal", |
| "middle": [], |
| "last": "Singhania", |
| "suffix": "" |
| }, |
| { |
| "first": "Pawan", |
| "middle": [], |
| "last": "Suman Kalyan Maity", |
| "suffix": "" |
| }, |
| { |
| "first": "Animesh", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the International AAAI Conference on Web and Social Media", |
| "volume": "13", |
| "issue": "", |
| "pages": "369--380", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Binny Mathew, Punyajoy Saha, Hardik Tharad, Subham Rajgaria, Prajwal Singhania, Suman Kalyan Maity, Pawan Goyal, and Animesh Mukherjee. 2019. Thou shalt not hate: Countering online hate speech. Pro- ceedings of the International AAAI Conference on Web and Social Media, 13(01):369-380.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Abuse is contextual, what about nlp? the role of context in abusive language annotation and detection", |
| "authors": [ |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Menini", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessio", |
| "middle": [], |
| "last": "Palmero Aprosio", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Tonelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefano Menini, Alessio Palmero Aprosio, and Sara Tonelli. 2021. Abuse is contextual, what about nlp? the role of context in abusive language annotation and detection. CoRR, abs/2103.14916.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Out of the echo chamber: Detecting countering debate speeches", |
| "authors": [ |
| { |
| "first": "Matan", |
| "middle": [], |
| "last": "Orbach", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bilu", |
| "suffix": "" |
| }, |
| { |
| "first": "Assaf", |
| "middle": [], |
| "last": "Toledo", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Lahav", |
| "suffix": "" |
| }, |
| { |
| "first": "Michal", |
| "middle": [], |
| "last": "Jacovi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ranit", |
| "middle": [], |
| "last": "Aharonov", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Slonim", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.633" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matan Orbach, Yonatan Bilu, Assaf Toledo, Dan Lahav, Michal Jacovi, Ranit Aharonov, and Noam Slonim. 2020. Out of the echo chamber: Detecting coun- tering debate speeches. In Proceedings of the 58th", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Annual Meeting of the Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "7073--7086", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Meeting of the Association for Computational Linguistics, pages 7073-7086, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Resources and benchmark corpora for hate speech detection: a systematic review. Language Resources and Evaluation", |
| "authors": [ |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Poletto", |
| "suffix": "" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "55", |
| "issue": "", |
| "pages": "477--523", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/s10579-020-09502-8" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabio Poletto, Valerio Basile, Manuela Sanguinetti, Cristina Bosco, and Viviana Patti. 2021. Resources and benchmark corpora for hate speech detection: a systematic review. Language Resources and Evalua- tion, 55(2):477-523.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "AlBERTo: Italian BERT Language Understanding Model for NLP Challenging Tasks Based on Tweets", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Polignano", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierpaolo", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "De Gemmis", |
| "suffix": "" |
| }, |
| { |
| "first": "Giovanni", |
| "middle": [], |
| "last": "Semeraro", |
| "suffix": "" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Sixth Italian Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Polignano, Pierpaolo Basile, Marco de Gem- mis, Giovanni Semeraro, and Valerio Basile. 2019. AlBERTo: Italian BERT Language Understanding Model for NLP Challenging Tasks Based on Tweets. In Proceedings of the Sixth Italian Conference on Computational Linguistics (CLiC-it 2019), volume 2481. CEUR.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Ideology and the persuasive use of presupposition. Language and ideology. Selected papers from the 6th International Pragmatics Conference", |
| "authors": [ |
| { |
| "first": "Marina", |
| "middle": [], |
| "last": "Sbis\u00e0", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "1", |
| "issue": "", |
| "pages": "492--509", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marina Sbis\u00e0. 1999. Ideology and the persuasive use of presupposition. Language and ideology. Selected papers from the 6th International Pragmatics Con- ference, 1:492-509.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Generating counter narratives against online hate speech: Data and strategies", |
| "authors": [ |
| { |
| "first": "Yi-Ling", |
| "middle": [], |
| "last": "Serra Sinem Tekiroglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Guerini", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1177--1190", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.110" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Serra Sinem Tekiroglu, Yi-Ling Chung, and Marco Guerini. 2020. Generating counter narratives against online hate speech: Data and strategies. In Proceed- ings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1177-1190, On- line. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Screenshot of the annotation interface of Counter-TWIT." |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Distribution of the Layer 1 labels over the Counter-TWIT corpus." |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Distribution of Layer 1 labels (root tweets and replies)." |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Distribution of the counterspeech typology labels over the Counter-TWIT corpus" |
| }, |
| "FIGREF4": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Intersection of counterspeech typology labels over the Counter-TWIT corpus (% refers to the total of tweets annotated as counterspeech)." |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td>Label TOXIC SPEECH COUNTERSPEECH SUPPORT EXPLICITATION IRONY ALTERNATIVE</td><td>\u03b1 (tweets) \u03b1 (replies) 0.25 0.15 0.46 0.03 0.36 0.37 0.38 0.02 0.40 0.05 0.25 0.02</td></tr></table>", |
| "num": null, |
| "html": null, |
| "text": "Krippendorff's \u03b1 values for each label on tweets and replies.", |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td>explicitation</td><td>irony</td><td>alternative</td></tr><tr><td>0.09790</td><td>0.41364</td><td>0.46749</td></tr></table>", |
| "num": null, |
| "html": null, |
| "text": "Krippendorff's \u03b1 values for data considered counterspeech by all three annotators.", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>Label COUNTERSPEECH TOXIC SUPPORT</td><td>Prec.(0) Rec.(0) F1 (0) Prec.(1) Rec.(1) F1 (1) Prec. (avg) Rec.(avg) F1 (avg) .960 .883 .920 .466 .730 .564 .713 .807 .742 .979 .840 .903 .037 .283 .065 .508 .561 .484 .922 .816 .865 .317 .544 .396 .620 .680 .630</td></tr></table>", |
| "num": null, |
| "html": null, |
| "text": "Model performance over three binary classification using reply text and root tweet for training. (0), (1), and (avg) refer respectively to positive class, negative class, and their macro-average.", |
| "type_str": "table" |
| } |
| } |
| } |
| } |