| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:11:48.633911Z" |
| }, |
| "title": "dhivya-hope-detection@LT-EDI-EACL2021: Multilingual Hope Speech Detection for Code-mixed and Transliterated Texts", |
| "authors": [ |
| { |
| "first": "Dhivya", |
| "middle": [], |
| "last": "Chinnappa", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "dhivya.infant@gmail.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper we describe the shared task on hope speech detection. We present a unified framework to predict hope speech in the English, Tamil, and Malayalam datasets. Our mechanism follows a two phase approach to detect hope speech. In the first phase we build a classifier to identify the language of the text. In the second phase, we build a classifier to detect hope speech, non hope speech or not lang labels. Experimental results show that hope speech detection is challenging and there is scope for improvement.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper we describe the shared task on hope speech detection. We present a unified framework to predict hope speech in the English, Tamil, and Malayalam datasets. Our mechanism follows a two phase approach to detect hope speech. In the first phase we build a classifier to identify the language of the text. In the second phase, we build a classifier to detect hope speech, non hope speech or not lang labels. Experimental results show that hope speech detection is challenging and there is scope for improvement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Artificial Intelligence models are criticized for their bias against the protected classes (Rudinger et al., 2017; Davidson et al., 2019) . These biases are shown to arise from data or the model itself. There are several efforts taken to mitigate bias from the data and model perspectives (Park et al., 2018; Bender and Friedman, 2018; Mitchell et al., 2019) . Mozafari et al. (2020) present a bias alleviation mechanism evaluating the performance following a cross-domain approach.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 114, |
| "text": "(Rudinger et al., 2017;", |
| "ref_id": null |
| }, |
| { |
| "start": 115, |
| "end": 137, |
| "text": "Davidson et al., 2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 289, |
| "end": 308, |
| "text": "(Park et al., 2018;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 309, |
| "end": 335, |
| "text": "Bender and Friedman, 2018;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 336, |
| "end": 358, |
| "text": "Mitchell et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 361, |
| "end": 383, |
| "text": "Mozafari et al. (2020)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Hope speech detection is the task of automatically detecting web content that may play a positive role in diffusing hostility on social media triggered by heightened political tensions during a conflict (Palakodety et al., 2020) . We hypothesize that hope speech detection datasets could be used in evaluating the aforementioned bias alleviation mechanisms. These datasets and mechanisms could help in building AI systems that are diverse and inclusive. Additionally, with divisiveness spread across social media platforms, identifying hope speech and enhancing them would help mitigate divisiveness and animosity. In this paper, we describe our approach on the hope detection shared task. We work with a multilingual corpora aiming to detect hope speech. We follow a two phase approach to accomplish the task. In the first phase, we identify the language of the input text. In the second phase we classify if the text is hope speech or not. We begin with describing the corpora, then analyze the datasets, explain the experimental setup, and finally discuss the results.", |
| "cite_spans": [ |
| { |
| "start": 203, |
| "end": 228, |
| "text": "(Palakodety et al., 2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We work with the hope speech detection corpora from Chakravarthi (2020). Unlike most other corpora that target English texts, this corpora focuses on diversity and inclusion including two Dravidian languages apart from English. The corpora contains hope speech detection datasets in three languages (i) English, (ii) Tamil, and (iii) Malayalam.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The datasets are generated from YouTube comments, and manually labeled for the three labels hope speech, non hope speech, and not lang. The hope speech label and the non hope speech label are self-explanatory. The not lang label indicates that the YouTube comment does not belong to the specific language. That is, the datasets include not English, not Tamil, and not Malayalam instances depending on the language of the dataset they belong to. This label becomes important as the Tamil and Malayalam dataset instances are generated by social media users who are usually bilingual (English and their mother tongue). Throughout this paper, we use the label not lang to refer that the text does not belong to the specific language. Table 1 presents examples of hope speech, non hope speech, and not lang from the three datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 730, |
| "end": 737, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The English dataset includes 28,451 instances (hope: 2,484; non hope: 25,940; not lang: 27), the Tamil dataset includes 20,198 instances (hope: 7,899; non hope: 9,816; not lang: 2,483), and the Malayalam dataset includes 10,705 instances (hope: 2,052; non hope: 7,765; not lang: 888). Find more about the statistics of the corpora in the original paper (Chakravarthi, 2020).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Analyzing the datasets reveals that there are several Tamil and Malayalam instances that include code-mixed or English transliterated texts. For instance, the non hope example for Tamil dataset in Table 1 shows a combination of code-mixed (English and Tamil) and English transliteration of Tamil. It is common for bilingual speakers around the Indian subcontinent to use several English words when uttering a sentence in their mother tongue. Additionally, as several devices did not provide easy non-English typing in the beginning of the smart phone era, most users adapted to type English transliteration of non-English sentences. This phenomena is profoundly reflected in the Tamil and the Malayalam hope speech detection datasets. In case of the English dataset, this phenomena is uncommon and there was only 0.1% of not lang labels. We attribute these labels to the error caused by the language detector. Figure 1 presents wordclouds for the hope speech instances in the three datasets English, Tamil, and Malayalam. As we can see, the English hope wordcloud (left) has hopeful words like good, teach, etc. Interestingly, the Tamil wordcloud (center) is filled with English words and Tamil words transliterated to English like pathukonga, neenga, etc. Tamil social media users tend to use code-mixed (Tamil and English) and English transliterations of Tamil texts, causing this behavior. In case of Malayalam, the wordcloud (right) includes both English and Malayalam words. Figure 2 presents wordclouds for the non hope speech instances in the three datasets English, Tamil, and Malayalam. Unsurprisingly, the wordclouds for non hope speech instances include words with negative connotations. It is to be noted that Tamil social media users have more English influence than Malayalam social media users, despite both languages being Dravidian.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 197, |
| "end": 204, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 910, |
| "end": 918, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1480, |
| "end": 1488, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As the number of not lang labels are considerably less than (English: 0.1%, Tamil: 12%, Malayalam: 11%) the other two labels, we specifically target to identify not lang labels by building a dedicated classifier. Thus we follow a two-phase approach to detect hope speech. We argue a two-phase language identification approach might be helpful as the datasets include a not lang label despite corresponding to a specific language. In phase 1, we identify the language of the text using a language detector. In phase 2, we use the results from phase 1 in addition to other features, to identify hope speech or not using a hope detector. The architecture is described in Figure 1 ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 668, |
| "end": 676, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this phase, we convert all hope speech and non hope speech labels to lang labels, and keep the not lang labels as they are. Thus, we build a binary classifier using a feedforward neural network (FNN) to predict lang or not lang. We call this FNN the language detector.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language detection", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The language detector takes as input (i) outputs from five language models (ii) probabilities from a vanilla feedforward network classifying lang and not lang with SBERT (Reimers and Gurevych, 2019) inputs, (iii) BERT (Devlin et al., 2019) inferences for lang and not lang.", |
| "cite_spans": [ |
| { |
| "start": 170, |
| "end": 198, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language detection", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We use five language models that take text as input and return the language of the text. The language detectors used are Compact Language Detector 2(CLD2, 2015), Compact Language Detector 3(CLD3, 2020), langid (Lui and Baldwin, 2012) , textblob language detector (Loria, 2018) , and langdetect (Nakatani, 2010) . We use multiple language models rather than one language model improve the performance of the language detector. We follow the same approach for all language datasets including English, Tamil, and Malayalam. SBERT FNN. We generate SBERT embeddings for each input text and feed it to a vanilla feedforward network that predicts lang or not lang. The output probabilities are passed to the language detector FNN. For the English dataset we use the bert-base-nli-stsbmean-tokens model (Reimers and Gurevych, 2019) to generate SBERT embeddings. For Tamil and Malayalam datasets, we use the distiluse-base-multilingual-cased model (Sanh et al., 2019) . BERT inference. Here, we fine tune BERT models to predict lang or not lang obtaining BERT inferences. We hypothesize that hate speech models are useful in identifying hope speech detection, and use hate speech BERT models if present for a specific language. For English, we use the BERT models dehatebert-mono-english (Aluru et al., 2020) and twitter-roberta-base-hate (Barbieri et al., 2020) . For Tamil, we use the BERT models tamillion (Doiron, 2020) and bert-basemultilingual-uncased (Devlin et al., 2018) . For Malayalam we use the BERT model bertbase-multilingual-uncased (Devlin et al., 2018 ). Thus we have two BERT inferences results for English ( dehatebert-mono-english, twitterroberta-base-hate) and Tamil (tamillion, bertbase-multilingual-uncased), and one result for Malayalam (bert-base-multilingual-uncased).", |
| "cite_spans": [ |
| { |
| "start": 210, |
| "end": 233, |
| "text": "(Lui and Baldwin, 2012)", |
| "ref_id": null |
| }, |
| { |
| "start": 263, |
| "end": 276, |
| "text": "(Loria, 2018)", |
| "ref_id": null |
| }, |
| { |
| "start": 294, |
| "end": 310, |
| "text": "(Nakatani, 2010)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 795, |
| "end": 823, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 939, |
| "end": 958, |
| "text": "(Sanh et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 1279, |
| "end": 1299, |
| "text": "(Aluru et al., 2020)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1330, |
| "end": 1353, |
| "text": "(Barbieri et al., 2020)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1449, |
| "end": 1470, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1539, |
| "end": 1559, |
| "text": "(Devlin et al., 2018", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1616, |
| "end": 1668, |
| "text": "( dehatebert-mono-english, twitterroberta-base-hate)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language models.", |
| "sec_num": null |
| }, |
| { |
| "text": "The outputs from language models, probabilities from the SBERT vanilla FNN, and the BERT inferences make up the language module. The outputs from the language module are fed as an input to the language detector and the hope detector.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language models.", |
| "sec_num": null |
| }, |
| { |
| "text": "In this phase we predict the labels hope speech, non hope speech, or not lang using the hope detector. Similar to the language detector, the hope detector is a FNN that takes as input (i) outputs from the language module, (ii) outputs from the hope module, and (iii) probabilities from the language detector. Outputs from the language module. The same outputs from the language module as described in 4.1 are given as the input to the hope detector. SBERT FNN. This is similar to the SBERT FNN described in 4.1 except that it predicts hope speech, non hope speech, or not lang. BERT inference. This is also very similar to the BERT inference described in 4.1, except that the BERT models are finetuned to predict hope speech, non hope speech, or not lang.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hope detection", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "This SBERT FNN and BERT inference make up the hope module. We note that the probabilities from the SBERT FNN and BERT inference for language module and the hope module are different, as they were trained with different labels (language labels vs. hope labels).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hope detection", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We present test results for English, Tamil, and Malayalam datasets in Table 2 . Regarding the English dataset, the non hope speech labels achieve higher performance than hope speech labels (F1: .95 vs. .56). None of the not lang labels are predicted correctly. This poor performance for the not lang labels can be attributed to the imbalanced label distribution. There were only 3 not lang labels in the test set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 70, |
| "end": 77, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Regarding the Tamil dataset, the performance on all the three labels is comparable (F1: .52 vs. .66 vs. .54). Note that the Tamil dataset includes several code-mixed and transliterated texts. This phenomena can be attributed to why the classifier struggles in identifying the correct label.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Regarding the Malayalam dataset the performance of the label hope speech is worse than the others. Additionally, it is relatively easier to predict not lang labels in Malayalam.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The experimental results show detecting hope speech is difficult regardless of the language. Even in the English dataset where there are no transliterations or code-mixing, the classifier struggles. While we infer that hope detection is a difficult task, code-mixing and transliterations in Tamil and Malayalam increase the complexity of the problem. Chakravarthi and Muralidaran (2021) describe the results and techniques from the other participants of the hope speech detection shared task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This paper describes the shared task on hope speech detection. It targets to detect hope speech from a multilingual corpora. The corpora includes datasets in three languages English, Tamil, and Malayalam. First we conduct an analysis over the corpora finding code-mixed and transliterated texts in the Tamil and Malayalam datasets. Next, we build a two phase mechanism to identify hope speech. In the first phase we detect the language of the text. In the next phase, we classify the text into hope speech, non hope speech, or not lang. Finally, we discuss the results concluding that hope detection is a challenging task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Deep learning models for multilingual hate speech detection", |
| "authors": [ |
| { |
| "first": "Binny", |
| "middle": [], |
| "last": "Sai Saket Aluru", |
| "suffix": "" |
| }, |
| { |
| "first": "Punyajoy", |
| "middle": [], |
| "last": "Mathew", |
| "suffix": "" |
| }, |
| { |
| "first": "Animesh", |
| "middle": [], |
| "last": "Saha", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2004.06465" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sai Saket Aluru, Binny Mathew, Punyajoy Saha, and Animesh Mukherjee. 2020. Deep learning models for multilingual hate speech detection. arXiv preprint arXiv:2004.06465.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "TweetEval: Unified benchmark and comparative evaluation for tweet classification", |
| "authors": [ |
| { |
| "first": "Francesco", |
| "middle": [], |
| "last": "Barbieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Espinosa Anke", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonardo", |
| "middle": [], |
| "last": "Neves", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "1644--1650", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.findings-emnlp.148" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francesco Barbieri, Jose Camacho-Collados, Luis Espinosa Anke, and Leonardo Neves. 2020. TweetEval: Unified benchmark and comparative evaluation for tweet classification. In Findings of the Association for Computational Linguis- tics: EMNLP 2020, pages 1644-1650, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Data statements for natural language processing: Toward mitigating system bias and enabling better science", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bender", |
| "suffix": "" |
| }, |
| { |
| "first": "Batya", |
| "middle": [], |
| "last": "Friedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "587--604", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00041" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily M. Bender and Batya Friedman. 2018. Data statements for natural language processing: To- ward mitigating system bias and enabling bet- ter science. Transactions of the Association for Computational Linguistics, 6:587-604.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion", |
| "authors": [ |
| { |
| "first": "Chakravarthi", |
| "middle": [], |
| "last": "Bharathi Raja", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "41--53", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi. 2020. HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion. In Proceedings of the Third Workshop on Computational Model- ing of People's Opinions, Personality, and Emo- tion's in Social Media, pages 41-53, Barcelona, Spain (Online). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "2021. Findings of the shared task on Hope Speech Detection for Equality, Diversity, and Inclusion", |
| "authors": [ |
| { |
| "first": "Vigneshwaran", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Muralidaran", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi and Vigneshwaran Muralidaran. 2021. Findings of the shared task on Hope Speech Detection for Equality, Diver- sity, and Inclusion. In Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Compact language detector 2", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "CLD2. 2015. Compact language detector 2. https: //github.com/CLD2Owners/cld2.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Compact language detector 3", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "CLD3. 2020. Compact language detector 3. https: //github.com/google/cld3.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Racial bias in hate speech and abusive language detection datasets", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Davidson", |
| "suffix": "" |
| }, |
| { |
| "first": "Debasmita", |
| "middle": [], |
| "last": "Bhattacharya", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingmar", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Third Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "25--35", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-3504" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Davidson, Debasmita Bhattacharya, and Ingmar Weber. 2019. Racial bias in hate speech and abusive language detection datasets. In Pro- ceedings of the Third Workshop on Abusive Lan- guage Online, pages 25-35, Florence, Italy. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Con- ference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapo- lis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "2012. langid.py: An off-the-shelf language identification tool", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Lui", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the ACL 2012 System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "25--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Lui and Timothy Baldwin. 2012. langid.py: An off-the-shelf language identification tool. In Proceedings of the ACL 2012 System Demonstra- tions, pages 25-30, Jeju Island, Korea. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Model cards for model reporting", |
| "authors": [ |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Simone", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Zaldivar", |
| "suffix": "" |
| }, |
| { |
| "first": "Parker", |
| "middle": [], |
| "last": "Barnes", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucy", |
| "middle": [], |
| "last": "Vasserman", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Hutchinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Spitzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Deborah", |
| "middle": [], |
| "last": "Inioluwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Timnit", |
| "middle": [], |
| "last": "Raji", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gebru", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference on Fairness, Accountability, and Transparency", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3287560.3287596" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchin- son, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru. 2019. Model cards for model re- porting. Proceedings of the Conference on Fair- ness, Accountability, and Transparency.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Hate speech detection and racial bias mitigation in social media based on bert model", |
| "authors": [ |
| { |
| "first": "Marzieh", |
| "middle": [], |
| "last": "Mozafari", |
| "suffix": "" |
| }, |
| { |
| "first": "Reza", |
| "middle": [], |
| "last": "Farahbakhsh", |
| "suffix": "" |
| }, |
| { |
| "first": "Noel", |
| "middle": [], |
| "last": "Crespi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marzieh Mozafari, Reza Farahbakhsh, and Noel Crespi. 2020. Hate speech detection and racial bias mitigation in social media based on bert model.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Language detection library for java", |
| "authors": [ |
| { |
| "first": "Shuyo", |
| "middle": [], |
| "last": "Nakatani", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuyo Nakatani. 2010. Language detection library for java.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Hope speech detection: A computational analysis of the voice of peace", |
| "authors": [ |
| { |
| "first": "Shriphani", |
| "middle": [], |
| "last": "Palakodety", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashiqur", |
| "middle": [ |
| "R" |
| ], |
| "last": "Khudabukhsh", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [ |
| "G" |
| ], |
| "last": "Carbonell", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shriphani Palakodety, Ashiqur R. KhudaBukhsh, and Jaime G. Carbonell. 2020. Hope speech de- tection: A computational analysis of the voice of peace.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Reducing gender bias in abusive language detection", |
| "authors": [ |
| { |
| "first": "Ji", |
| "middle": [], |
| "last": "Ho Park", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamin", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2799--2804", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1302" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ji Ho Park, Jamin Shin, and Pascale Fung. 2018. Reducing gender bias in abusive language detec- tion. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 2799-2804, Brussels, Belgium. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
| "authors": [ |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reimers", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. In Proceedings of the 2019 Confer- ence on Empirical Methods in Natural Language", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Wordcloud generated from the hope speech instances of the English (left), Tamil (center), and Malayalam (right) datasets.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": "Wordcloud generated from the non hope speech instances of the English (left), Tamil (center), and Malayalam (right) datasets. Note that the Tamil wordclouds consistently have English words.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF2": { |
| "text": "Architecture diagram describing the two phase hope detection process. The language detector identifies the language of the model, and the hope detector classifies the text into hope speech, non hope speech, or not lang.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF3": { |
| "text": ".", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "TABREF1": { |
| "html": null, |
| "text": "", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "text": ".56 0.56 0.56 0.52 0.54 0.63 0.46 0.54 Non hope speech 0.96 0.96 0.96 0.61 0.66 0.63 0.85 0.92 0.88 Not lang 0.00 0.00 0.00 0.61 0.54 0.58 0.83 0.66 0.74 W. avg. 0.92 0.92 0.92 0.59 0.59 0.59 0.81 0.82 0.81", |
| "content": "<table><tr><td/><td colspan=\"2\">English</td><td/><td/><td>Tamil</td><td/><td/><td>Malayalam</td></tr><tr><td/><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td></tr><tr><td>Hope speech</td><td>0.56 0</td><td/><td/><td/><td/><td/><td/><td/></tr></table>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "text": "Results obtained with hope speech identified from text (YouTube comments) across the three languages English, Tamil, and Malayalam.", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |