| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:20:37.788532Z" |
| }, |
| "title": "ZYJ123@DravidianLangTech-EACL2021: Offensive Language Identification based on XLM-RoBERTa with DPCNN", |
| "authors": [ |
| { |
| "first": "Yingjia", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Yunnan University/Yunnan", |
| "location": { |
| "country": "P.R. China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Yunnan University / Yunnan", |
| "location": { |
| "country": "P.R. China" |
| } |
| }, |
| "email": "taoxinwy@126.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The development of online media platforms has given users more opportunities to post and comment freely, but the negative impact of offensive language has become increasingly apparent. It is very necessary for the automatic identification system of offensive language. This paper describes our work on the task of Offensive Language Identification in Dravidian language-EACL 2021. To complete this task, we propose a system based on the multilingual model XLM-Roberta and DPCNN. The test results on the official test data set confirm the effectiveness of our system. The weighted average F1-score of Kannada, Malayalam, and Tamil language are 0.69, 0.92, and 0.76 respectively, ranked 6th, 6th, and 3rd.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The development of online media platforms has given users more opportunities to post and comment freely, but the negative impact of offensive language has become increasingly apparent. It is very necessary for the automatic identification system of offensive language. This paper describes our work on the task of Offensive Language Identification in Dravidian language-EACL 2021. To complete this task, we propose a system based on the multilingual model XLM-Roberta and DPCNN. The test results on the official test data set confirm the effectiveness of our system. The weighted average F1-score of Kannada, Malayalam, and Tamil language are 0.69, 0.92, and 0.76 respectively, ranked 6th, 6th, and 3rd.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "With the development of the information society, people have become accustomed to uploading content on social media platforms in the form of text, pictures, or videos. At the same time, they also comment on the content uploaded by other users and interact with each other, thus increasing the activity of social media platforms Mahesan, 2019, 2020a,b) . Inevitably, however, some users will post offensive posts or comments. The use of offensive discourse is a kind of impolite phenomenon which has negative effects on the civilization of the network community (Chakravarthi, 2020) . It usually has the characteristics of causing conflicts and the purpose of publishing intentionally. The publisher of offensive language may use reproach, sarcasm, swear and other language means to achieve intentional offense, and express a variety of intentions, such as disturbing, provoking, and expressing negative emotions (Chakravarthi and Muralidaran, 2021; Suryawanshi and Chakravarthi, 2021) . Most people will take measures to respond to offensive words. The way to respond to the direct conflict of offensive words is mainly rhetorical questions, swear, sarcasm and threat, so as to express dissatisfaction, deny and satirize the other party and provoke the other party. This will further cause conflicts and destroy the harmony of the network environment.", |
| "cite_spans": [ |
| { |
| "start": 328, |
| "end": 351, |
| "text": "Mahesan, 2019, 2020a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 561, |
| "end": 581, |
| "text": "(Chakravarthi, 2020)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 912, |
| "end": 948, |
| "text": "(Chakravarthi and Muralidaran, 2021;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 949, |
| "end": 984, |
| "text": "Suryawanshi and Chakravarthi, 2021)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Many social media platforms use a content review process, in which human reviewers check users' comments for offensive language and other infractions, and which comments have been removed from the platform because of the violation (Mandl et al., 2020) . It is up to the moderator to decide which comments will be removed from the platform due to violations and which ones will be kept. As the number of network users increases and user activity increases, the manual approach is undoubtedly inefficient. Therefore, the automatic detection and identification of offensive content are very necessary. However, offensive words often depend on the emotions and psychology of the listener, and some seemingly innocuous words can be potentially offensive, and words that often seem offensive are watered down by the emotions of the listener. This kind of language phenomenon is not uncommon in real life, either unintentionally or deliberately used to achieve the speaker's expected purpose, which is a challenging work for the current detection system.", |
| "cite_spans": [ |
| { |
| "start": 231, |
| "end": 251, |
| "text": "(Mandl et al., 2020)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our team takes part in the shared task of Offensive Language Identification in Dravidian Languages-EACL 2021 Hande et al., 2020) . This is a classification task at the comment/post level. The goal of this task is to identify offensive language content of the code-mixed dataset of comments/posts in Dravidian Languages (Tamil-English, Malayalam-English, and Kannada-English) collected from social media. Tamil language is the oldest language in Indian languages, Malayalam and Kannada evolved from Tamil language. For a comment on Youtube, the system must classify it into not-offensive, offensive-untargeted, offensive-targeted-individual, offensive-targeted-group, offensive-targeted-other, or not-in-indented-language.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 108, |
| "text": "Languages-EACL 2021", |
| "ref_id": null |
| }, |
| { |
| "start": 109, |
| "end": 128, |
| "text": "Hande et al., 2020)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In our approach, the multilingual model XLM-RoBERTa and DPCNN are combined to carry out the classification task. This method can combine the advantages of the two models to achieve a better classification effect. The rest of the paper is divided into the following parts. In the second part, we introduce the relevant work in this field, which involves offensive language detection and text classification methods. In the third part, we introduce the model structure and the composition of our training data. The fourth part introduces our experimental setup and results. The fifth part is the conclusion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Due to the harm of offensive language to the network environment, the identification of offensive language has been carried out for a long time. Research so far has focused on automating the decision-making process in the form of supervised machine learning for classification tasks (Sun et al., 2019) . As far back as 2012, Chen et al. (2012) proposed a lexical syntactic feature (LSF) framework to detect offensive content in social media, distinguished the roles of derogatory/profane and obscenity in identifying offensive content, and introduced handwritten syntax rules to identify abusive harassment. In contrast to the start-to-end training model, Howard and Ruder (2018) proposed an effective transfer learning method, Universal Language Model Tuning (ULMFIT), which can be applied to any task in natural language processing, and has shown significant results on six text classification tasks. Subsequently, Abdellatif and Elgammal (2020) used the ULMFiT transfer learning method to train forward and backward models on Arabic datasets and ensemble the results to perform an offensive language detection task.", |
| "cite_spans": [ |
| { |
| "start": 283, |
| "end": 301, |
| "text": "(Sun et al., 2019)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 325, |
| "end": 343, |
| "text": "Chen et al. (2012)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 656, |
| "end": 679, |
| "text": "Howard and Ruder (2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 917, |
| "end": 947, |
| "text": "Abdellatif and Elgammal (2020)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Although English is currently one of the most commonly spoken languages in the world, work is ongoing to identify the offensive language in other languages that are less widely spoken. Pitenis et al. (2020) tested the performance of several traditional machine learning models and deep learning models on an offensive language dataset of Greek, and the best results were achieved with the attention model of LSTM and GRU. Ozdemir and Yeniterzi (2020) ensembled CNN-LSTM, BILSTM-Attention, and BERT three models, combined with pre-trained word embedding on Twitter to complete the identification task of offensive Turkish language, and achieved a good result.", |
| "cite_spans": [ |
| { |
| "start": 422, |
| "end": 450, |
| "text": "Ozdemir and Yeniterzi (2020)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A key challenge in automatically detecting hate speech on social media is to separate hate speech from other offensive languages. Davidson et al. (2017) used the crowd-sourced hate speech lexicon to collect tweets containing hate speech keywords. They trained a multi-class classifier to reliably distinguish hate speech from other offensive languages, and found that racist and homophobic tweets were more likely to be classified as hate speech, but sexist tweets were generally classified as offensive. Razavi et al. (2010) proposed to extract features at different conceptual levels and apply multilevel classification for offensive language detection. The system leverages a variety of statistical models and rule-based patterns, combined with an auxiliary weighted pattern library, to improve accuracy by matching text with its graded entries. Pitsilis et al. (2018) proposed the ensemble of a recursive neural network (RNN) classifier, which combines various characteristics related to user-related information, such as the user's sexist or racist tendencies, and was then fed to the classifier as input along with a word frequency vector derived from the text content.", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 152, |
| "text": "Davidson et al. (2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "When there is a large amount of labeled data, increasing the size and parameters of the model will definitely improve the performance of the model.However, when the amount of training is relatively small, the large-scale model may not be able to achieve good results, so solving the problem of model training under the condition of a small amount of target data has become a research hotspot. Sun et al. (2019) proposed a Hierarchical Attention Prototype Network (HAPN) for few-shot text classification, which designed multiple cross-concerns of a feature layer, word layer, and instance layer for the model to enhance the expressive power of semantic space. The model was validated on two standard reference text classification datasets, Fewrel and CSID. Prettenhofer and Stein (2010) built on structural correspondence learning, using untagged documents and simple word translation to induce task-specific, cross-language word correspondence. English was used as the source language and German, French, and Japanese were used as the target language to conduct the experiment in the field of cross-language sentiment classification. Using English data, Ranasinghe and Zampieri (2020) trained the model by applying cross-language contextual word embedding and transfer learning methods, and then predicted the effect of cross-language contextual embedding and transfer learning on this task in less resource-intensive languages such as Bengali, Hindi, and Spanish.", |
| "cite_spans": [ |
| { |
| "start": 393, |
| "end": 410, |
| "text": "Sun et al. (2019)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "3 Data and Methodology", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We count the number of each type of tag in the training set and the validation set, and obtain the data distribution of Not-offensive, offensive-untargeted, offensive-targeted-individual, offensive-targeted-group, offensive-targeted-other, and Not-in-indented-language in Tamil, Malayalam, and Kannada, as shown in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 315, |
| "end": 322, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data description", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Compared with the original BERT model, XLM-RoBERTa increases the number of languages and the number of training data sets. Specifically, a preprocessed CommonCrawl dataset of more than 2TB based on 100 languages is used to train cross-language representations in a self-supervised manner. This includes generating new unlabeled corpora for low-resource languages and expanding the amount of training data available for these languages by two orders of magnitude. In the fine-tuning period, the multi-language tagging data is used based on the ability of the multi-language model to improve the performance of the downstream tasks. This enables XLM-RoBERTa to achieve state-of-the-art results in cross-language benchmarks while exceeding the performance of the single-language BERT model for each language. Tune the parameters of the model to address cases where extending the model to more languages using cross-language migration limits the ability of the model to understand each language. The XLM-RoBERTa parameter changes include up-sampling of low-resource languages during training and vocabulary building, generating a larger shared vocabulary, and increasing the overall model to 550 million parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why XLM-RoBERTa", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this task, we combined XLM-RoBERTa with DPCNN (Johnson and Zhang, 2017) to make the whole model more suitable for the downstream classification task. DPCNN(Deep Pyramid Convolutional Neural Networks) is a kind of deep word level CNN structure, the calculation amount of each layer of the structure decreases exponentially. DPCNN simply stacks the convolution module and negative sampling layer. The computation volume of the whole model is limited to less than two times the number of convolution blocks. At the same time, the pyramid structure also enables the model to discover long-term dependencies in the text. In a common classification task, the last hidden state of the first token of the sequence (CLS token), namely the original output of XLM-Roberta (Pooler output), is further processed through the linear layer and the tanh activation function for classification purposes. To obtain richer semantic information features of the model and improve the performance of the model, we first processed the output of the last three layers of XLM-RoBERTa through DPCNN, and then concatenate it with the original output of XLM-RoBERTa (Pooler output) to get a new and more effective feature vector, and then input this feature vector into the classifier for classification. As shown in Figure 1 .", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 74, |
| "text": "(Johnson and Zhang, 2017)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1291, |
| "end": 1299, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "XLM-RoBERTa with DPCNN", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this experiment, the pre-training model I used was XLM-RoBERTa-base. After adding the DPCNN module, we began to set the experimental parameters. We set the learning rate as 2e-5, the maximum sequence length is 256, and the gradient steps are set to 4. The batch size is set to 32, as shown in table 2. In the training process, we used five-fold stratified cross-validation to make the proportion of data of each category in each subsample the same as that in the original data and finally obtained the optimal result through the voting (Onan et al., 2016) system, as shown in Figure 2 ", |
| "cite_spans": [ |
| { |
| "start": 539, |
| "end": 558, |
| "text": "(Onan et al., 2016)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 579, |
| "end": 587, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment setting", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "After the evaluation by the organizer, we obtained the weighted average F1-score in the three languages, as shown in table 3. Our team's F1-score is 0.69, ranked 6th place for the Kannada language. For the Malayalam language, our team's F1-score is 0.92 ranked 6th place, and for the Tamil language, our team's F1-score is 0.76 ranked 3rd place.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In this paper, we describe our system in the task of offensive language identification for Tamil, Malayalam, and Kannada language. In this model, the XLM-RoBERTa pre-training model is used to extract semantic information features of the text, and DPCNN is used to further process the output features. At the same time, the hierarchical cross-validation method is used to improve the training effect. The final results show that our model achieves satisfactory performance. In future work, we will try to adjust the structure of the new model, so as to improve its effect more significantly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Offensive language detection in arabic using ulmfit", |
| "authors": [ |
| { |
| "first": "Mohamed", |
| "middle": [], |
| "last": "Abdellatif", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Elgammal", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 4th Workshop on Open-Source Arabic Corpora and Processing Tools, with a Shared Task on Offensive Language Detection", |
| "volume": "", |
| "issue": "", |
| "pages": "82--85", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohamed Abdellatif and Ahmed Elgammal. 2020. Of- fensive language detection in arabic using ulmfit. In Proceedings of the 4th Workshop on Open-Source Arabic Corpora and Processing Tools, with a Shared Task on Offensive Language Detection, pages 82-85.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion", |
| "authors": [ |
| { |
| "first": "Chakravarthi", |
| "middle": [], |
| "last": "Bharathi Raja", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "41--53", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi. 2020. HopeEDI: A mul- tilingual hope speech detection dataset for equality, diversity, and inclusion. In Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Me- dia, pages 41-53, Barcelona, Spain (Online). Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A sentiment analysis dataset for codemixed Malayalam-English", |
| "authors": [ |
| { |
| "first": "Navya", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Jose", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "Philip" |
| ], |
| "last": "Sherly", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mc-Crae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
| "volume": "", |
| "issue": "", |
| "pages": "177--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Navya Jose, Shardul Suryawanshi, Elizabeth Sherly, and John Philip Mc- Crae. 2020a. A sentiment analysis dataset for code- mixed Malayalam-English. In Proceedings of the 1st Joint Workshop on Spoken Language Technolo- gies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 177-184, Marseille, France. European Language Resources association.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Findings of the shared task on Hope Speech Detection for Equality, Diversity, and Inclusion", |
| "authors": [ |
| { |
| "first": "Vigneshwaran", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Muralidaran", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi and Vigneshwaran Mural- idaran. 2021. Findings of the shared task on Hope Speech Detection for Equality, Diversity, and Inclu- sion. In Proceedings of the First Workshop on Lan- guage Technology for Equality, Diversity and Inclu- sion. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Corpus creation for sentiment analysis in code-mixed Tamil-English text", |
| "authors": [ |
| { |
| "first": "Vigneshwaran", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Muralidaran", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "Philip" |
| ], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mc-Crae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
| "volume": "", |
| "issue": "", |
| "pages": "202--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Vigneshwaran Murali- daran, Ruba Priyadharshini, and John Philip Mc- Crae. 2020b. Corpus creation for sentiment anal- ysis in code-mixed Tamil-English text. In Pro- ceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced lan- guages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 202-210, Marseille, France. European Language Re- sources association.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Findings of the shared task on Offensive Language Identification in Tamil, Malayalam, and Kannada", |
| "authors": [ |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Navya", |
| "middle": [], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "Anand", |
| "middle": [], |
| "last": "Jose", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Mandl", |
| "suffix": "" |
| }, |
| { |
| "first": "Prasanna", |
| "middle": [], |
| "last": "Kumar Kumaresan", |
| "suffix": "" |
| }, |
| { |
| "first": "Rahul", |
| "middle": [], |
| "last": "Ponnusamy", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Hariharan", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Sherly", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "Philip" |
| ], |
| "last": "Mc-Crae", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Ruba Priyadharshini, Navya Jose, Anand Kumar M, Thomas Mandl, Prasanna Kumar Kumaresan, Rahul Ponnusamy, Hariharan V, Elizabeth Sherly, and John Philip Mc- Crae. 2021. Findings of the shared task on Offen- sive Language Identification in Tamil, Malayalam, and Kannada. In Proceedings of the First Workshop on Speech and Language Technologies for Dravid- ian Languages. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Detecting offensive language in social media to protect adolescent online safety", |
| "authors": [ |
| { |
| "first": "Ying", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yilu", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Sencun", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "International Confernece on Social Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "71--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ying Chen, Yilu Zhou, Sencun Zhu, and Heng Xu. 2012. Detecting offensive language in social media to protect adolescent online safety. In 2012 Inter- national Conference on Privacy, Security, Risk and Trust and 2012 International Confernece on Social Computing, pages 71-80. IEEE.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Automated hate speech detection and the problem of offensive language", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Davidson", |
| "suffix": "" |
| }, |
| { |
| "first": "Dana", |
| "middle": [], |
| "last": "Warmsley", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Macy", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingmar", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the International AAAI Conference on Web and Social Media", |
| "volume": "11", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Davidson, Dana Warmsley, Michael Macy, and Ingmar Weber. 2017. Automated hate speech detection and the problem of offensive language. In Proceedings of the International AAAI Conference on Web and Social Media, volume 11.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "KanCMD: Kannada CodeMixed dataset for sentiment analysis and offensive language detection", |
| "authors": [ |
| { |
| "first": "Adeep", |
| "middle": [], |
| "last": "Hande", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja", |
| "middle": [], |
| "last": "Chakravarthi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "54--63", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adeep Hande, Ruba Priyadharshini, and Bharathi Raja Chakravarthi. 2020. KanCMD: Kannada CodeMixed dataset for sentiment analysis and offensive language detection. In Proceedings of the Third Workshop on Computational Modeling of Peo- ple's Opinions, Personality, and Emotion's in Social Media, pages 54-63, Barcelona, Spain (Online). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Universal language model fine-tuning for text classification", |
| "authors": [ |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Howard", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1801.06146" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Univer- sal language model fine-tuning for text classification. arXiv preprint arXiv:1801.06146.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Deep pyramid convolutional neural networks for text categorization", |
| "authors": [ |
| { |
| "first": "Rie", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "562--570", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rie Johnson and Tong Zhang. 2017. Deep pyramid convolutional neural networks for text categoriza- tion. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 562-570.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Overview of the HASOC Track at FIRE 2020: Hate Speech and Offensive Language Identification in Tamil", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Mandl", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandip", |
| "middle": [], |
| "last": "Modha", |
| "suffix": "" |
| }, |
| { |
| "first": "Anand Kumar", |
| "middle": [], |
| "last": "M", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja", |
| "middle": [], |
| "last": "Chakravarthi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Forum for Information Retrieval Evaluation", |
| "volume": "2020", |
| "issue": "", |
| "pages": "29--32", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3441501.3441517" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Mandl, Sandip Modha, Anand Kumar M, and Bharathi Raja Chakravarthi. 2020. Overview of the HASOC Track at FIRE 2020: Hate Speech and Offensive Language Identification in Tamil, Malay- alam, Hindi, English and German. In Forum for Information Retrieval Evaluation, FIRE 2020, page 29-32, New York, NY, USA. Association for Com- puting Machinery.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A multiobjective weighted voting ensemble classifier based on differential evolution algorithm for text sentiment classification", |
| "authors": [ |
| { |
| "first": "Aytug", |
| "middle": [], |
| "last": "Onan", |
| "suffix": "" |
| }, |
| { |
| "first": "Serdar", |
| "middle": [], |
| "last": "Korukoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hasan", |
| "middle": [], |
| "last": "Bulut", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Expert Systems with Applications", |
| "volume": "62", |
| "issue": "", |
| "pages": "1--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aytug Onan, Serdar Korukoglu, and Hasan Bulut. 2016. A multiobjective weighted voting ensemble classi- fier based on differential evolution algorithm for text sentiment classification. Expert Systems with Appli- cations, 62:1-16.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Su-nlp at semeval-2020 task 12: Offensive language identification in turkish tweets", |
| "authors": [ |
| { |
| "first": "Anil", |
| "middle": [], |
| "last": "Ozdemir", |
| "suffix": "" |
| }, |
| { |
| "first": "Reyyan", |
| "middle": [], |
| "last": "Yeniterzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fourteenth Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "2171--2176", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anil Ozdemir and Reyyan Yeniterzi. 2020. Su-nlp at semeval-2020 task 12: Offensive language identi- fication in turkish tweets. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 2171-2176.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Offensive language identification in greek", |
| "authors": [ |
| { |
| "first": "Zeses", |
| "middle": [], |
| "last": "Pitenis", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Tharindu", |
| "middle": [], |
| "last": "Ranasinghe", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2003.07459" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeses Pitenis, Marcos Zampieri, and Tharindu Ranas- inghe. 2020. Offensive language identification in greek. arXiv preprint arXiv:2003.07459.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Detecting offensive language in tweets using deep learning", |
| "authors": [ |
| { |
| "first": "Georgios K", |
| "middle": [], |
| "last": "Pitsilis", |
| "suffix": "" |
| }, |
| { |
| "first": "Heri", |
| "middle": [], |
| "last": "Ramampiaro", |
| "suffix": "" |
| }, |
| { |
| "first": "Helge", |
| "middle": [], |
| "last": "Langseth", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1801.04433" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Georgios K Pitsilis, Heri Ramampiaro, and Helge Langseth. 2018. Detecting offensive language in tweets using deep learning. arXiv preprint arXiv:1801.04433.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Crosslanguage text classification using structural correspondence learning", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th annual meeting of the association for computational linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1118--1127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Prettenhofer and Benno Stein. 2010. Cross- language text classification using structural corre- spondence learning. In Proceedings of the 48th an- nual meeting of the association for computational linguistics, pages 1118-1127.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Multilingual offensive language identification with cross-lingual embeddings", |
| "authors": [ |
| { |
| "first": "Tharindu", |
| "middle": [], |
| "last": "Ranasinghe", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.05324" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tharindu Ranasinghe and Marcos Zampieri. 2020. Multilingual offensive language identification with cross-lingual embeddings. arXiv preprint arXiv:2010.05324.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Offensive language detection using multi-level classification", |
| "authors": [ |
| { |
| "first": "Amir H", |
| "middle": [], |
| "last": "Razavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sasha", |
| "middle": [], |
| "last": "Uritsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Stan", |
| "middle": [], |
| "last": "Matwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Canadian Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "16--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir H Razavi, Diana Inkpen, Sasha Uritsky, and Stan Matwin. 2010. Offensive language detection using multi-level classification. In Canadian Conference on Artificial Intelligence, pages 16-27. Springer.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Hierarchical attention prototypical networks for few-shot text classification", |
| "authors": [ |
| { |
| "first": "Shengli", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Qingfeng", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Tengchao", |
| "middle": [], |
| "last": "Lv", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "476--485", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shengli Sun, Qingfeng Sun, Kevin Zhou, and Tengchao Lv. 2019. Hierarchical attention prototypical net- works for few-shot text classification. In Proceed- ings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Inter- national Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 476-485.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Findings of the shared task on Troll Meme Classification in Tamil", |
| "authors": [ |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja", |
| "middle": [], |
| "last": "Chakravarthi", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shardul Suryawanshi and Bharathi Raja Chakravarthi. 2021. Findings of the shared task on Troll Meme Classification in Tamil. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Sentiment Analysis in Tamil Texts: A Study on Machine Learning Techniques and Feature Representation", |
| "authors": [ |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sinnathamby", |
| "middle": [], |
| "last": "Mahesan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "2019 14th Conference on Industrial and Information Systems (ICIIS)", |
| "volume": "", |
| "issue": "", |
| "pages": "320--325", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICIIS47346.2019.9063341" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sajeetha Thavareesan and Sinnathamby Mahesan. 2019. Sentiment Analysis in Tamil Texts: A Study on Machine Learning Techniques and Feature Rep- resentation. In 2019 14th Conference on Industrial and Information Systems (ICIIS), pages 320-325.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Sentiment Lexicon Expansion using Word2vec and fastText for Sentiment Prediction in Tamil texts", |
| "authors": [ |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sinnathamby", |
| "middle": [], |
| "last": "Mahesan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", |
| "volume": "", |
| "issue": "", |
| "pages": "272--276", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/MERCon50084.2020.9185369" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sajeetha Thavareesan and Sinnathamby Mahesan. 2020a. Sentiment Lexicon Expansion using Word2vec and fastText for Sentiment Prediction in Tamil texts. In 2020 Moratuwa Engineering Re- search Conference (MERCon), pages 272-276.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Word embedding-based Part of Speech tagging in Tamil texts", |
| "authors": [ |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sinnathamby", |
| "middle": [], |
| "last": "Mahesan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 IEEE 15th International Conference on Industrial and Information Systems (ICIIS)", |
| "volume": "", |
| "issue": "", |
| "pages": "478--482", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICIIS51140.2020.9342640" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sajeetha Thavareesan and Sinnathamby Mahesan. 2020b. Word embedding-based Part of Speech tag- ging in Tamil texts. In 2020 IEEE 15th International Conference on Industrial and Information Systems (ICIIS), pages 478-482.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": ".", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "Schematic overview of the architecture of our model", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "Voting system", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF1": { |
| "html": null, |
| "content": "<table/>", |
| "text": "Train and Validation datasets description.", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "content": "<table><tr><td/><td colspan=\"3\">Kan Mal Tam</td></tr><tr><td colspan=\"4\">Best F1-score 0.75 0.97 0.78</td></tr><tr><td colspan=\"4\">Our Precision 0.65 0.91 0.75</td></tr><tr><td>Our Recall</td><td colspan=\"3\">0.74 0.94 0.77</td></tr><tr><td colspan=\"4\">Our F1-score 0.69 0.92 0.76</td></tr><tr><td>Rank</td><td>6</td><td>6</td><td>3</td></tr></table>", |
| "text": "Details of the parameters", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "content": "<table/>", |
| "text": "", |
| "num": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |