| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:12:40.329373Z" |
| }, |
| "title": "MUCS@LT-EDI-EACL2021:CoHope-Hope Speech Detection for Equality, Diversity, and Inclusion in Code-Mixed Texts", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Balouchzahi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Mangalore University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "B", |
| "middle": [ |
| "K" |
| ], |
| "last": "Aparna", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Mangalore University", |
| "location": {} |
| }, |
| "email": "aparnabk14@gmail.com" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "L" |
| ], |
| "last": "Shashirekha", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Mangalore University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes the models submitted by the team MUCS for \"Hope Speech Detection for Equality, Diversity, and Inclusion-EACL 2021\" shared task that aims at classifying a comment / post in English and code-mixed texts in two language pairs, namely, Tamil-English (Ta-En) and Malayalam-English (Ma-En) into one of the three predefined categories, namely, \"Hope speech\", \"Non hope speech\", and \"other languages\". Three models namely, CoHope-ML, CoHope-NN, and CoHope-TL based on Ensemble of classifiers, Keras Neural Network (NN) and BiLSTM with Conv1d model respectively are proposed for the shared task. CoHope-ML, CoHope-NN models are trained on a feature set comprised of char sequences extracted from sentences combined with words for Ma-En and Ta-En code-mixed texts and a combination of word and char ngrams along with syntactic word ngrams for English text. CoHope-TL model consists of three major parts: training tokenizer, BERT Language Model (LM) training and then using pre-trained BERT LM as weights in BiLSTM-Conv1d model. Out of three proposed models, CoHope-ML model (best among our models) obtained 1st, 2nd, and 3rd ranks with weighted F1-scores of 0.85, 0.92, and 0.59 for Ma-En, English and Ta-En texts respectively.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes the models submitted by the team MUCS for \"Hope Speech Detection for Equality, Diversity, and Inclusion-EACL 2021\" shared task that aims at classifying a comment / post in English and code-mixed texts in two language pairs, namely, Tamil-English (Ta-En) and Malayalam-English (Ma-En) into one of the three predefined categories, namely, \"Hope speech\", \"Non hope speech\", and \"other languages\". Three models namely, CoHope-ML, CoHope-NN, and CoHope-TL based on Ensemble of classifiers, Keras Neural Network (NN) and BiLSTM with Conv1d model respectively are proposed for the shared task. CoHope-ML, CoHope-NN models are trained on a feature set comprised of char sequences extracted from sentences combined with words for Ma-En and Ta-En code-mixed texts and a combination of word and char ngrams along with syntactic word ngrams for English text. CoHope-TL model consists of three major parts: training tokenizer, BERT Language Model (LM) training and then using pre-trained BERT LM as weights in BiLSTM-Conv1d model. Out of three proposed models, CoHope-ML model (best among our models) obtained 1st, 2nd, and 3rd ranks with weighted F1-scores of 0.85, 0.92, and 0.59 for Ma-En, English and Ta-En texts respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The recent wave of using social media especially during the outbreak of Covid-19 has increasingly affected the amount of user-generated data and text over the internet that has provided immense opportunities in automated text analysis and Computational Linguistics (Bohra et al., 2018) . Most of tools and systems to analyze social media texts are designed to handle them in their native script. However, social media texts are often code-mixed, i.e., written in Roman script mixing English words rather than in the native script of language due to difficulty in using tools provided to pen the comments in native script (Jose et al., 2020; Priyadharshini et al., 2020) . Further, users may prefer using Roman scripts even though the language has its own standardized written form and script (Sitaram and Black, 2016) . The analysis of Romanized and code-mixed texts is more challenging task compared to analysis of texts in native scripts because of the inconsistent Romanization conventions and non-standard grammars in code-mixed texts (Riyadh and Kondrak, 2019) .", |
| "cite_spans": [ |
| { |
| "start": 265, |
| "end": 285, |
| "text": "(Bohra et al., 2018)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 621, |
| "end": 640, |
| "text": "(Jose et al., 2020;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 641, |
| "end": 669, |
| "text": "Priyadharshini et al., 2020)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 792, |
| "end": 817, |
| "text": "(Sitaram and Black, 2016)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1039, |
| "end": 1065, |
| "text": "(Riyadh and Kondrak, 2019)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Hope speech detection is defined as analysis and detection of inspirational talk and comments/posts with positive vibes, etc. against people with not straight desires such as Lesbian, Gay, and Transgender or positive suggestion for Covid-19 guidelines, etc. (Chakravarthi, 2020) . Even though a couple of studies and workshops are focused on analyzing code-mixed texts in tasks such as Sentiments Analysis (SA) and Offensive Language Identification (OLI) it has been rarely experimented on Hope Speech Detection even in native scripts. In this direction, the \"Hope Speech Detection for Equality, Diversity, and Inclusion\" 1 shared task aims at classifying a comment/post in English and code-mixed texts in two language pairs, namely, Tamil-English (Ta-En) and Malayalam-English (Ma-En) into one of the three predefined categories, namely, \"Hope speech\", \"Non hope speech\", and \"other languages\". The details of the datasets provided by organizers are given in (Chakravarthi, 2020) .", |
| "cite_spans": [ |
| { |
| "start": 232, |
| "end": 278, |
| "text": "Covid-19 guidelines, etc. (Chakravarthi, 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 960, |
| "end": 980, |
| "text": "(Chakravarthi, 2020)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we, team MUCS describe the three models CoHope-ML, CoHope-NN and CoHope-TL submitted for \"Hope Speech Detection for Equality, Diversity, and Inclusion\" shared task. The char sequences extracted from sentences combined with words in the sentences are used to train CoHope-ML and CoHope-NN models for codemixed Ma-En and Ta-En texts whereas a combination of char and word ngrams along with syntactic ngrams are used to train the same models for English texts. CoHope-TL model is comprised of three major steps: (i) training tokenizer, (ii) training BERT LM using raw texts from Dakshina Dataset 2 [5], for Ma-En and Ta-En code mixed texts and pre-trained BERT LM from Kaggle 3 for English, and (iii) transferring obtained weights and building BiLSTM-Conv1d model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is organized as follows: while Section 2 describes the recent literature on codemixed text processing, Section 3 focuses on the description of the models submitted to the shared task followed by experiments and results in Section 4. Conclusion and future plans are included in Section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Researchers have developed a vast range of datasets, tools and models for Text Classification (TC). However, comparatively very less work has been done on the classification of code-mixed texts and the available literature focus on SA and OLI tasks for several languages pairs. Hope Speech detection is a new challenge that has been explored rarely. Some of recent studies on TC tasks for code-mixed texts are given below:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "(Chakravarthi et al., 2020b) presents an overview of OLI shared task on code-mixed texts in Dravidian languages 4 consisting of two subtasks A and B to classify a given text into \"offensive\" or \"not-offensive\" categories. While Subtask A is to classify code-mixed Ma-En YouTube comments, SubTask B is to classify Romanized Malayalam and Romanized Tamil texts from YouTube or Twitter comments. Datasets used in this shared tasks are described in (Chakravarthi et al., 2020c) and (Chakravarthi et al., 2020a) . Two models based on different configurations of LSTM proposed by (Renjit and Idicula, 2020 ) for the OLI shared task obtained a weighted F1-score of 0.53 for Romanized Malayalam text in Subtask B. A Universal LM has been trained for Ma-En code-mixed texts from Wikipedia articles in native script combined with translated and transliterated versions by (Arora, 2020). The authors transferred the obtained LM to TC model from fastai library to classify code-mixed texts in Ma-En and obtained 0.91, 0.74 weighted F1-score for Subtask A and Romanized Malayalam text of Subtask B respectively.", |
| "cite_spans": [ |
| { |
| "start": 445, |
| "end": 473, |
| "text": "(Chakravarthi et al., 2020c)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 478, |
| "end": 506, |
| "text": "(Chakravarthi et al., 2020a)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 574, |
| "end": 599, |
| "text": "(Renjit and Idicula, 2020", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\"Sentiment Analysis of Dravidian Languages in Code-Mixed Text\" 5 which focuses on SA of code-mixed texts in Ta-En and Ma-En language pairs (Chakravarthi et al., 2020d) is another shared task on Dravidian languages. Datasets described in (Chakravarthi et al., 2020c) and (Chakravarthi et al., 2020a) are used in this shared task and they include five categories, namely, \"Positive\", \"Negative\", \"Unknown state\", \"Mixed-Feelings\", and \"Other languages\" for each language pairs. The overall results of this shared task reported in leaderboard illustrates that XLM-Roberta model proposed by (Sun and Zhou, 2020 ) with a weighted F1-score of 0.65 and 0.74 for Ta-En and Ma-En language pairs respectively obtained first rank for both subtasks. The proposed XLM-Roberta model uses extracted output of Convolution Neural Networks (CNN) which enables it to utilize the semantic information from texts. Another XLM-Roberta model proposed by (Ou and Li, 2020) ensembles pre-trained multi-language models and K-folding method to classify code-mixed texts. The proposed model with 0.63 and 0.74 weighted F1-score obtained third and first ranks on Ta-En and Ma-En language pairs respectively.", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 265, |
| "text": "(Chakravarthi et al., 2020c)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 270, |
| "end": 298, |
| "text": "(Chakravarthi et al., 2020a)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 587, |
| "end": 606, |
| "text": "(Sun and Zhou, 2020", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 931, |
| "end": 948, |
| "text": "(Ou and Li, 2020)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The proposed models are described in terms of feature engineering to extract the required features followed by description of the classifiers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Framework of the proposed methodology for CoHope-ML and CoHope-NN consists of a step of preprocessing the train and test data followed by feature engineering module to extract features and use them to train and test the models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Engineering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Preprocessing steps includes converting emojis to corresponding text (using emoji library 6 ), removing punctuations, words of length less than 2, unwanted characters (such as !()-[];:'\" \u00a1\u00bf./?$=% +@* ', etc.) and converting text to lowercase.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Engineering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Feature engineering module uses everygrams 7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Engineering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Input text Extracted features \"yuvanvera level ya.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Engineering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "(in Ta-En) yu , uv, va, an, n , v, ve, er, ra, a , l, le, ev, ve, el, l , y, ya, yuv, uva,van, an , ve, ver, era, ra , le, lev, eve, vel, el , ya, yuva, uvan, van , ver, vera, era , lev, leve, evel, vel , yuvan, uvan , vera, vera , leve, level, evel , yuvan , vera , level, level ,yuvanvera, level, ya Tables 1 and 2 give samples of input texts and features extracted from the corresponding texts.", |
| "cite_spans": [ |
| { |
| "start": 14, |
| "end": 301, |
| "text": ", uv, va, an, n , v, ve, er, ra, a , l, le, ev, ve, el, l , y, ya, yuv, uva,van, an , ve, ver, era, ra , le, lev, eve, vel, el , ya, yuva, uvan, van , ver, vera, era , lev, leve, evel, vel , yuvan, uvan , vera, vera , leve, level, evel , yuvan , vera , level, level ,yuvanvera, level, ya", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 4, |
| "end": 10, |
| "text": "Ta-En)", |
| "ref_id": null |
| }, |
| { |
| "start": 302, |
| "end": 317, |
| "text": "Tables 1 and 2", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Engineering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The proposed models are described below:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models Description", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "There are various notions of ensemble learning such as bagging, stacking, etc. Due to simplicity and efficiency of bagging method, CoHope-ML model is developed as a hard voting classifier based on bagging by ensembling three sklearn 9 classifiers, Logistic Regression (LR), eXtreme Gradient Boosting (XGB) (Chen and Guestrin, 2016) and Multi-Layer Perceptron (MLP) 10 . Idea behind ensembling simple classifiers as estimators is to build a robust classifier utilizing the strength of each classifier. Parameters used for each estimator are given in Table 3 . CoHope-ML model is trained on TFIDF vectors obtained in feature engineering module. The framework of CoHope-ML is shown in Figure 1 . The steps involved in designing CoHope model are described below:", |
| "cite_spans": [ |
| { |
| "start": 306, |
| "end": 331, |
| "text": "(Chen and Guestrin, 2016)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 549, |
| "end": 556, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 682, |
| "end": 690, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "CoHope-ML", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Training Tokenizer: Romanized text from Dakshina dataset (Roark et al., 2020) combined with code-mixed texts from (Chakravarthi et al., 2020c) and (Chakravarthi et al., 2020a) are preprocessed and used to train a byte-level Byte-pair encoding tokenizer 12 with a vocab size of 52000 words and min frequency of 2 (separately for each language pairs Ma-En and Ta-En). The resulting tokenizer is later used in training BERT LM.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 77, |
| "text": "(Roark et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 114, |
| "end": 142, |
| "text": "(Chakravarthi et al., 2020c)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 147, |
| "end": 175, |
| "text": "(Chakravarthi et al., 2020a)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CoHope-ML", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Training BERT LM: BERT LM is trained using the trained tokenizer and raw texts used in previous step and transformers library 13 with following configurations:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CoHope-ML", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "\u2022 vocab size=52 000", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CoHope-ML", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "\u2022 max position embeddings=514", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CoHope-ML", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "\u2022 num attention heads=12", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CoHope-ML", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "\u2022 num hidden layers=6", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CoHope-ML", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "\u2022 type vocab size=1 11 https://keras.io/ 12 https://huggingface.co/transformers/ tokenizer summary.html 13 https://pypi.org/project/transformers/ Table 4 gives summary of the layers in BiLSTM-Conv1D model and the frame work of CoHope-TL is shown in Figure 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 146, |
| "end": 153, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 249, |
| "end": 257, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "CoHope-ML", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Datasets used in this study include unannotated Romanized text from Dakshina (Roark et al., 2020) combined with texts from (Chakravarthi et al., 2020c), (Chakravarthi et al., 2020a) and annotated datasets provided by organizers which are Figure 4 . It can be observed that MaCo code-mixed texts are noticeably less than TaCo code-mixed texts. Annotated datasets include two code-mixed datasets Ta-En and Ma-En along with English datasets. Texts in the datasets for each Language Pairs (LP) are distributed in three categories namely, \"Hope speech (HS)\", \"Non hope speech (NO)\", and \"other languages (OL)\". Statistics of labels distribution in train, development (Dev.) and test sets are given in Table 5 . It can be observed that as Ma-En code-mixed texts include significant number of samples in Malayalam native script and English text includes more samples, the proposed models are expected to perform better for Ma-En code-mixed texts and English texts compared to Ta-En code-mixed texts.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 97, |
| "text": "(Roark et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 238, |
| "end": 247, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 697, |
| "end": 704, |
| "text": "Table 5", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Out of three proposed models, the results reported by organizers in leaderboard obtained 1st, 2nd, and 3rd ranks for Ma-En, English and Ta-En texts respectively for CoHope-ML model (best among our models). Comparison of weighted scores of all the Table 6 . As it is illustrated in Table 6 , both CoHope-ML and CoHope-NN models utilizing char sequences, traditional n-grams and syntactic ngrams features outperformed the CoHope-TL model. The results also illustrate that models performed better for texts with more native scripts.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 247, |
| "end": 254, |
| "text": "Table 6", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 281, |
| "end": 288, |
| "text": "Table 6", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "The Confusion Matrix (CM) for Ma-En, Ta-En, and English texts using CoHope-ML model are shown in Figures 5, 6 and 7 respectively. The confusion matrices illustrate that CoHope-ML model rarely gets confused between other languages and the intended language in Malayalam and English since both datasets are having significant number of samples in native scripts.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 97, |
| "end": 109, |
| "text": "Figures 5, 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "In this paper we, team MUCS, present the description of three proposed models for the task of \"Hope Speech Detection for Equality, Diversity, Figure 5 : CM for Ma-En texts using CoHope-ML model Figure 6 : CM for Ta-En texts using CoHope-ML model Figure 7 : CM for English texts using CoHope-ML model and Inclusion-EACL 2021\". Proposed models includes a ML voting classifier -CoHope-ML, a DL NN model -CoHope-NN and a TL based model -CoHope-TL. The first two models are trained on a combination of char sequences and words for Ta-En and Ma-En code-mixed texts and combination of traditional char and word ngrams with syntactic word ngrams for English. CoHope-TL model utilizes BERT LM as weights in a BiLSTM-Conv1D architecture. Out of three proposed models, CoHope-ML model (best among our models) obtained weighted F1-scores of 0.85, 0.92 and 0.59 and 1, 2, 3 ranks for Malayalam-English, English, and Tamil-English texts. As future work, we planned to explore syntactic ngrams features for code-mixed texts and improve CoHope-NN architecture by experimenting on different NN layers and configurations. We also would like to compare different approaches based on TL for code-mixed texts from low resource languages.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 142, |
| "end": 150, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 194, |
| "end": 202, |
| "text": "Figure 6", |
| "ref_id": null |
| }, |
| { |
| "start": 246, |
| "end": 254, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://sites.google.com/view/lt-edi-2021", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/google-research-datasets/dakshina 3 https://www.kaggle.com/christofhenkel/pytorchpretrainedbert 4 https://competitions.codalab.org/competitions/ 25295#learn the details", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://dravidian-codemix.github.io/2020/index.html 6 https://pypi.org/project/emoji/ 7 https://www.kite.com/python/docs/nltk.everygrams", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://pypi.org/project/SNgramExtractor/ 9 https://scikit-learn.org/stable/ 10 https://scikit-learn.org/stable/modules/ neural networks supervised.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Gaurav Arora. 2020. Gauravarora@ HASOC-Dravidian-CodeMix-FIRE2020: Pre-training ULM-FiT on Synthetically Generated Code-Mixed Data for Hate Speech Detection", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.02094" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gaurav Arora. 2020. Gauravarora@ HASOC- Dravidian-CodeMix-FIRE2020: Pre-training ULM- FiT on Synthetically Generated Code-Mixed Data for Hate Speech Detection. arXiv preprint arXiv:2010.02094.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A dataset of Hindi-English code-mixed social media text for hate speech detection", |
| "authors": [ |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Bohra", |
| "suffix": "" |
| }, |
| { |
| "first": "Deepanshu", |
| "middle": [], |
| "last": "Vijay", |
| "suffix": "" |
| }, |
| { |
| "first": "Vinay", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Manish", |
| "middle": [], |
| "last": "Syed Sarfaraz Akhtar", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Shrivastava", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the second workshop on computational modeling of people's opinions, personality, and emotions in social media", |
| "volume": "", |
| "issue": "", |
| "pages": "36--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aditya Bohra, Deepanshu Vijay, Vinay Singh, Syed Sarfaraz Akhtar, and Manish Shrivastava. 2018. A dataset of Hindi-English code-mixed social media text for hate speech detection. In Proceedings of the second workshop on computational modeling of people's opinions, personality, and emotions in social media, pages 36-41.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion", |
| "authors": [ |
| { |
| "first": "Chakravarthi", |
| "middle": [], |
| "last": "Bharathi Raja", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "41--53", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi. 2020. HopeEDI: A mul- tilingual hope speech detection dataset for equality, diversity, and inclusion. In Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Me- dia, pages 41-53, Barcelona, Spain (Online). Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A sentiment analysis dataset for codemixed Malayalam-English", |
| "authors": [ |
| { |
| "first": "Navya", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Jose", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "Philip" |
| ], |
| "last": "Sherly", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mc-Crae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
| "volume": "", |
| "issue": "", |
| "pages": "177--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Navya Jose, Shardul Suryawanshi, Elizabeth Sherly, and John Philip Mc- Crae. 2020a. A sentiment analysis dataset for code- mixed Malayalam-English. In Proceedings of the 1st Joint Workshop on Spoken Language Technolo- gies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 177-184, Marseille, France. European Language Resources association.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Overview of the track on HASOC-Offensive Language Identification", |
| "authors": [ |
| { |
| "first": "Anand", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Philip Mccrae", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Premjith", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "P" |
| ], |
| "last": "Soman", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Mandl", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, M Anand Kumar, John Philip McCrae, Premjith B, Soman KP, and Thomas Mandl. 2020b. Overview of the track on HASOC-Offensive Language Identification-", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Working Notes of the Forum for Information Retrieval Evaluation (FIRE 2020)", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dravidiancodemix", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "DravidianCodeMix. In Working Notes of the Forum for Information Retrieval Evaluation (FIRE 2020).", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "CEUR Workshop Proceedings", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "CEUR Workshop Proceedings. In: CEUR-WS. org, Hyderabad, India.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Corpus creation for sentiment analysis in code-mixed Tamil-English text", |
| "authors": [ |
| { |
| "first": "Vigneshwaran", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Muralidaran", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "Philip" |
| ], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mc-Crae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
| "volume": "", |
| "issue": "", |
| "pages": "202--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Vigneshwaran Murali- daran, Ruba Priyadharshini, and John Philip Mc- Crae. 2020c. Corpus creation for sentiment anal- ysis in code-mixed Tamil-English text. In Pro- ceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced lan- guages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 202-210, Marseille, France. European Language Re- sources association.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Overview of the Track on Sentiment Analysis for Dravidian Languages in Code-Mixed Text", |
| "authors": [ |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Vigneshwaran", |
| "middle": [], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Muralidaran", |
| "suffix": "" |
| }, |
| { |
| "first": "Navya", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Jose", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "P" |
| ], |
| "last": "Sherly", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "In Forum for Information Retrieval Evaluation", |
| "volume": "2020", |
| "issue": "", |
| "pages": "21--24", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3441501.3441515" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Ruba Priyadharshini, Vigneshwaran Muralidaran, Shardul Suryawanshi, Navya Jose, Elizabeth Sherly, and John P. McCrae. 2020d. Overview of the Track on Sentiment Analy- sis for Dravidian Languages in Code-Mixed Text. In Forum for Information Retrieval Evaluation, FIRE 2020, page 21-24, New York, NY, USA. Associa- tion for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Xgboost: A scalable tree boosting system", |
| "authors": [ |
| { |
| "first": "Tianqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 22nd acm sigkdd international conference on knowledge discovery and data mining", |
| "volume": "", |
| "issue": "", |
| "pages": "785--794", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianqi Chen and Carlos Guestrin. 2016. Xgboost: A scalable tree boosting system. In Proceedings of the 22nd acm sigkdd international conference on knowl- edge discovery and data mining, pages 785-794.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A Survey of Current Datasets for Code-Switching Research", |
| "authors": [ |
| { |
| "first": "Navya", |
| "middle": [], |
| "last": "Jose", |
| "suffix": "" |
| }, |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "P" |
| ], |
| "last": "Sherly", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 6th International Conference on Advanced Computing and Communication Systems (ICACCS)", |
| "volume": "", |
| "issue": "", |
| "pages": "136--141", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICACCS48705.2020.9074205" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Navya Jose, Bharathi Raja Chakravarthi, Shardul Suryawanshi, Elizabeth Sherly, and John P. McCrae. 2020. A Survey of Current Datasets for Code- Switching Research. In 2020 6th International Con- ference on Advanced Computing and Communica- tion Systems (ICACCS), pages 136-141.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "YNU@Dravidian-CodeMix-FIRE2020: XLM-RoBERTa for Multi-language Sentiment Analysis", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Ou", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Forum for Information Retrieval Evaluation. CEUR Workshop Proceedings", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "X. Ou and H. Li. 2020. YNU@Dravidian-CodeMix- FIRE2020: XLM-RoBERTa for Multi-language Sentiment Analysis. In Forum for Information Re- trieval Evaluation. CEUR Workshop Proceedings.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Syntactic n-grams as features for the author profiling task", |
| "authors": [ |
| { |
| "first": "Juan-Pablo", |
| "middle": [], |
| "last": "Posadas-Dur\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilia", |
| "middle": [], |
| "last": "Markov", |
| "suffix": "" |
| }, |
| { |
| "first": "Helena", |
| "middle": [], |
| "last": "G\u00f3mez-Adorno", |
| "suffix": "" |
| }, |
| { |
| "first": "Grigori", |
| "middle": [], |
| "last": "Sidorov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ildar", |
| "middle": [], |
| "last": "Batyrshin", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Gelbukh", |
| "suffix": "" |
| }, |
| { |
| "first": "Obdulia", |
| "middle": [], |
| "last": "Pichardo-Lagunas", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juan-Pablo Posadas-Dur\u00e1n, Ilia Markov, Helena G\u00f3mez-Adorno, Grigori Sidorov, Ildar Batyrshin, Alexander Gelbukh, and Obdulia Pichardo-Lagunas. 2015. Syntactic n-grams as features for the author profiling task.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Named Entity Recognition for Code-Mixed Indian Corpus using Meta Embedding", |
| "authors": [ |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "Mani", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "P" |
| ], |
| "last": "Vegupatti", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 6th International Conference on Advanced Computing and Communication Systems (ICACCS)", |
| "volume": "", |
| "issue": "", |
| "pages": "68--72", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICACCS48705.2020.9074379" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruba Priyadharshini, Bharathi Raja Chakravarthi, Mani Vegupatti, and John P. McCrae. 2020. Named Entity Recognition for Code-Mixed Indian Corpus using Meta Embedding. In 2020 6th International Conference on Advanced Computing and Communi- cation Systems (ICACCS), pages 68-72.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "CUSATNLP@ HASOC-Dravidian-CodeMix-FIRE2020: Identifying Offensive Language from ManglishTweets", |
| "authors": [ |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Renjit", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [], |
| "last": "Sumam", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Idicula", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.08756" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sara Renjit and Sumam Mary Idicula. 2020. CUSATNLP@ HASOC-Dravidian-CodeMix- FIRE2020: Identifying Offensive Language from ManglishTweets. arXiv preprint arXiv:2010.08756.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Joint approach to deromanization of code-mixed texts", |
| "authors": [ |
| { |
| "first": "Grzegorz", |
| "middle": [], |
| "last": "Rashed Rubby Riyadh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kondrak", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Sixth Workshop on NLP for Similar Languages, Varieties and Dialects", |
| "volume": "", |
| "issue": "", |
| "pages": "26--34", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rashed Rubby Riyadh and Grzegorz Kondrak. 2019. Joint approach to deromanization of code-mixed texts. In Proceedings of the Sixth Workshop on NLP for Similar Languages, Varieties and Dialects, pages 26-34.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Isin Demirsahin, and Keith Hall. 2020. Processing South Asian languages written in the Latin script: the Dakshina dataset", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Roark", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Wolf-Sonkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Christo", |
| "middle": [], |
| "last": "Kirov", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Sabrina", |
| "suffix": "" |
| }, |
| { |
| "first": "Cibu", |
| "middle": [], |
| "last": "Mielke", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Johny", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2007.01176" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Roark, Lawrence Wolf-Sonkin, Christo Kirov, Sabrina J Mielke, Cibu Johny, Isin Demirsahin, and Keith Hall. 2020. Processing South Asian languages written in the Latin script: the Dakshina dataset. arXiv preprint arXiv:2007.01176.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Syntactic dependency-based ngrams: More evidence of usefulness in classification", |
| "authors": [ |
| { |
| "first": "Grigori", |
| "middle": [], |
| "last": "Sidorov", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Velasquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Efstathios", |
| "middle": [], |
| "last": "Stamatatos", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Gelbukh", |
| "suffix": "" |
| }, |
| { |
| "first": "Liliana", |
| "middle": [], |
| "last": "Chanona-Hern\u00e1ndez", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "International Conference on Intelligent Text Processing and Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "13--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Grigori Sidorov, Francisco Velasquez, Efstathios Sta- matatos, Alexander Gelbukh, and Liliana Chanona- Hern\u00e1ndez. 2013. Syntactic dependency-based n- grams: More evidence of usefulness in classification. In International Conference on Intelligent Text Pro- cessing and Computational Linguistics, pages 13- 24. Springer.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Speech synthesis of code-mixed text", |
| "authors": [ |
| { |
| "first": "Sunayana", |
| "middle": [], |
| "last": "Sitaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
| "volume": "", |
| "issue": "", |
| "pages": "3422--3428", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sunayana Sitaram and Alan W Black. 2016. Speech synthesis of code-mixed text. In Proceedings of the Tenth International Conference on Language Re- sources and Evaluation (LREC'16), pages 3422- 3428.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "SRJ @ Dravidian-CodeMix-FIRE2020:Automatic Classification and Identification Sentiment in Code-mixed Text", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Forum for Information Retrieval Evaluation. CEUR Workshop Proceedings", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Sun and X. Zhou. 2020. SRJ @ Dravidian- CodeMix-FIRE2020:Automatic Classification and Identification Sentiment in Code-mixed Text. In Forum for Information Retrieval Evaluation. CEUR Workshop Proceedings.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "text": "Framework of CoHope-ML modelThe resulting LM is in turn trained for Ta-En and Ma-En language pairs separately and the weights are transferred for the construction of the classifier.Model Construction: a BiLSTM-Conv1D architecture which is a BiLSTM model over convolutional layers with : \u2022 Kernel size of 3 \u2022 Filter = 32 \u2022 MaxPooling1D with pool size of 2 \u2022 Length of words sequences = 250 with padding for short sentences is used to train CoHope-TL model for 50 epochs with a batch size of 126.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "text": "Framework of CoHope-NN model Figure 3: Framework of CoHope-TL", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "text": "Statistics of raw texts models proposed by MUCS is shown in", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "num": null, |
| "text": "Examples of input text and extracted features for", |
| "html": null, |
| "content": "<table><tr><td>code-mixed texts</td></tr><tr><td>function from NLTK library to extract char se-</td></tr><tr><td>quences of length 3 to 6 from texts along with</td></tr><tr><td>tokenized words for Ma-En and Ta-En language</td></tr><tr><td>pairs as features. For English texts SNgramExtrac-</td></tr><tr><td>tor</td></tr></table>" |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "num": null, |
| "text": "Examples of input text and extracted features for", |
| "html": null, |
| "content": "<table><tr><td>English texts</td><td/></tr><tr><td colspan=\"2\">Estimators Parameters</td></tr><tr><td/><td>max depth=20, n estimators=80,</td></tr><tr><td/><td>learning rate=0.1,</td></tr><tr><td>XGB</td><td>colsample bytree=.7,</td></tr><tr><td/><td>gamma=.01, reg alpha=4,</td></tr><tr><td/><td>objective='multi: softmax'</td></tr><tr><td/><td>hidden layer sizes= (150,100,50),</td></tr><tr><td>MLP</td><td>max iter=300,activation = 'relu',</td></tr><tr><td/><td>solver='adam', random state=1</td></tr><tr><td>LR</td><td>Default parameters</td></tr></table>" |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "num": null, |
| "text": "Parameters for estimators in CoHope-ML3.2.2 CoHope-NNThe framework of CoHope-NN model is shown inFigure 2. It makes use of a Keras 11 dense Neural Network (NN) architecture adopted from", |
| "html": null, |
| "content": "<table><tr><td>https://www.kaggle.com/ismu94/</td></tr><tr><td>tf-idf-deep-neural-net</td></tr><tr><td>CoHope-NN model is trained for 40 epochs with a</td></tr><tr><td>batch size of 128 on TFIDF vectors obtained from</td></tr><tr><td>feature engineering module.</td></tr><tr><td>3.2.3 CoHope-TL</td></tr><tr><td>Based on TL, CoHope-TL adopts the architecture</td></tr><tr><td>described in</td></tr><tr><td>https://huggingface.co/blog/</td></tr><tr><td>how-to-train</td></tr><tr><td>to train Tokenizers and LMs using transformers for</td></tr><tr><td>Ta-En and Ma-En language pairs. Tokenizer and</td></tr><tr><td>LM for English are publicly available at:</td></tr><tr><td>https://www.kaggle.</td></tr><tr><td>com/christofhenkel/</td></tr><tr><td>torch-bert-weights</td></tr></table>" |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "num": null, |
| "text": "", |
| "html": null, |
| "content": "<table><tr><td/><td colspan=\"4\">: Layers in BiLSTM-Conv1D</td></tr><tr><td>Set</td><td>LP</td><td>NO</td><td>HS</td><td>OL</td></tr><tr><td colspan=\"2\">Train Ma-En</td><td>6205</td><td colspan=\"2\">1668 691</td></tr><tr><td/><td>Ta-En</td><td>7872</td><td colspan=\"2\">6327 1961</td></tr><tr><td/><td colspan=\"4\">English 20778 1962 22</td></tr><tr><td>Dev.</td><td>Ma-En</td><td>784</td><td>190</td><td>96</td></tr><tr><td/><td>Ta-En</td><td>998</td><td>757</td><td>263</td></tr><tr><td/><td colspan=\"2\">English 2569</td><td>272</td><td>2</td></tr><tr><td>Test</td><td>Ma-En</td><td>776</td><td>194</td><td>101</td></tr><tr><td/><td>Ta-En</td><td>946</td><td>815</td><td>259</td></tr><tr><td/><td colspan=\"2\">English 2593</td><td>250</td><td>3</td></tr></table>" |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "num": null, |
| "text": "", |
| "html": null, |
| "content": "<table><tr><td>: Label distribution over annotated datasets</td></tr><tr><td>described in (Chakravarthi, 2020). Statistics of the</td></tr><tr><td>Tamil-English 14 (TaCo) and Malayalam-English 15</td></tr><tr><td>(MaCo) code-mixed raw texts are shown in</td></tr></table>" |
| }, |
| "TABREF8": { |
| "type_str": "table", |
| "num": null, |
| "text": "Results of the proposed models", |
| "html": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |