| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:21:14.554745Z" |
| }, |
| "title": "UVCE-IIITT@DravidianLangTech-EACL2021: Tamil Troll Meme Classification: You need to Pay more Attention", |
| "authors": [ |
| { |
| "first": "Siddhanth", |
| "middle": [ |
| "U" |
| ], |
| "last": "Hegde", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Bangalore University", |
| "location": {} |
| }, |
| "email": "siddhanthhegde227@gmail.com" |
| }, |
| { |
| "first": "Adeep", |
| "middle": [], |
| "last": "Hande", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tamil Nadu", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Priyadarshini", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ULTRA Arts and Science College", |
| "location": { |
| "country": "India" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Eastern University", |
| "location": { |
| "country": "Sri Lanka" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Bharathi", |
| "middle": [ |
| "Raja" |
| ], |
| "last": "Chakravarthi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National University of Ireland", |
| "location": { |
| "settlement": "Galway" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Tamil is a Dravidian language that is commonly used and spoken in the southern part of Asia. In the era of social media, memes have been a fun moment in the day-today life of people. Here, we try to analyze the true meaning of Tamil memes by categorizing them as troll and non-troll. We propose an ingenious model comprising of a transformertransformer architecture that tries to attain state-of-the-art by using attention as its main component. The dataset consists of troll and non-troll images with their captions as text. The task is a binary classification task. The objective of the model is to pay more attention to the extracted features and to ignore the noise in both images and text.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Tamil is a Dravidian language that is commonly used and spoken in the southern part of Asia. In the era of social media, memes have been a fun moment in the day-today life of people. Here, we try to analyze the true meaning of Tamil memes by categorizing them as troll and non-troll. We propose an ingenious model comprising of a transformertransformer architecture that tries to attain state-of-the-art by using attention as its main component. The dataset consists of troll and non-troll images with their captions as text. The task is a binary classification task. The objective of the model is to pay more attention to the extracted features and to ignore the noise in both images and text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Over the past decade, memes have become a ubiquitous phenomenon over the internet. Memes can come in several formats such as images, video, etc. Memes can take a combined form of both text and images too. Due to its vast popularity, different people perceive memes distinctively. Recent studies have prompted the usage of memes as a mode of communication across social media platforms. The presence of text in images makes it harder to decode the sentiment or any other characteristic (Avvaru and Vobilisetty, 2020) . Regardless of the type of the meme, they may be changed, recreated over social media networks, and tend to be used in contexts involving sensitive topics such as politics, casteism, etc, to add a sarcastic perspective (French, 2017; Nave et al., 2018) . Due to its multimodality, conscientious analysis of memes can shed light on the societal factors, their implications on culture, and the values promoted by them (Milner, 2013) . In addition to that, analyzing the intended emotion of a meme could help us acknowledge fake news, offensive content that is being propagated using the internet memes as a medium, thus helping in eradicating the spread of misinformation and hatred to the large user base in social media (Chakravarthi et al., 2020b,a) . It is plausible that memes might become an integral part of most of the people, as it is used to understand racial and gender discourse on social media platforms such as Reddit (Milner, 2013; Ghanghor et al., 2021b,a) . One of the approaches to overcome this is manually monitoring and moderating user-generated content. But due to the amount of data being generated on the internet every day, it would be ideal to develop automated systems to moderate them (Kumar et al., 2018; Yasaswini et al., 2021; Puranik et al., 2021; Chakravarthi et al., 2020c; Mandl et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 485, |
| "end": 515, |
| "text": "(Avvaru and Vobilisetty, 2020)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 736, |
| "end": 750, |
| "text": "(French, 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 751, |
| "end": 769, |
| "text": "Nave et al., 2018)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 933, |
| "end": 947, |
| "text": "(Milner, 2013)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1237, |
| "end": 1267, |
| "text": "(Chakravarthi et al., 2020b,a)", |
| "ref_id": null |
| }, |
| { |
| "start": 1447, |
| "end": 1461, |
| "text": "(Milner, 2013;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1462, |
| "end": 1487, |
| "text": "Ghanghor et al., 2021b,a)", |
| "ref_id": null |
| }, |
| { |
| "start": 1728, |
| "end": 1748, |
| "text": "(Kumar et al., 2018;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1749, |
| "end": 1772, |
| "text": "Yasaswini et al., 2021;", |
| "ref_id": null |
| }, |
| { |
| "start": 1773, |
| "end": 1794, |
| "text": "Puranik et al., 2021;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1795, |
| "end": 1822, |
| "text": "Chakravarthi et al., 2020c;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1823, |
| "end": 1842, |
| "text": "Mandl et al., 2020)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Consider countries with huge populations such as India, several memes are directed towards targeted communities. To address these issues of identifying if a given meme is trolling a person's sentiments, a dataset for memes that were suspected to troll a particular community. We participate in the shared task on meme classification based on the troll classification of Tamil Memes (Suryawanshi et al., 2020) . Tamil (ISO 639-3: tam) language is spoken in South Asia (Chakravarthi, 2020). The earliest inscription in India dated from 580 BCE was the Tamil inscription in pottery and then the Asoka inscription in Prakrit, Greek, and Aramaic dating from 260 BCE. The earliest known inscriptions in Sanskrit are from the inscriptions of the 1st century BCE. Tamil is the official language of Tamil Nadu, India, as well as of Singapore and Sri Lanka (Chakravarthi et al., 2018, 2019) . The task primarily consists of identifying whether a meme is a troll or a non-troll (Suryawanshi and Chakravarthi, 2021) . We use the images and captions that are provided to achieve the most efficient model to classify the memes. We use a combination of Vision Transform (ViT) (Dosovitskiy et al., 2021) and mBERT (Pires et al., 2019) over other pretrained models used for image classification as described in (Venkatesh et al., 2020 (Venkatesh et al., , 2021 . (Dosovitskiy et al., 2021; Devlin et al., 2019) 2 Related Work", |
| "cite_spans": [ |
| { |
| "start": 382, |
| "end": 408, |
| "text": "(Suryawanshi et al., 2020)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 823, |
| "end": 836, |
| "text": "Singapore and", |
| "ref_id": null |
| }, |
| { |
| "start": 837, |
| "end": 880, |
| "text": "Sri Lanka (Chakravarthi et al., 2018, 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 967, |
| "end": 1003, |
| "text": "(Suryawanshi and Chakravarthi, 2021)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1161, |
| "end": 1187, |
| "text": "(Dosovitskiy et al., 2021)", |
| "ref_id": null |
| }, |
| { |
| "start": 1198, |
| "end": 1218, |
| "text": "(Pires et al., 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1294, |
| "end": 1317, |
| "text": "(Venkatesh et al., 2020", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1318, |
| "end": 1343, |
| "text": "(Venkatesh et al., , 2021", |
| "ref_id": null |
| }, |
| { |
| "start": 1346, |
| "end": 1372, |
| "text": "(Dosovitskiy et al., 2021;", |
| "ref_id": null |
| }, |
| { |
| "start": 1373, |
| "end": 1393, |
| "text": "Devlin et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Internet memes have been a subject of interest for both Computer Vision and Natural Language Processing researchers. The type of memes that are being used illustrates the context of discussions on social media platforms. People are using memes to express themselves, and in the making, showcase their stance on a certain social issue, be it in acknowledgment or rejection of the issue (French, 2017; Boinepelli et al., 2020; Gal et al., 2016) . There exist several reasons that suggest the spread of memes. Some of the reasons include novelty, simplicity, coherence. It also includes an emotional attachment, its ability to have different meanings, depending on how a person perceives it (Nave et al., 2018; Stephens, 2018; Chielens and Heylighen, 2002) . Hu and Flaxman developed a multimodal sentiment analysis by developing a deep neural network that combines both visual analysis and text analysis to predict the emotional state of the user by using Tumblr posts.", |
| "cite_spans": [ |
| { |
| "start": 385, |
| "end": 399, |
| "text": "(French, 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 400, |
| "end": 424, |
| "text": "Boinepelli et al., 2020;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 425, |
| "end": 442, |
| "text": "Gal et al., 2016)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 688, |
| "end": 707, |
| "text": "(Nave et al., 2018;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 708, |
| "end": 723, |
| "text": "Stephens, 2018;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 724, |
| "end": 753, |
| "text": "Chielens and Heylighen, 2002)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We use Troll Classification dataset of Tamil Memes (Suryawanshi et al., 2020) . It consists of 2,699 memes, of which most of the images have text embedded within them. We are also provided with captions for all images. The distribution is shown is ", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 77, |
| "text": "(Suryawanshi et al., 2020)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Multimodal deep learning is a robust and efficient way of addressing the main goals of artificial intelligence by integrating and combining multiple communicative modalities to obtain crucial results which usually improves the outcome of the single models trained. As deep learning models tend to extract features on their own, the objective can easily be achieved with the help of neural networks. Given the images of Tamil Memes, along with the embedded text on the images, scrutiny of images and texts independently and then picking out relevant information for further process plays a climacteric role in our system. At the end of the training, the model has to output a single value stating the given meme is Troll or Non-Troll. The specialty of our model was to neither use the Convolutional Neural Networks (CNN) nor Recurrent Neural Networks (RNN). As the title of the paper points out, the model tries to gain more attention towards the salient portions of text and images. The proposed solution makes an effort to convey the importance of attention gain and its relation ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The architecture of the ViT is analogous to the transformer used for Natural Language Processing (NLP) tasks. NLP transformers use self-attention which is a highly cost-inefficient approach in regard to images. Admitting this, the technique applied here was Global Attention. Keeping the analogy of sentences, instead of 1D token embeddings as input, ViT receives a sequence of flattened 2D patches. If H, W is the height and width of the image and (P, P) is the resolution of each patch, N = HW/P 2 is the effective sequence length for the transformer (Dosovitskiy et al., 2021) . Then the patches are projected linearly and then multiplied with an embedding matrix to eventually form patched embeddings. This along with position embeddings are sent through the transformer. Similar to BERT's [CLS] token, a token is prepended along with the patched embeddings. The transformer consists of an encoder block which consists of alternating layers of multiheaded self-attention blocks to generate attention for specific regions of the images. Layer normalization and residual connections are made comparable to the original NLP transformer.", |
| "cite_spans": [ |
| { |
| "start": 553, |
| "end": 579, |
| "text": "(Dosovitskiy et al., 2021)", |
| "ref_id": null |
| }, |
| { |
| "start": 794, |
| "end": 799, |
| "text": "[CLS]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Vision Transformer (ViT)", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The success of fine-tuning a pretrained model in computer-vision prompted researchers to do the same in Natural Language Processing. Therefore it was the objective of the researchers to develop a model which can be fine-tuned for NLP related works. Bidirectional Encoder Representations from Transformers (BERT) (Devlin et al., 2019) is a language representation model which was trained on Wikipedia corpus. The training phase had two tasks. First was Masked Language Modelling(MLM), where the sentence had random masks in them and the model has to predict the masked word. The second task Next Sentence Prediction(NSP), where the model has to predict whether the second sentence is the continuation of the first one. The input to the transformer is the sum of the token segmentation and positional embeddings. As the name suggests, the model is jointly conditioned on both left and right contexts to extract meaning. BERT is comparable to the transformer encoder block of (Vaswani et al., 2017) . The NSP task matches the classification task for the objective of the model. During NSP, two sentence separated by [SEP] and [CLS] token are fed in and the output of the [CLS] token is pondered upon to determine the required class. Here, the input is only a single sentence with tokens and the model is fine-tuned as necessary. Table 4 : Classification report of our system on the test set of the top 104 different languages, with the largest MLM objective, also making the model case sensitive (Pires et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 312, |
| "end": 333, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 973, |
| "end": 995, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1113, |
| "end": 1118, |
| "text": "[SEP]", |
| "ref_id": null |
| }, |
| { |
| "start": 1123, |
| "end": 1128, |
| "text": "[CLS]", |
| "ref_id": null |
| }, |
| { |
| "start": 1168, |
| "end": 1173, |
| "text": "[CLS]", |
| "ref_id": null |
| }, |
| { |
| "start": 1493, |
| "end": 1513, |
| "text": "(Pires et al., 2019)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1326, |
| "end": 1333, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "BERT", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "All suitable models were implemented using Py-Torch version 1.5.0 in a google colaboratory environment. The early stages of this model include preprocessing of images. The dataset had pictures with various resolutions and had to be made equal. The images were resized to 256 X 256 pixels. Most of the images had texts on the top and bottom of the images. Texts in the images were considered as noise for classification, which resulted in performing a center crop for all images. The border of the portions was removed and images of size 224 X 224 were produced. Finally, the images were ready as the input to the transformer by normalizing the RGB channels with mean 0.485, 0.456, 0.406, and standard deviation 0.229, 0.224, 0.225 respectively. No augmentations were made to preserve the meaning of the images. The transformer was originally trained on the ImageNet dataset and had achieved remarkable results. The trained weights are transferred to this downstream task. The base version of ViT is fine-tuned which had default hyperparameters of 16 patches, an embed dimension of 768, 12 layers, 12 attention heads, and a dropout rate of 0.1. The head of the vision transformer, which outputs 1000 classes, is now replaced by a linear layer of 128 neurons. The texts were also preprocessed by removing stopwords, special characters, and punctuation. Texts need to be tokenized before feeding into the BERT configuration. After inserting it into the transformer, the resulting pooled output from the multilingual BERT model is also passed through a linear layer of 128 neurons. The two layers obtained from the transformers are merged together to form a single layer with 256 neurons. This is passed through the ReLu activation function and a dropout to obtain one final neuron which determines the class as Troll or Non- Troll. A learning rate of 2e \u2212 5 was used with a batch size of 16. 
The maximum length of the captions was truncated to 128 as memes usually do not contain very long sentences. The training was done for 4 epochs and with a linear schedule with warmup. To our surprise, the model learned very rapidly and achieved well progress on the validation set which mimicked the train set. It was also observed that merging the outputs of two different domain models did not harm the training, moreover, it helped in getting better results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We achieve an overall F1-score of 0.96 when we use images for classification using ViT as shown in 2. It is to be noted that using mBERT to clas-sify memes solely based on the captions achieves 0.93 as F1-score as shown in Table 3 . While we achieve such good results in comparison to the baseline scores of 0.59 mentioned in the dataset paper, we feel that if both of representations of ViT and mBERT were concatenated and then fed into a linear layer, the model would learn better. We find that the model achieves a perfect 1.00 weighted F1-score on the validation set. We believe that preprocessing of the images was a major factor for achieving a great F1-score on validation set. This argument is supported by our system's poor performance on the test set, as the test set was not coherent with the training data in terms of the positioning of texts on the images as shown in Table 4 . The confusion matrix on validation and test set are as shown in Figures 2(a) and 2(b) respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 223, |
| "end": 230, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 881, |
| "end": 888, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 955, |
| "end": 967, |
| "text": "Figures 2(a)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The proposed solution performs at greater heights on the validation and set in the training phase. The validation set mimics the train set as the memes are split looking at the distribution of the classes. The dataset is very small and augmenting it will not help for the optimal results. The algorithm overfits the train set undoubtedly. The reason behind the poor performance is due to the change in the distribution. The memes in the test set had multiple images which were difficult for the ViT to capture features. The model scored a F1 score of 0.46 on the test set and 1.0 on the validation set. Vast difference can be observed due to high bias. Here, in this paper, we have tried to come up with this innovation of transformer-transformer architecture which can achieve extreme results. In the future, we will be performing a wonderful task of having more transformers in parallel computation and syncing them makes an immense difference in this era of deep learning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": ".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "7" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "BERT at SemEval-2020 task 8: Using BERT to analyse meme emotions", |
| "authors": [ |
| { |
| "first": "Adithya", |
| "middle": [], |
| "last": "Avvaru", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanath", |
| "middle": [], |
| "last": "Vobilisetty", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fourteenth Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1094--1099", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adithya Avvaru and Sanath Vobilisetty. 2020. BERT at SemEval-2020 task 8: Using BERT to analyse meme emotions. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 1094- 1099, Barcelona (online). International Committee for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "SIS@IIITH at SemEval-2020 task 8: An overview of simple text classification methods for meme analysis", |
| "authors": [ |
| { |
| "first": "Sravani", |
| "middle": [], |
| "last": "Boinepelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Manish", |
| "middle": [], |
| "last": "Shrivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasudeva", |
| "middle": [], |
| "last": "Varma", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fourteenth Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1190--1194", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sravani Boinepelli, Manish Shrivastava, and Vasudeva Varma. 2020. SIS@IIITH at SemEval-2020 task 8: An overview of simple text classification meth- ods for meme analysis. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 1190-1194, Barcelona (online). International Com- mittee for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Leveraging orthographic information to improve machine translation of under-resourced languages", |
| "authors": [ |
| { |
| "first": "Chakravarthi", |
| "middle": [], |
| "last": "Bharathi Raja", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi. 2020. Leveraging ortho- graphic information to improve machine translation of under-resourced languages. Ph.D. thesis, NUI Galway.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Improving wordnets for underresourced languages using machine translation", |
| "authors": [ |
| { |
| "first": "Mihael", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "P" |
| ], |
| "last": "Arcan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 9th Global Wordnet Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "77--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Mihael Arcan, and John P. McCrae. 2018. Improving wordnets for under- resourced languages using machine translation. In Proceedings of the 9th Global Wordnet Conference, pages 77-86, Nanyang Technological University (NTU), Singapore. Global Wordnet Association.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "WordNet gloss translation for underresourced languages using multilingual neural machine translation", |
| "authors": [ |
| { |
| "first": "Mihael", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "P" |
| ], |
| "last": "Arcan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Second Workshop on Multilingualism at the Intersection of Knowledge Bases and Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--7", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Mihael Arcan, and John P. McCrae. 2019. WordNet gloss translation for under- resourced languages using multilingual neural ma- chine translation. In Proceedings of the Second Workshop on Multilingualism at the Intersection of Knowledge Bases and Machine Translation, pages 1-7, Dublin, Ireland. European Association for Ma- chine Translation.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A sentiment analysis dataset for codemixed Malayalam-English", |
| "authors": [ |
| { |
| "first": "Navya", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Jose", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "Philip" |
| ], |
| "last": "Sherly", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mc-Crae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
| "volume": "", |
| "issue": "", |
| "pages": "177--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Navya Jose, Shardul Suryawanshi, Elizabeth Sherly, and John Philip Mc- Crae. 2020a. A sentiment analysis dataset for code- mixed Malayalam-English. In Proceedings of the 1st Joint Workshop on Spoken Language Technolo- gies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 177-184, Marseille, France. European Language Resources association.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Corpus creation for sentiment analysis in code-mixed Tamil-English text", |
| "authors": [ |
| { |
| "first": "Vigneshwaran", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Muralidaran", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "Philip" |
| ], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mc-Crae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
| "volume": "", |
| "issue": "", |
| "pages": "202--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Vigneshwaran Murali- daran, Ruba Priyadharshini, and John Philip Mc- Crae. 2020b. Corpus creation for sentiment anal- ysis in code-mixed Tamil-English text. In Pro- ceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced lan- guages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 202-210, Marseille, France. European Language Re- sources association.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Overview of the Track on Sentiment Analysis for Dravidian Languages in Code-Mixed Text", |
| "authors": [ |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Vigneshwaran", |
| "middle": [], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Muralidaran", |
| "suffix": "" |
| }, |
| { |
| "first": "Navya", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Jose", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "P" |
| ], |
| "last": "Sherly", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "In Forum for Information Retrieval Evaluation", |
| "volume": "2020", |
| "issue": "", |
| "pages": "21--24", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3441501.3441515" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Ruba Priyadharshini, Vigneshwaran Muralidaran, Shardul Suryawanshi, Navya Jose, Elizabeth Sherly, and John P. McCrae. 2020c. Overview of the Track on Sentiment Analy- sis for Dravidian Languages in Code-Mixed Text. In Forum for Information Retrieval Evaluation, FIRE 2020, page 21-24, New York, NY, USA. Associa- tion for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Operationalization of meme selection criteria : Methodologies to empirically test memetic predictions", |
| "authors": [ |
| { |
| "first": "Klaas", |
| "middle": [], |
| "last": "Chielens", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Heylighen", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Klaas Chielens and F. Heylighen. 2002. Operational- ization of meme selection criteria : Methodologies to empirically test memetic predictions.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Jakob Uszkoreit, and Neil Houlsby. 2021. An image is worth 16x16 words: Transformers for image recognition at scale", |
| "authors": [ |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Dosovitskiy", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucas", |
| "middle": [], |
| "last": "Beyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Kolesnikov", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Weissenborn", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaohua", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Unterthiner", |
| "suffix": "" |
| }, |
| { |
| "first": "Mostafa", |
| "middle": [], |
| "last": "Dehghani", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Minderer", |
| "suffix": "" |
| }, |
| { |
| "first": "Georg", |
| "middle": [], |
| "last": "Heigold", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Gelly", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. 2021. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Image-based memes as sentiment predictors", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "H" |
| ], |
| "last": "French", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 International Conference on Information Society (i-Society)", |
| "volume": "", |
| "issue": "", |
| "pages": "80--85", |
| "other_ids": { |
| "DOI": [ |
| "10.23919/i-Society.2017.8354676" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. H. French. 2017. Image-based memes as sentiment predictors. In 2017 International Conference on In- formation Society (i-Society), pages 80-85.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "it gets better\": Internet memes and the construction of collective identity. New Media & Society", |
| "authors": [ |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Gal", |
| "suffix": "" |
| }, |
| { |
| "first": "Limor", |
| "middle": [], |
| "last": "Shifman", |
| "suffix": "" |
| }, |
| { |
| "first": "Zohar", |
| "middle": [], |
| "last": "Kampf", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "18", |
| "issue": "", |
| "pages": "1698--1714", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noam Gal, Limor Shifman, and Zohar Kampf. 2016. \"it gets better\": Internet memes and the construc- tion of collective identity. New Media & Society, 18:1698 -1714.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Ruba Priyadharshini, and Bharathi Raja Chakravarthi. 2021a. IIITK@DravidianLangTech-EACL2021: Offensive Language Identification and Meme Classification in Tamil, Malayalam and Kannada", |
| "authors": [ |
| { |
| "first": "Parameswari", |
| "middle": [], |
| "last": "Nikhil Kumar Ghanghor", |
| "suffix": "" |
| }, |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Krishnamurthy", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikhil Kumar Ghanghor, Parameswari Krishna- murthy, Sajeetha Thavareesan, Ruba Priyad- harshini, and Bharathi Raja Chakravarthi. 2021a. IIITK@DravidianLangTech-EACL2021: Offensive Language Identification and Meme Classification in Tamil, Malayalam and Kannada. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "IIITK@LT-EDI-EACL2021: Hope Speech Detection for Equality, Diversity, and Inclusion in Tamil, Malayalam and English", |
| "authors": [ |
| { |
| "first": "Rahul", |
| "middle": [], |
| "last": "Nikhil Kumar Ghanghor", |
| "suffix": "" |
| }, |
| { |
| "first": "Prasanna", |
| "middle": [], |
| "last": "Ponnusamy", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Kumar Kumaresan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chakravarthi", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the First Workshop on Language Technology for Equality", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikhil Kumar Ghanghor, Rahul Ponnusamy, Prasanna Kumar Kumaresan, Ruba Priyad- harshini, Sajeetha Thavareesan, and Bharathi Raja Chakravarthi. 2021b. IIITK@LT-EDI-EACL2021: Hope Speech Detection for Equality, Diversity, and Inclusion in Tamil, Malayalam and English. In Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Multimodal sentiment analysis to explore the structure of emotions", |
| "authors": [ |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Seth", |
| "middle": [], |
| "last": "Flaxman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3219819.3219853" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anthony Hu and Seth Flaxman. 2018. Multimodal sentiment analysis to explore the structure of emo- tions. Proceedings of the 24th ACM SIGKDD In- ternational Conference on Knowledge Discovery & Data Mining.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Benchmarking aggression identification in social media", |
| "authors": [ |
| { |
| "first": "Ritesh", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Atul", |
| "middle": [], |
| "last": "Kr", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Ojha", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the First Workshop on Trolling, Aggression and Cyberbullying (TRAC-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--11", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ritesh Kumar, Atul Kr. Ojha, Shervin Malmasi, and Marcos Zampieri. 2018. Benchmarking aggression identification in social media. In Proceedings of the First Workshop on Trolling, Aggression and Cyber- bullying (TRAC-2018), pages 1-11, Santa Fe, New Mexico, USA. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Overview of the HASOC Track at FIRE 2020: Hate Speech and Offensive Language Identification in Tamil", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Mandl", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandip", |
| "middle": [], |
| "last": "Modha", |
| "suffix": "" |
| }, |
| { |
| "first": "Anand", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja Chakravarthi ;", |
| "middle": [], |
| "last": "Malayalam", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hindi", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "English", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Forum for Information Retrieval Evaluation", |
| "volume": "2020", |
| "issue": "", |
| "pages": "29--32", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3441501.3441517" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Mandl, Sandip Modha, Anand Kumar M, and Bharathi Raja Chakravarthi. 2020. Overview of the HASOC Track at FIRE 2020: Hate Speech and Offensive Language Identification in Tamil, Malay- alam, Hindi, English and German. In Forum for Information Retrieval Evaluation, FIRE 2020, page 29-32, New York, NY, USA. Association for Com- puting Machinery.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Fcj-156 hacking the social: Internet memes, identity antagonism, and the logic of lulz", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "M" |
| ], |
| "last": "Milner", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "The Fibreculture Journal", |
| "volume": "", |
| "issue": "", |
| "pages": "61--91", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. M. Milner. 2013. Fcj-156 hacking the social: In- ternet memes, identity antagonism, and the logic of lulz. The Fibreculture Journal, pages 61-91.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Talking it personally: Features of successful political posts on facebook", |
| "authors": [ |
| { |
| "first": "Limor", |
| "middle": [], |
| "last": "Nir Noon Nave", |
| "suffix": "" |
| }, |
| { |
| "first": "Keren", |
| "middle": [], |
| "last": "Shifman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tenenboim-Weinblatt", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Social Media + Society", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nir Noon Nave, Limor Shifman, and Keren Tenenboim- Weinblatt. 2018. Talking it personally: Features of successful political posts on facebook. Social Media + Society, 4.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "How multilingual is multilingual BERT?", |
| "authors": [ |
| { |
| "first": "Telmo", |
| "middle": [], |
| "last": "Pires", |
| "suffix": "" |
| }, |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Schlinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4996--5001", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1493" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual BERT? In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4996- 5001, Florence, Italy. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "IIITT@LT-EDI-EACL2021-Hope Speech Detection: There is always hope in Transformers", |
| "authors": [ |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Puranik", |
| "suffix": "" |
| }, |
| { |
| "first": "Adeep", |
| "middle": [], |
| "last": "Hande", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja", |
| "middle": [], |
| "last": "Chakravarthi", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karthik Puranik, Adeep Hande, Ruba Priyad- harshini, Sajeetha Thavareesan, and Bharathi Raja Chakravarthi. 2021. IIITT@LT-EDI-EACL2021- Hope Speech Detection: There is always hope in Transformers. In Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Ryan m. milner, the world made meme: Public conversations and participatory media", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Niall", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Stephens", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Journal of Communication", |
| "volume": "12", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Niall P Stephens. 2018. Ryan m. milner, the world made meme: Public conversations and participatory media. International Journal of Communication, 12:4.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Findings of the shared task on Troll Meme Classification in Tamil", |
| "authors": [ |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shardul Suryawanshi and Bharathi Raja Chakravarthi. 2021. Findings of the shared task on Troll Meme Classification in Tamil. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A dataset for troll classification of TamilMemes", |
| "authors": [ |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihael", |
| "middle": [], |
| "last": "Verma", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Arcan", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Philip Mccrae", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Buitelaar", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the WILDRE5-5th Workshop on Indian Language Data: Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "7--13", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shardul Suryawanshi, Bharathi Raja Chakravarthi, Pranav Verma, Mihael Arcan, John Philip McCrae, and Paul Buitelaar. 2020. A dataset for troll clas- sification of TamilMemes. In Proceedings of the WILDRE5-5th Workshop on Indian Language Data: Resources and Evaluation, pages 7-13, Marseille, France. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, volume 30. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Transfer learning based convolutional neural network model for classification of mango leaves infected by anthracnose", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Venkatesh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nagaraju", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sahanat", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Swetha", |
| "suffix": "" |
| }, |
| { |
| "first": "U", |
| "middle": [], |
| "last": "Siddhanth", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hegde", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "IEEE International Conference for Innovation in Technology (INOCON)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--7", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Venkatesh, Y Nagaraju, S SahanaT, S Swetha, and Sid- dhanth U Hegde. 2020. Transfer learning based con- volutional neural network model for classification of mango leaves infected by anthracnose. 2020 IEEE International Conference for Innovation in Technol- ogy (INOCON), pages 1-7.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Siddhanth Udayashankar Hegde, and Sangeetha Raj Stalin. 2021. Fine-tuned mobilenet classifier for classification of strawberry and cherry fruit types", |
| "authors": [ |
| { |
| "first": "Nagaraju", |
| "middle": [], |
| "last": "Venkatesh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Yallappa", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Journal of Computer Science", |
| "volume": "17", |
| "issue": "1", |
| "pages": "44--54", |
| "other_ids": { |
| "DOI": [ |
| "10.3844/jcssp.2021.44.54" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Venkatesh, Nagaraju Yallappa, Sid- dhanth Udayashankar Hegde, and Sangeetha Raj Stalin. 2021. Fine-tuned mobilenet classifier for classification of strawberry and cherry fruit types. Journal of Computer Science, 17(1):44-54.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Sajeetha Thavareesan, and Bharathi Raja Chakravarthi. 2021. IIITT@DravidianLangTech-EACL2021: Transfer Learning for Offensive Language Detection in Dravidian Languages", |
| "authors": [ |
| { |
| "first": "Konthala", |
| "middle": [], |
| "last": "Yasaswini", |
| "suffix": "" |
| }, |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Puranik", |
| "suffix": "" |
| }, |
| { |
| "first": "Adeep", |
| "middle": [], |
| "last": "Hande", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Priyadharshini", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Konthala Yasaswini, Karthik Puranik, Adeep Hande, Ruba Priyadharshini, Sajeetha Thava- reesan, and Bharathi Raja Chakravarthi. 2021. IIITT@DravidianLangTech-EACL2021: Transfer Learning for Offensive Language Detection in Dravidian Languages. In Proceedings of the First Workshop on Speech and Language Technolo- gies for Dravidian Languages. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "System Architecture", |
| "num": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Figure 2: Confusion Matrix", |
| "num": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "content": "<table><tr><td>Class</td><td>Train</td><td>Validation</td><td>Test</td></tr><tr><td>Troll</td><td>1,154</td><td>128</td><td>395</td></tr><tr><td>Non-Troll</td><td>917</td><td>101</td><td>272</td></tr><tr><td>total</td><td>2,071</td><td>229</td><td>667</td></tr></table>", |
| "text": "", |
| "num": null, |
| "html": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "", |
| "num": null, |
| "html": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "content": "<table><tr><td/><td>Precision</td><td>Recall</td><td>F1-Score</td><td>Support</td></tr><tr><td>Non-Troll</td><td>0.87</td><td>0.99</td><td>0.93</td><td>101</td></tr><tr><td>Troll</td><td>0.99</td><td>0.88</td><td>0.93</td><td>128</td></tr><tr><td>Accuracy</td><td/><td/><td>0.93</td><td>229</td></tr><tr><td>Macro Avg</td><td>0.93</td><td>0.94</td><td>0.93</td><td>229</td></tr><tr><td>Weighted Avg</td><td>0.94</td><td>0.93</td><td>0.93</td><td>229</td></tr></table>", |
| "text": "Classification report of ViT to images of validation set", |
| "num": null, |
| "html": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "", |
| "num": null, |
| "html": null |
| } |
| } |
| } |
| } |