| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:20:46.409746Z" |
| }, |
| "title": "NLP-CUET@DravidianLangTech-EACL2021: Investigating Visual and Textual Features to Identify Trolls from Multimodal Social Media Memes", |
| "authors": [ |
| { |
| "first": "Eftekhar", |
| "middle": [], |
| "last": "Hossain", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Telecommunication Engineering Chittagong University of Engineering and Technology", |
| "location": { |
| "country": "Bangladesh" |
| } |
| }, |
| "email": "eftekhar.hossain@cuet.ac.bd" |
| }, |
| { |
| "first": "Omar", |
| "middle": [], |
| "last": "Sharif", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "omar.sharif@cuet.ac.bd" |
| }, |
| { |
| "first": "Mohammed", |
| "middle": [ |
| "Moshiul" |
| ], |
| "last": "Hoque", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In the past few years, the meme has become a new way of communication on the Internet. As memes are the images with embedded text, it can quickly spread hate, offence and violence. Classifying memes are very challenging because of their multimodal nature and regionspecific interpretation. A shared task is organized to develop models that can identify trolls from multimodal social media memes. This work presents a computational model that we have developed as part of our participation in the task. Training data comes in two forms: an image with embedded Tamil code-mixed text and an associated caption given in English. We investigated the visual and textual features using CNN, VGG16, Inception, Multilingual-BERT, XLM-Roberta, XLNet models. Multimodal features are extracted by combining image (CNN, ResNet50, Inception) and text (Long short term memory network) features via early fusion approach. Results indicate that the textual approach with XLNet achieved the highest weighted f 1-score of 0.58, which enabled our model to secure 3 rd rank in this task.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In the past few years, the meme has become a new way of communication on the Internet. As memes are the images with embedded text, it can quickly spread hate, offence and violence. Classifying memes are very challenging because of their multimodal nature and regionspecific interpretation. A shared task is organized to develop models that can identify trolls from multimodal social media memes. This work presents a computational model that we have developed as part of our participation in the task. Training data comes in two forms: an image with embedded Tamil code-mixed text and an associated caption given in English. We investigated the visual and textual features using CNN, VGG16, Inception, Multilingual-BERT, XLM-Roberta, XLNet models. Multimodal features are extracted by combining image (CNN, ResNet50, Inception) and text (Long short term memory network) features via early fusion approach. Results indicate that the textual approach with XLNet achieved the highest weighted f 1-score of 0.58, which enabled our model to secure 3 rd rank in this task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "With the Internet's phenomenal growth, social media has become a platform for sharing information, opinion, feeling, expressions, and ideas. Most users enjoy the liberty to post or share contents in such virtual platforms without any legal authority intervention or moderation Mahesan, 2019, 2020a,b) . Some people misuse this freedom and take social platforms to spread negativity, threat and offence against individuals or communities in various ways (Chakravarthi, 2020) . One such way is making and sharing troll memes to provoke, offend and demean a group or race on the Internet (Mojica de la Vega and Ng, 2018). Although memes meant to be sarcastic or humorous, sometimes it becomes aggressive, threatening and abusive (Suryawanshi et al., 2020a) . Till to date, extensive research has been conducted to detect hate, hostility, and aggression from a single modality such as image or text (Kumar et al., 2020) . Identification of troll, offence, abuse by analyzing the combined information of visual and textual modalities is still an unexplored research avenue in natural language processing (NLP). Classifying memes from multi-model data is a challenging task since memes express sarcasm and humour implicitly. One meme may not be a troll if we consider only image or text associated with it. However, it can be a troll if it considers both text and image modalities. Such implicit meaning of memes, use of sarcastic, ambiguous and humorous terms and absence of baseline algorithms that take care of multiple modalities are the primary concerns of categorizing multimodal memes. Features from multiple modalities (i.e image, text) have been exploited in many works to solve these problems (Suryawanshi et al., 2020a; Pranesh and Shekhar, 2020) . We plan to address this issue by using transfer learning as these models have better generalization capability than the models trained on small dataset. 
This work is a little effort to compensate for the existing deficiency of assign task with the following contributions:", |
| "cite_spans": [ |
| { |
| "start": 277, |
| "end": 300, |
| "text": "Mahesan, 2019, 2020a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 453, |
| "end": 473, |
| "text": "(Chakravarthi, 2020)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 726, |
| "end": 753, |
| "text": "(Suryawanshi et al., 2020a)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 895, |
| "end": 915, |
| "text": "(Kumar et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1697, |
| "end": 1724, |
| "text": "(Suryawanshi et al., 2020a;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1725, |
| "end": 1751, |
| "text": "Pranesh and Shekhar, 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Developed a classifier model using XLNet to identify trolls from multimodal social media memes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Investigate the performance of various transfer learning techniques with the benchmark experimental evaluation by exploring visual, textual and multimodal features of the data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the past few years, trolling, aggression, hostile and abusive language detection from social media data has been studied widely by NLP experts (Kumar et al., 2020; Sharif et al., 2021; Mandl et al., 2020) . Majority of these researches carried out concerning the textual information alone (Akiwowo et al., 2020) . However, a meme existence can be found in a basic image, text embedded in image or image with sarcastic caption. ", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 166, |
| "text": "(Kumar et al., 2020;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 167, |
| "end": 187, |
| "text": "Sharif et al., 2021;", |
| "ref_id": null |
| }, |
| { |
| "start": 188, |
| "end": 207, |
| "text": "Mandl et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 292, |
| "end": 314, |
| "text": "(Akiwowo et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Meme as the troll is an image that has offensive or sarcastic text embedded into it and intent to demean, provoke or offend an individual or a group (Suryawanshi et al., 2020b ). An image itself can also be a troll meme without any embedded text into it. In this task, we aim to detect troll meme from an image and its associated caption. Participants are allowed to use image, caption or both to perform the classification task. We used image, text, and multimodal (i.e., image + text) features to address the assigned task (detail analysis is discussed in Section 4).", |
| "cite_spans": [ |
| { |
| "start": 149, |
| "end": 175, |
| "text": "(Suryawanshi et al., 2020b", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task and Dataset Descriptions", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The prime concern of this work is to classify trolling from the multimodal memes. Initially, the investigation begins with accounting only the images' visual features where different CNN architectures will use. The textual features will consider in the next and apply the transformerbased model (i.e. m-BERT, XLM-R, XLNet) for the classification task. Finally, we investigate the effect of combined visual and textual features and compare its performance with the other approaches. Figure 1 shows the abstract view of the employed techniques.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 482, |
| "end": 490, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A CNN architecture is used to experiment on visual modality. Besides the pre-trained models, VGG16 and Inception are also employed for the classification task. Before feeding into the model, all the images get resized into a dimension of 150 \u00d7 150 \u00d7 3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Visual Approach", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We design a CNN architecture consists of four convolution layers. The first and second layers contained 32 and 64 filters, while 128 filters are used in third and fourth layers. In all layers, convolution is performed by 3 \u00d7 3 kernel and used the Relu non-linearity function. To extract the critical features max pooling is performed with 2 \u00d7 2 window after every convolution layer. The flattened output of the final convolution layer is feed to a dense layer consisting of 512 neurons. To mitigate the chance of overfitting a dropout layer is introduced with a dropout rate of 0.1. Finally, a sigmoid layer is used for the class prediction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CNN:", |
| "sec_num": null |
| }, |
| { |
| "text": "VGG16: A CNN architecture (Simonyan and Zisserman, 2015) is pre-trained on over 14 million images of 1000 classes. To accomplish the task, we froze the top layers of VGG16 and fine-tuned it on our images with adding one global average pooling layer followed by an FC layer of 256 neurons and a sigmoid layer which is used for the class prediction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CNN:", |
| "sec_num": null |
| }, |
| { |
| "text": "Inception: We fine-tuned the InceptionV3 (Szegedy et al., 2015 ) network on our images by freezing the top layers. A global average pooling layer is added along with a fully connected layer of 256 neurons, followed by a sigmoid layer on top of these networks. In order to train the models, 'binary crossentropy' loss function and 'RMSprop' optimizer is utilized with learning rate 1e \u22123 . Training is performed for 50 epochs with passing 32 instances in a single iteration. For saving the best intermediate model, we use the Keras callback function.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 62, |
| "text": "(Szegedy et al., 2015", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CNN:", |
| "sec_num": null |
| }, |
| { |
| "text": "Various state-of-the-art transformer models are used including m-BERT (Devlin et al., 2018) , XLNet (Yang et al., 2019) , and XLM-Roberta (Conneau et al., 2019) to investigate the textual modality. We selected 'bert-base-multilingualcased', 'xlnet-base-cased', and 'xlm-Roberta-base' models from Pytorch Huggingface 2 transformers library and fine-tuned them on our textual data. Implementation is done by using Ktrain (Maiya, 2020) package. For fine-tuning, we settled the maximum caption length 50 and used a learning rate of 2e \u22125 with a batch size 8 for all models. Ktrain 'fit onecycle' method is used to train the models for 20 epochs. The early stopping technique is utilized to alleviate the chance of overfitting.", |
| "cite_spans": [ |
| { |
| "start": 70, |
| "end": 91, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 100, |
| "end": 119, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 138, |
| "end": 160, |
| "text": "(Conneau et al., 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Textual Approach", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To verify the singular modality approaches effectiveness, we continue our investigation by incorporating both visual and textual modality into one input data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multimodal Approach", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Various multimodal classification tasks adopted this approach (Pranesh and Shekhar, 2020). We have employed an early fusion approach (Duong et al., 2017) instead of using one modality feature to accept both modalities as inputs and classify them by extracting suitable features from both modalities. For extracting visual features, CNN is used, whereas bidirectional long short term memory (BiLSTM) network is applied for handling the textual features. Different multimodal models have been constructed for the comparison. However, due to the high computational complexity, incorporating transformer models with CNN-Image models has not experimented within the current work.", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 153, |
| "text": "(Duong et al., 2017)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multimodal Approach", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "CNNImage + BiLSTM: Firstly, CNN architecture is employed to extract the image features. It consists of four convolutional layers consisting of 32, 64, 128, and 64 filters with a size of 3 \u00d7 3 in 1 st -4 th layers. Each convolutional layer followed by a maxpool layer with a pooling size of 2 \u00d7 2. An FC layer with 256 neurons and a sigmoid layer is added after the flatten layer. On the contrary, Word2Vec (Mikolov et al., 2013) embedding technique applied to extract features from the captions/texts. We use the Keras embedding layer with embedding dimension 100. A BiLSTM layer with 128 cells is added at the top of the embedding layer to capture long-term dependencies from the texts. Finally, the output of the BiLSTM layer is passed to a sigmoid layer.", |
| "cite_spans": [ |
| { |
| "start": 406, |
| "end": 428, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multimodal Approach", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "After that, the two constructed models' output layers are concatenated together and created a new model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multimodal Approach", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We combined the pretrained Inception and BiLSTM network for the classification using both modalities. The inception model is fetched from Keras library and used it as a visual feature extractor. By excluding the top layers, we fine-tuned it on our images with one additional FC layer of 256 neurons and a sigmoid layer. For textual features, similar BiLSTM architecture is employed (Described in CNNImage + BiLSTM). The Keras concatenation layer used two models output layers and combined them to create one model. ResNet50 + BiLSTM: Pretrained residual network (ResNet) (He et al., 2015) is employed to extract visual features. The model is taken from Keras library. To fine-tuned it on our images, the final pooling and FC layer of the ResNet model is excluded. Afterwards, we have added a global average pooling layer with a fully connected layer and a sigmoid layer at the ResNet model's top. To extract the textual features identical BiLSTM network is employed (Described in CNNImage + BiLSTM). In the end, the output layer of the two models is concatenated to create one combined model.", |
| "cite_spans": [ |
| { |
| "start": 571, |
| "end": 588, |
| "text": "(He et al., 2015)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inception + BiLSTM:", |
| "sec_num": null |
| }, |
| { |
| "text": "In all cases, the output prediction is obtained from a final sigmoid layer added just after the concatenation layer of a multimodal model. All the models have compiled using 'binary crossentropy' loss function. Apart from this, we use 'Adam' optimizer with a learning rate of 1e \u22123 and choose the batch size of 32. The models are trained for 50 epochs utilizing the Keras callbacks function to store the best model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inception + BiLSTM:", |
| "sec_num": null |
| }, |
| { |
| "text": "This section presents a comparative performance analysis of the different experimental approaches to classify the multimodal memes. The superiority of the models is determined based on the weighted f 1 -score. However, other evaluation metrics like precision and recall are also considered in some cases. Table 2 shows the evaluation results of different approaches on the test set. The outcome reveals that all the models (i.e. CNN, VGG16, and Inception) developed based on the imaging modality obtained approximately same f 1 -score of 0.46. However, considering both precision (0.625) and recall (0.597) score, only Inception model performed better than the other visual models. On the other hand, models that are constructed based on textual modality show a rise of 10 \u2212 12% in f 1 score than the visual approach. In the textual approach, m-BERT and XLM-R achieved f 1 -score of 0.558 and 0.571 respectively. However, XLNet outperformed all the models by obtaining the highest f 1 -score of 0.583. For comparison, we also perform experiment by combining both modality features into one model. In case of multimodal approach, CNNImage + BiLSTM model obtained f 1 -score of 0.525 while ResNet + BiLSTM model achieved a lower f 1 -score (0.47) with a drops of 6%. Compared to that Inception + BiLSTM model achieved the highest f 1 -score of 0.559. Though two multimodal models (CNNImage + BiLSTM and Inception + BiLSTM) showed better outcome than visual models, they could not beat the textual model's performance (XLNet). Though it is skeptical that XLNet (monolingual model) outperformed multilingual models (m-BERT and XLM-R), the possible reason might be the provided captions in English.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 305, |
| "end": 312, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Result and Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "From the above analysis, it is evident that visual models performed poorly compared to the other approaches. The possible reason behind this might be due to the overlapping of multiple images in all the classes. That means the dataset consists of many memes with the same visual content with different captions in both classes. Moreover, many images do not provide any explicit meaningful information to conclude whether it is a troll or not-troll meme.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Result and Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "A detail error analysis is performed on the best model of each approach to obtain more insights. The analysis is carried out by using confusion matrices (Figure 2 ). From the figure 2 (a), it is observed that among 395 troll memes, Inception + BiLSTM model correctly classified 324 and misclassified 71 as not-troll. However, this model's true positive rate is comparatively low than the true negative rate as it identified only 72 not-troll memes correctly and wrongly classified 200 memes. On the other hand, in the visual approach, the Inception model showed outstanding performance by detecting 392 troll memes correctly from 395. However, the model The above analysis shows that all models get biased towards the troll memes and wrongly classified more than 70% memes as the troll. The probable reason behind this might be the overlapping nature of the memes in all classes. Besides, many memes do not have any embedded captions which might create difficulty for the textual and multimodal models to determine the appropriate class. Moreover, we observed that most of the missing caption memes were from troll class which might be a strong reason for the text models to incline towards the troll class.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 153, |
| "end": 162, |
| "text": "(Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "This work present the details of the methods and performance analysis of the models that we developed to participate in the meme classification shared task at EACL-2021. We have utilized visual, textual and multimodal features to classify memes into the 'troll' and 'not troll' categories. Results reveal that all the visual classifiers achieve similar weighted f 1 -score of 0.46. Transformer-based methods capture textual features where XLNet outdoes all others by obtaining 0.58 f 1 -score. In the multimodal approach, visual and textual features are combined via early fusion of weights. F 1 score rose significantly after adding textual features in CNN and Inception models. Only BiLSTM method is applied to extract features from the text in this approach. In future, it will be interesting to see how the models behave if transformers use in place of BiLSTM. An ensemble of transformers might also be explored in case of textual approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://huggingface.co/transformers/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Vinodkumar Prabhakaran, and Zeerak Waseem", |
| "authors": [ |
| { |
| "first": "Seyi", |
| "middle": [], |
| "last": "Akiwowo", |
| "suffix": "" |
| }, |
| { |
| "first": "Bertie", |
| "middle": [], |
| "last": "Vidgen", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "2020. Proceedings of the Fourth Workshop on Online Abuse and Harms", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seyi Akiwowo, Bertie Vidgen, Vinodkumar Prabhakaran, and Zeerak Waseem, editors. 2020. Proceedings of the Fourth Workshop on Online Abuse and Harms. Association for Computational Linguistics, Online.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion", |
| "authors": [ |
| { |
| "first": "Chakravarthi", |
| "middle": [], |
| "last": "Bharathi Raja", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "41--53", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi. 2020. HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion. In Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media, pages 41-53, Barcelona, Spain (Online). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Understanding visual memes: An empirical analysis of text superimposed on memes shared on twitter", |
| "authors": [ |
| { |
| "first": "Yuhao", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Muhammad", |
| "middle": [ |
| "Aamir" |
| ], |
| "last": "Masood", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Joseph", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the International AAAI Conference on Web and Social Media", |
| "volume": "14", |
| "issue": "", |
| "pages": "153--164", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuhao Du, Muhammad Aamir Masood, and Kenneth Joseph. 2020. Understanding visual memes: An empirical analysis of text superimposed on memes shared on twitter. Proceedings of the International AAAI Conference on Web and Social Media, 14(1):153-164.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Multimodal classification for analysing social media", |
| "authors": [ |
| { |
| "first": "Chi", |
| "middle": [ |
| "Thang" |
| ], |
| "last": "Duong", |
| "suffix": "" |
| }, |
| { |
| "first": "Remi", |
| "middle": [], |
| "last": "Lebret", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Aberer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi Thang Duong, Remi Lebret, and Karl Aberer. 2017. Multimodal classification for analysing social media.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Image matters: Scalable detection of offensive and non-compliant content / logo in product images", |
| "authors": [ |
| { |
| "first": "Shreyansh", |
| "middle": [], |
| "last": "Gandhi", |
| "suffix": "" |
| }, |
| { |
| "first": "Samrat", |
| "middle": [], |
| "last": "Kokkula", |
| "suffix": "" |
| }, |
| { |
| "first": "Abon", |
| "middle": [], |
| "last": "Chaudhuri", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Magnani", |
| "suffix": "" |
| }, |
| { |
| "first": "Theban", |
| "middle": [], |
| "last": "Stanley", |
| "suffix": "" |
| }, |
| { |
| "first": "Behzad", |
| "middle": [], |
| "last": "Ahmadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Venkatesh", |
| "middle": [], |
| "last": "Kandaswamy", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Ovenc", |
| "suffix": "" |
| }, |
| { |
| "first": "Shie", |
| "middle": [], |
| "last": "Mannor", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shreyansh Gandhi, Samrat Kokkula, Abon Chaudhuri, Alessandro Magnani, Theban Stanley, Behzad Ahmadi, Venkatesh Kandaswamy, Omer Ovenc, and Shie Mannor. 2019. Image matters: Scalable detection of offensive and non-compliant content / logo in product images.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Deep residual learning for image recognition", |
| "authors": [ |
| { |
| "first": "Kaiming", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaoqing", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2015. Deep residual learning for image recognition.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "2020. Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying. European Language Resources Association (ELRA)", |
| "authors": [ |
| { |
| "first": "Ritesh", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Atul", |
| "middle": [], |
| "last": "Kr", |
| "suffix": "" |
| }, |
| { |
| "first": "Bornini", |
| "middle": [], |
| "last": "Ojha", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Lahiri", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ritesh Kumar, Atul Kr. Ojha, Bornini Lahiri, Marcos Zampieri, Shervin Malmasi, Vanessa Murdock, and Daniel Kadar, editors. 2020. Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying. European Language Resources Association (ELRA), Marseille, France.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "2020. ktrain: A low-code library for augmented machine learning", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Arun", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Maiya", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arun S. Maiya. 2020. ktrain: A low-code library for augmented machine learning.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Overview of the HASOC Track at FIRE 2020: Hate Speech and Offensive Language Identification in Tamil", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Mandl", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandip", |
| "middle": [], |
| "last": "Modha", |
| "suffix": "" |
| }, |
| { |
| "first": "Anand", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja Chakravarthi ;", |
| "middle": [], |
| "last": "Malayalam", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hindi", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "English", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Forum for Information Retrieval Evaluation", |
| "volume": "2020", |
| "issue": "", |
| "pages": "29--32", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3441501.3441517" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Mandl, Sandip Modha, Anand Kumar M, and Bharathi Raja Chakravarthi. 2020. Overview of the HASOC Track at FIRE 2020: Hate Speech and Offensive Language Identification in Tamil, Malayalam, Hindi, English and German. In Forum for Information Retrieval Evaluation, FIRE 2020, page 29-32, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tom\u00e1s", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom\u00e1s Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Distributed representations of words and phrases and their compositionality. CoRR, abs/1310.4546.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Memesem: A multi-modal framework for sentimental analysis of meme via transfer learning. Omar Sharif, Eftekhar Hossain, and Mohammed Moshiul Hoque", |
| "authors": [], |
| "year": 2021, |
| "venue": "Combating hostility: Covid-19 fake news and hostile post detection in social media", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raj Ratn Pranesh and Ambesh Shekhar. 2020. Memesem: A multi-modal framework for sentimental analysis of meme via transfer learning. Omar Sharif, Eftekhar Hossain, and Mohammed Moshiul Hoque. 2021. Combating hostility: Covid-19 fake news and hostile post detection in social media.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Very deep convolutional networks for large-scale image recognition", |
| "authors": [ |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Simonyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Zisserman", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karen Simonyan and Andrew Zisserman. 2015. Very deep convolutional networks for large-scale image recognition.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Findings of the shared task on Troll Meme Classification in Tamil", |
| "authors": [ |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja", |
| "middle": [], |
| "last": "Chakravarthi", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shardul Suryawanshi and Bharathi Raja Chakravarthi. 2021. Findings of the shared task on Troll Meme Classification in Tamil. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Multimodal meme dataset (MultiOFF) for identifying offensive content in image and text", |
| "authors": [ |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja", |
| "middle": [], |
| "last": "Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihael", |
| "middle": [], |
| "last": "Arcan", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Buitelaar", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying", |
| "volume": "", |
| "issue": "", |
| "pages": "32--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shardul Suryawanshi, Bharathi Raja Chakravarthi, Mihael Arcan, and Paul Buitelaar. 2020a. Multimodal meme dataset (MultiOFF) for identifying offensive content in image and text. In Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying, pages 32-41, Marseille, France. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A dataset for troll classification of TamilMemes", |
| "authors": [ |
| { |
| "first": "Shardul", |
| "middle": [], |
| "last": "Suryawanshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharathi Raja", |
| "middle": [], |
| "last": "Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Verma", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihael", |
| "middle": [], |
| "last": "Arcan", |
| "suffix": "" |
| }, |
| { |
| "first": "John Philip", |
| "middle": [], |
| "last": "McCrae", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Buitelaar", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the WILDRE5-5th Workshop on Indian Language Data: Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "7--13", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shardul Suryawanshi, Bharathi Raja Chakravarthi, Pranav Verma, Mihael Arcan, John Philip McCrae, and Paul Buitelaar. 2020b. A dataset for troll classification of TamilMemes. In Proceedings of the WILDRE5-5th Workshop on Indian Language Data: Resources and Evaluation, pages 7-13, Marseille, France. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Rethinking the inception architecture for computer vision", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Szegedy", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Vanhoucke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Ioffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathon", |
| "middle": [], |
| "last": "Shlens", |
| "suffix": "" |
| }, |
| { |
| "first": "Zbigniew", |
| "middle": [], |
| "last": "Wojna", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. 2015. Rethinking the inception architecture for computer vision. CoRR, abs/1512.00567.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Sentiment Analysis in Tamil Texts: A Study on Machine Learning Techniques and Feature Representation", |
| "authors": [ |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sinnathamby", |
| "middle": [], |
| "last": "Mahesan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "2019 14th Conference on Industrial and Information Systems (ICIIS)", |
| "volume": "", |
| "issue": "", |
| "pages": "320--325", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICIIS47346.2019.9063341" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sajeetha Thavareesan and Sinnathamby Mahesan. 2019. Sentiment Analysis in Tamil Texts: A Study on Machine Learning Techniques and Feature Representation. In 2019 14th Conference on Industrial and Information Systems (ICIIS), pages 320-325.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Sentiment Lexicon Expansion using Word2vec and fastText for Sentiment Prediction in Tamil texts", |
| "authors": [ |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sinnathamby", |
| "middle": [], |
| "last": "Mahesan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", |
| "volume": "", |
| "issue": "", |
| "pages": "272--276", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/MERCon50084.2020.9185369" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sajeetha Thavareesan and Sinnathamby Mahesan. 2020a. Sentiment Lexicon Expansion using Word2vec and fastText for Sentiment Prediction in Tamil texts. In 2020 Moratuwa Engineering Research Conference (MERCon), pages 272-276.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Word embedding-based Part of Speech tagging in Tamil texts", |
| "authors": [ |
| { |
| "first": "Sajeetha", |
| "middle": [], |
| "last": "Thavareesan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sinnathamby", |
| "middle": [], |
| "last": "Mahesan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 IEEE 15th International Conference on Industrial and Information Systems (ICIIS)", |
| "volume": "", |
| "issue": "", |
| "pages": "478--482", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICIIS51140.2020.9342640" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sajeetha Thavareesan and Sinnathamby Mahesan. 2020b. Word embedding-based Part of Speech tagging in Tamil texts. In 2020 IEEE 15th International Conference on Industrial and Information Systems (ICIIS), pages 478-482.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Modeling trolling in social media conversations", |
| "authors": [ |
| { |
| "first": "Luis Gerardo", |
| "middle": [], |
| "last": "Mojica de la Vega", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luis Gerardo Mojica de la Vega and Vincent Ng. 2018. Modeling trolling in social media conversations. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC-2018), Miyazaki, Japan. European Languages Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "2015. I can has cheezburger? a nonparanormal approach to combining textual and visual information for predicting and generating popular meme descriptions", |
| "authors": [ |
| { |
| "first": "William Yang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Miaomiao", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "355--365", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/N15-1039" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Yang Wang and Miaomiao Wen. 2015. I can has cheezburger? a nonparanormal approach to combining textual and visual information for predicting and generating popular meme descriptions. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 355-365, Denver, Colorado. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [ |
| "G" |
| ], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V." |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime G. Carbonell, Ruslan Salakhutdinov, and Quoc V. Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. CoRR, abs/1906.08237.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "text": "Abstract process of troll memes classification", |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "text": "Confusion Matrix of the best model in each approach (a) Multimodal (b) Visual (c) Textual confused in identifying not troll memes as it correctly classified only 6 memes and misclassified 266 from a total 272 not-troll memes. Meanwhile, figure 2 (c) indicates that among 272 not-troll memes, XLNet correctly classified only 87. In contrast, among 395, the model correctly identified 319 troll memes.", |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td colspan=\"3\">Data Sets Troll Not-Troll</td></tr><tr><td>Train</td><td>1026</td><td>814</td></tr><tr><td>Valid</td><td>256</td><td>204</td></tr><tr><td>Test</td><td>395</td><td>272</td></tr><tr><td>Total</td><td>1677</td><td>1290</td></tr></table>", |
| "text": "Suryawanshi and Chakravarthi, 2021). The dataset contains two parts: an image with embedded Tamil code-mixed text, and a caption. Each instance of the dataset labelled as either 'troll' and 'not-troll'. Dataset divided into train, validation and test sets. Statistics of the dataset for each class given in table 1. Dataset divided into train, validation and test sets. Dataset is imbalanced where several data in the 'troll' class is much higher than the 'not-troll' class.", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "Performance comparison of different models on test set.", |
| "type_str": "table" |
| } |
| } |
| } |
| } |