| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:13:36.076494Z" |
| }, |
| "title": "CodemixedNLP: An Extensible and Open NLP Toolkit for Code-Mixing", |
| "authors": [ |
| { |
| "first": "Sai", |
| "middle": [ |
| "Muralidhar" |
| ], |
| "last": "Jayanthi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Language Technologies Institute Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "sjayanth@cs.cmu.edu" |
| }, |
| { |
| "first": "Kavya", |
| "middle": [], |
| "last": "Nerella", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Language Technologies Institute Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "knerella@cs.cmu.edu" |
| }, |
| { |
| "first": "Khyathi", |
| "middle": [ |
| "Raghavi" |
| ], |
| "last": "Chandu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Language Technologies Institute Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "kchandu@cs.cmu.edu" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Language Technologies Institute Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The NLP community has witnessed steep progress in a variety of tasks across the realms of monolingual and multilingual language processing recently. These successes, in conjunction with the proliferating mixed language interactions on social media have boosted interest in modeling code-mixed texts. In this work, we present CODEMIXEDNLP, an open-source library with the goals of bringing together the advances in code-mixed NLP and opening it up to a wider machine learning community. The library consists of tools to develop and benchmark versatile model architectures that are tailored for mixed texts, methods to expand training sets, techniques to quantify mixing styles, and fine-tuned state-of-the-art models for 7 tasks in Hinglish 1. We believe this work has a potential to foster a distributed yet collaborative and sustainable ecosystem in an otherwise dispersed space of code-mixing research. The toolkit is designed to be simple, easily extensible, and resourceful to both researchers as well as practitioners 2 .", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The NLP community has witnessed steep progress in a variety of tasks across the realms of monolingual and multilingual language processing recently. These successes, in conjunction with the proliferating mixed language interactions on social media have boosted interest in modeling code-mixed texts. In this work, we present CODEMIXEDNLP, an open-source library with the goals of bringing together the advances in code-mixed NLP and opening it up to a wider machine learning community. The library consists of tools to develop and benchmark versatile model architectures that are tailored for mixed texts, methods to expand training sets, techniques to quantify mixing styles, and fine-tuned state-of-the-art models for 7 tasks in Hinglish 1. We believe this work has a potential to foster a distributed yet collaborative and sustainable ecosystem in an otherwise dispersed space of code-mixing research. The toolkit is designed to be simple, easily extensible, and resourceful to both researchers as well as practitioners 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Code-mixing refers to fluid alteration between two or more languages in a given utterance. This phenomenon is ubiquitous and more natural in multilingual communities, and is highly prevalent in social media platforms. Developing tools that can comprehend mixed texts can have a multitude of advantages, ranging from socially responsible NLP applications such as moderating abusive content in social media to improve naturalness of ubiquitous technologies such as conversational AI assistants and further to develop socio-cultural studies around human cognition, such as why and when people code-mix.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "NLP tools for monolingual and multilingual language processing have rapidly progressed in the past few years; thanks to the transformer-based models such as Multilingual BERT (Devlin et al., 2019) & XLM-RoBERTa (Conneau et al., 2020) , and their pretraining techniques. On various mixed datasets, recent studies have shown that adopting multilingual pretrained models can perform better than their previous deep learning counterparts (Pires et al., 2019; Khanuja et al., 2020; Chakravarthy et al., 2020; Jayanthi and Gupta, 2021) . While this looks promising for multilingual, the same is not translated to code-mixing. Hence, a critical investigation is required to understand generalizable modeling strategies to enhance performance on mixed texts (Winata et al., 2021; .", |
| "cite_spans": [ |
| { |
| "start": 175, |
| "end": 196, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 211, |
| "end": 233, |
| "text": "(Conneau et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 434, |
| "end": 454, |
| "text": "(Pires et al., 2019;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 455, |
| "end": 476, |
| "text": "Khanuja et al., 2020;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 477, |
| "end": 503, |
| "text": "Chakravarthy et al., 2020;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 504, |
| "end": 529, |
| "text": "Jayanthi and Gupta, 2021)", |
| "ref_id": null |
| }, |
| { |
| "start": 750, |
| "end": 771, |
| "text": "(Winata et al., 2021;", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "At the same time, practitioners who require an off-the-shelf tool into their downstream mixed text application (eg. sentiment or language identification), currently have to resort to monolingual toolkits such as NLTK, Flair, IndicNLP and iNLTK. On the other hand, while there have been several episodic works on mixed text processing, such as proposing novel datasets or shared-tasks or training strategies, there haven't been many initiatives to collate these resources into a common setting; doing so can benefit both researchers and practitioners, thereby accelerating NLP for mixed texts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we address some of these shortcomings by creating an extensible and open-source toolkit for a variety of semantic and syntactic NLP applications in mixed languages. Our toolkit offers-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 simple plug-and-play command line interfaces with fine grained control over inputs, models and tasks for developing, quantifying, benchmarking, and re-using versatile model architectures tailored for mixed texts ( \u00a7 2.1, \u00a7 2.2, \u00a7 2.3) \u2022 easy to use single stop interfacing for a variety of data augmentation techniques including transliteration, spelling variations, expansion with monolingual corpora etc., by leveraging a collation of publicly available tools \u2022 a toolkit library to import fine-tuned and ready-to-use models for 7 different tasks in Hinglish, along with an easy-to-setup web interface wrapper based on flask server ( \u00a7 4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We believe the fine grained plug and play interfacing of the toolkit can serve a multitude of purposes in both academia and industry. Such fine control over the individual components of the model can enable accelerated experimentation in training different model architectures, such as multi-tasking, representation-fusion, and language-informed modeling.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This in-turn helps our understanding of utilizing pretrained transformer-models for mixed datasets. In addition, our toolkit also offers computation of metrics to quantify code-mixing such as Code-Mixing Index, Language Entropy, etc., which can be utilized to find peculiarities of low-performing subsets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Like a curse in disguise, though code-mixing is widely prevalent and available on social media, it is accompanied with non-standard spellings, mixed scripts and ill-formed sentences are common in code-mixing. To combat this, our toolkit offers techniques to augment the training sets with multiple views of each input corresponding to the above problems. Among many potential applications, we first demonstrate our toolkit's utility in benchmarking ( \u00a7 3). In addition, we publish state-of-the-art models for different NLP tasks in Hinglish and wrap them into a command line / deployable web interface ( \u00a7 4). Our toolkit is easily extensiblepractitioners can incorporate new pretrained as well as fine-tuned models, include text processors such as tokenizers, transliterators and translators, and add wrappers on existing methods for downstream NLP applications.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our toolkit is organized into components as depicted in Figure 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 56, |
| "end": 64, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Toolkit", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In a nutshell, an end-to-end model architecture consists of one or more encoder components, a component for combining encodings, and one or more adaptor plus task components. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Toolkit", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Multi-view Integration: Tokens in mixed texts are often manifested in cross-script and mixed forms, that we refer to as views. This infusion motivates integration of text representations in varied forms, such as transliterated, translated, script-normalized, and tokens belonging to one of the participating languages. Especially in the context of pretrained multilingual models, this technique means extracting a holistic representation of a mixed text. To this end, the toolkit facilitates combining representations from different views of an input. Text Tokenization: Motivated by some recent related works on using different word-level and sub-word-level embeddings (Winata et al., 2019; Aguilar and Solorio, 2020), our toolkit offers different tokenization methods for encoding text. Among the encoders available in our toolkit ( \u00a7 2.2), pretrained transformer-based encoders can either be tokenized using their default tokenization technique (i.e. subwords) or by using a character-CNN architecture (Boukkouri et al., 2020) . LSTM-based models can take inputs in the form of tensor representationseg. word-level FastText (Bojanowski et al., 2017) or semi-character (Sakaguchi et al., 2017) representations, or character-level representationseg. char-BiLSTM 3 .", |
| "cite_spans": [ |
| { |
| "start": 1005, |
| "end": 1029, |
| "text": "(Boukkouri et al., 2020)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1127, |
| "end": 1152, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1171, |
| "end": 1195, |
| "text": "(Sakaguchi et al., 2017)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Embeddings", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Studies in the past have shown the usefulness of language tag-aware modeling for mixed and cross-lingual texts (Chandu et al., 2018; Lample and Conneau, 2019) . However, their usefulness in the context of pretrained models and code-mixing is not thoroughly investigated. To this end, we offer a more generalized method in our toolkit to conduct any tag-aware fine-tuning, wherein representations for different kinds of tags can be added to the text representations. Examples of such tags include POS tags, Language IDs, etc.", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 132, |
| "text": "(Chandu et al., 2018;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 133, |
| "end": 158, |
| "text": "Lample and Conneau, 2019)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tag-Informed Modeling:", |
| "sec_num": null |
| }, |
| { |
| "text": "Encoders: An Encoder in our toolkit can consist of a transformer-based or BiLSTM-based architecture. Specifically, for the former, we utilize pretrained models from the HuggingFace library (Wolf et al., 2020) and the latter is implemented in Pytorch (Paszke et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 189, |
| "end": 208, |
| "text": "(Wolf et al., 2020)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 250, |
| "end": 271, |
| "text": "(Paszke et al., 2019)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Encodings from different encoders can be combined, and if required be augmented with (non-trainable) representations before passing through an adaptor. To combine encodings, one can either simply concat them or obtain a (trainable) weighted average, a more parameter-efficient choice than the former. Both choices are available in our toolkit. Adaptors: An adaptor is a task-specific neural layer and currently, BiLSTM and Multi-Layer Perceptron (MLP) choices are available as part our toolkit. The inputs to adaptors are fused representations if multiple encoders are specified, else output from a single encoder. These adaptors serve as task-specific learnable parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Fusion:", |
| "sec_num": null |
| }, |
| { |
| "text": "Multi-task learning can help models to pick relevant cues from one task to be applied to another. Such a setting was also previously investigated in the context of mixed texts, which showed promising improvements (Chandu et al., 2018) . Furthermore, it is also shown in monolingual NLP that incorporating explicit semantics as an auxiliary task can enhance BERT's performance (Zhang et al., 2020) . Motivated by these, our toolkit offers support to conduct training of one or more tasks. Once a final representation is produced by adaptors of each task, we use a training criterion to compute loss and perform gradient backpropagation.", |
| "cite_spans": [ |
| { |
| "start": 213, |
| "end": 234, |
| "text": "(Chandu et al., 2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 376, |
| "end": 396, |
| "text": "(Zhang et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multitasking:", |
| "sec_num": null |
| }, |
| { |
| "text": "Tasks: Our toolkit currently supports two kinds of tasks-sentence-level text classification and word-level sequence tagging, the flow for each is demonstrated in Figure 1 . The decoupled design of our toolkit helps in seamlessly creating multi-task training setups. The kinds of tasks for which we offer support currently are listed in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 162, |
| "end": 170, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 336, |
| "end": 343, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tasks", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Adaptive Pretraining: Following the successes of task-adaptive and domain-adaptive pretraining in monolingual and multilingual NLP tasks (Gururangan et al., 2020) , users of our toolkit can also perform such adaptive pretrainings using mixed texts on top of pretrained transformer-based models.", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 162, |
| "text": "(Gururangan et al., 2020)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tasks", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Our toolkit offers standardized metrics for quantifying mixing in text, namely Code-Mixing Index (Gamb\u00e4ck and Das, 2014) , Average switch-points (Khanuja et al., 2020) , Multilingual Index, Probability of Switching and Language Entropy (Guzm\u00e1n et al., 2017) . We offer simple command line methods to compute these metrics and also offer metric-based data sampling.", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 120, |
| "text": "(Gamb\u00e4ck and Das, 2014)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 145, |
| "end": 167, |
| "text": "(Khanuja et al., 2020)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 205, |
| "end": 257, |
| "text": "Switching and Language Entropy (Guzm\u00e1n et al., 2017)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Codemixed Quantification", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Our toolkit also offers techniques to do data augmentation. While data augmentation is useful in cases where there is training data scarcity, for mixed datasets, it is also essential to produce a more generalized model. As part of this feature, this toolkit currently offers augmentation through transliteration, spelling variations and monolingual corpora. We currently support transliteration of Indic languages through an off-the-shelf toolindic-trans (Bhat et al., 2015) . Spelling variations include noising spelling, such as randomly removing/replacing vowel characters. Monolingual corpora augmentation is task specific. For a given task, such as sentiment classification, we augment publicly available monolingual corpora based on the task type from one or all of the mixing languages and use it while fine-tuning models.", |
| "cite_spans": [ |
| { |
| "start": 455, |
| "end": 474, |
| "text": "(Bhat et al., 2015)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Augmentation", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "Due to diverse data formats of existing mixed datasets, benchmarking and comparing results across tasks is not readily feasible. To this end, we propose a standardized data format for syntactic, semantic level understanding and generation tasks, and our toolkit offers command line methods to adopt a user's dataset to this standard format.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Format", |
| "sec_num": "2.6" |
| }, |
| { |
| "text": "Among many potential research applications of our toolkit, in this section, we demonstrate one benchmarking. Table 1 presents performances of selected model architectures obtained using our toolkit on some popular mixed datasets. In Table 2 , we also demonstrate the performances of different architectural choices implemented through our toolkit on two Hinglish datasets. For domain-adaptive pretraining of Hinglish datasets, we collate around 160K mixed sentences from several of the publicly available Hinglish datasets. For task-adaptive pretraining, we just use the training and testing data available in the dataset of interest. For training, we use standard optimizers and model configurations. 4", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 109, |
| "end": 116, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 233, |
| "end": 240, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We fine-tune and publish transformer-based models for 7 tasks in Hinglish. We include 3 task types-(1) Semantic (Sentiment Classification, Hate Speech and Aggression Identification), (2) Syntactic (NER, POS and Language Identification), and (3) Generation (Hinglish\u2192English Machine Translation). We present some examples of utilizing these models in Figure 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 350, |
| "end": 358, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Demo", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this work, we presented a unified toolkit for modeling code-mixed texts. Additionally, the toolkit contains various functionalities such as data augmentation, code-mixing quantification, and ready-to-use fine-tuned models for 7 different NLP tasks in Hinglish. Our toolkit is simple enough for practitioners to integrate new features as well as develop wrappers around its existing functionalities. We believe this contribution facilitates a sustainable and extensible ecosystem of models by adding novel pretraining techniques tailored for mixed texts, text normalization techniques to counter spelling variations, error analysis tools to identify peculiarities in incorrect predictions and so on.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Demo is available at: https://bit.ly/3rzOcWb 2 The library and pretrained models are available at github.com/murali1996/CodemixedNLP.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Sequence Tagging with Tensorflow", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Due to the space limitations, we direct the reader to check github.com/murali1996/CodemixedNLP for toolkit usage patterns and for the list of modeling choices.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "LinCE: A centralized benchmark for linguistic code-switching evaluation", |
| "authors": [ |
| { |
| "first": "Gustavo", |
| "middle": [], |
| "last": "Aguilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sudipta", |
| "middle": [], |
| "last": "Kar", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "1803--1813", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gustavo Aguilar, Sudipta Kar, and Thamar Solorio. 2020. LinCE: A centralized benchmark for linguistic code-switching evaluation. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 1803-1813, Marseille, France. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "From English to code-switching: Transfer learning with strong morphological clues", |
| "authors": [ |
| { |
| "first": "Gustavo", |
| "middle": [], |
| "last": "Aguilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8033--8044", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.716" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gustavo Aguilar and Thamar Solorio. 2020. From English to code-switching: Transfer learning with strong morphological clues. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8033-8044, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Iiit-h system submission for fire2014 shared task on transliterated search", |
| "authors": [ |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Irshad", |
| "suffix": "" |
| }, |
| { |
| "first": "Vandan", |
| "middle": [], |
| "last": "Bhat", |
| "suffix": "" |
| }, |
| { |
| "first": "Aniruddha", |
| "middle": [], |
| "last": "Mujadia", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tammewar", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Forum for Information Retrieval Evaluation, FIRE '14", |
| "volume": "", |
| "issue": "", |
| "pages": "48--53", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/2824864.2824872" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Irshad Ahmad Bhat, Vandan Mujadia, Aniruddha Tammewar, Riyaz Ahmad Bhat, and Manish Shrivastava. 2015. Iiit-h system submission for fire2014 shared task on transliterated search. In Proceedings of the Forum for Information Retrieval Evaluation, FIRE '14, pages 48-53, New York, NY, USA. ACM.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A dataset of hindi-english code-mixed social media text for hate speech detection", |
| "authors": [ |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Bohra", |
| "suffix": "" |
| }, |
| { |
| "first": "Deepanshu", |
| "middle": [], |
| "last": "Vijay", |
| "suffix": "" |
| }, |
| { |
| "first": "Vinay", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Manish", |
| "middle": [], |
| "last": "Syed Sarfaraz Akhtar", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Shrivastava", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the second workshop on computational modeling of people's opinions, personality, and emotions in social media", |
| "volume": "", |
| "issue": "", |
| "pages": "36--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aditya Bohra, Deepanshu Vijay, Vinay Singh, Syed Sarfaraz Akhtar, and Manish Shrivastava. 2018. A dataset of hindi-english code-mixed social media text for hate speech detection. In Proceedings of the second workshop on computational modeling of people's opinions, personality, and emotions in social media, pages 36-41.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Characterbert: Reconciling elmo and bert for word-level open-vocabulary representations from characters", |
| "authors": [ |
| { |
| "first": "Hicham", |
| "middle": [ |
| "El" |
| ], |
| "last": "Boukkouri", |
| "suffix": "" |
| }, |
| { |
| "first": "Olivier", |
| "middle": [], |
| "last": "Ferret", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Lavergne", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroshi", |
| "middle": [], |
| "last": "Noji", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Zweigenbaum", |
| "suffix": "" |
| }, |
| { |
| "first": "Junichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hicham El Boukkouri, Olivier Ferret, Thomas Lavergne, Hiroshi Noji, Pierre Zweigenbaum, and Junichi Tsujii. 2020. Characterbert: Reconciling elmo and bert for word-level open-vocabulary representations from characters.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Corpus creation for sentiment analysis in code-mixed Tamil-English text", |
| "authors": [ |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Muralidaran", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "Philip" |
| ], |
| "last": "Priyadharshini", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
| "volume": "", |
| "issue": "", |
| "pages": "202--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muralidaran, Ruba Priyadharshini, and John Philip McCrae. 2020. Corpus creation for sentiment analysis in code-mixed Tamil-English text. In Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 202-210, Marseille, France. European Language Resources association.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Detecting entailment in code-mixed Hindi-English conversations", |
| "authors": [ |
| { |
| "first": "Sharanya", |
| "middle": [], |
| "last": "Chakravarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Anjana", |
| "middle": [], |
| "last": "Umapathy", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Sixth Workshop on Noisy User-generated Text (W-NUT 2020)", |
| "volume": "", |
| "issue": "", |
| "pages": "165--170", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.wnut-1.22" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sharanya Chakravarthy, Anjana Umapathy, and Alan W Black. 2020. Detecting entailment in code-mixed Hindi-English conversations. In Proceedings of the Sixth Workshop on Noisy User-generated Text (W-NUT 2020), pages 165-170, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Language informed modeling of code-switched text", |
| "authors": [ |
| { |
| "first": "Khyathi", |
| "middle": [], |
| "last": "Chandu", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Manzini", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumeet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Workshop on Computational Approaches to Linguistic Code-Switching", |
| "volume": "", |
| "issue": "", |
| "pages": "92--97", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-3211" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Khyathi Chandu, Thomas Manzini, Sumeet Singh, and Alan W. Black. 2018. Language informed modeling of code-switched text. In Proceedings of the Third Workshop on Computational Approaches to Linguistic Code-Switching, pages 92-97, Melbourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Unsupervised cross-lingual representation learning at scale", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartikay", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Wenzek", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8440--8451", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.747" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8440-8451, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics. Flair. Flair toolkit.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "On measuring the complexity of code-mixing", |
| "authors": [ |
| { |
| "first": "Bj\u00f6rn", |
| "middle": [], |
| "last": "Gamb\u00e4ck", |
| "suffix": "" |
| }, |
| { |
| "first": "Amitava", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 11th International Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1--7", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bj\u00f6rn Gamb\u00e4ck and Amitava Das. 2014. On measuring the complexity of code-mixing. In Proceedings of the 11th International Conference on Natural Language Processing, Goa, India, pages 1-7.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Don't stop pretraining: Adapt language models to domains and tasks", |
| "authors": [ |
| { |
| "first": "Suchin", |
| "middle": [], |
| "last": "Gururangan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ana", |
| "middle": [], |
| "last": "Marasovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Swabha", |
| "middle": [], |
| "last": "Swayamdipta", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Doug", |
| "middle": [], |
| "last": "Downey", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Suchin Gururangan, Ana Marasovi\u0107, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Metrics for modeling code-switching across corpora", |
| "authors": [ |
| { |
| "first": "Gualberto", |
| "middle": [ |
| "A" |
| ], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Ricard", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacqueline", |
| "middle": [], |
| "last": "Serigos", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [ |
| "E" |
| ], |
| "last": "Bullock", |
| "suffix": "" |
| }, |
| { |
| "first": "Almeida", |
| "middle": [ |
| "Jacqueline" |
| ], |
| "last": "Toribio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "67--71", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gualberto A Guzm\u00e1n, Joseph Ricard, Jacqueline Serigos, Barbara E Bullock, and Almeida Jacqueline Toribio. 2017. Metrics for modeling code-switching across corpora. In INTERSPEECH, pages 67-71. IndicNLP. Indic nlp library. https://anoopkunchukuttan.github.io/indic_nlp_library/. Accessed: 2021-03-09.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Natural language toolkit for indic languages (inltk)", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "iNLTK. Natural language toolkit for indic languages (inltk). https://github.com/goru001/inltk. Accessed: 2021-03-09.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Task-adaptive Pre-Training of Multilingual BERT models for Offensive Language Identification", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Task-adaptive Pre-Training of Multilingual BERT models for Offensive Language Identification.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Cooking is creating emotion: a study on hinglish sentiments of youtube cookery channels using semi-supervised approach", |
| "authors": [ |
| { |
| "first": "Gagandeep", |
| "middle": [], |
| "last": "Kaur", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhishek", |
| "middle": [], |
| "last": "Kaushik", |
| "suffix": "" |
| }, |
| { |
| "first": "Shubham", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Big Data and Cognitive Computing", |
| "volume": "3", |
| "issue": "3", |
| "pages": "37", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gagandeep Kaur, Abhishek Kaushik, and Shubham Sharma. 2019. Cooking is creating emotion: a study on hinglish sentiments of youtube cookery channels using semi-supervised approach. Big Data and Cognitive Computing, 3(3):37.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "GLUECoS: An evaluation benchmark for code-switched NLP", |
| "authors": [ |
| { |
| "first": "Simran", |
| "middle": [], |
| "last": "Khanuja", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandipan", |
| "middle": [], |
| "last": "Dandapat", |
| "suffix": "" |
| }, |
| { |
| "first": "Anirudh", |
| "middle": [], |
| "last": "Srinivasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunayana", |
| "middle": [], |
| "last": "Sitaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Monojit", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3575--3585", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.329" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simran Khanuja, Sandipan Dandapat, Anirudh Srinivasan, Sunayana Sitaram, and Monojit Choudhury. 2020. GLUECoS: An evaluation benchmark for code-switched NLP. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 3575-3585, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Benchmarking aggression identification in social media", |
| "authors": [ |
| { |
| "first": "Ritesh", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Atul", |
| "middle": [ |
| "Kr." |
| ], |
| "last": "Ojha", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the First Workshop on Trolling, Aggression and Cyberbullying (TRAC-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--11", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ritesh Kumar, Atul Kr. Ojha, Shervin Malmasi, and Marcos Zampieri. 2018. Benchmarking aggression identification in social media. In Proceedings of the First Workshop on Trolling, Aggression and Cyberbullying (TRAC-2018), pages 1-11, Santa Fe, New Mexico, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Cross-lingual language model pretraining", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross-lingual language model pretraining.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Nltk sentiment analysis toolkit", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "NLTK. Nltk sentiment analysis toolkit. http: //www.nltk.org/howto/sentiment.html. Accessed: 2021-03-09.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Pytorch: An imperative style, high-performance deep learning library", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Paszke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Massa", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lerer", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Chanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Killeen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeming", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Gimelshein", |
| "suffix": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Antiga", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1912.01703" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. 2019. Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Sentiment analysis of code-mixed indian languages: An overview of sail code-mixed shared task@ icon-2017", |
| "authors": [ |
| { |
| "first": "Braja", |
| "middle": [ |
| "Gopal" |
| ], |
| "last": "Patra", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipankar", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Amitava", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.06745" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Braja Gopal Patra, Dipankar Das, and Amitava Das. 2018. Sentiment analysis of code-mixed indian languages: An overview of sail code-mixed shared task@ icon-2017. arXiv preprint arXiv:1803.06745.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Semeval-2020 task 9: Overview of sentiment analysis of code-mixed tweets", |
| "authors": [ |
| { |
| "first": "Parth", |
| "middle": [], |
| "last": "Patwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Gustavo", |
| "middle": [], |
| "last": "Aguilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sudipta", |
| "middle": [], |
| "last": "Kar", |
| "suffix": "" |
| }, |
| { |
| "first": "Suraj", |
| "middle": [], |
| "last": "Pandey", |
| "suffix": "" |
| }, |
| { |
| "first": "Pykl", |
| "middle": [], |
| "last": "Srinivas", |
| "suffix": "" |
| }, |
| { |
| "first": "Bj\u00f6rn", |
| "middle": [], |
| "last": "Gamb\u00e4ck", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanmoy", |
| "middle": [], |
| "last": "Chakraborty", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| }, |
| { |
| "first": "Amitava", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 14th International Workshop on Semantic Evaluation (SemEval-2020)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Parth Patwa, Gustavo Aguilar, Sudipta Kar, Suraj Pandey, Srinivas PYKL, Bj\u00f6rn Gamb\u00e4ck, Tanmoy Chakraborty, Thamar Solorio, and Amitava Das. 2020. Semeval-2020 task 9: Overview of sentiment analysis of code-mixed tweets. In Proceedings of the 14th International Workshop on Semantic Evaluation (SemEval-2020), Barcelona, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "How multilingual is multilingual BERT?", |
| "authors": [ |
| { |
| "first": "Telmo", |
| "middle": [], |
| "last": "Pires", |
| "suffix": "" |
| }, |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Schlinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4996--5001", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1493" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual BERT? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4996-5001, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Robsut wrod reocginiton via semi-character recurrent neural network", |
| "authors": [ |
| { |
| "first": "Keisuke", |
| "middle": [], |
| "last": "Sakaguchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "3281--3287", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Keisuke Sakaguchi, Kevin Duh, Matt Post, and Benjamin Van Durme. 2017. Robsut wrod reocginiton via semi-character recurrent neural network. In Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence, February 4-9, 2017, San Francisco, California, USA., pages 3281-3287. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A survey of code-switched speech and language processing", |
| "authors": [ |
| { |
| "first": "Sunayana", |
| "middle": [], |
| "last": "Sitaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Khyathi", |
| "middle": [ |
| "Raghavi" |
| ], |
| "last": "Chandu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sai", |
| "middle": [ |
| "Krishna" |
| ], |
| "last": "Rallabandi", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sunayana Sitaram, Khyathi Raghavi Chandu, Sai Krishna Rallabandi, and Alan W Black. 2020. A survey of code-switched speech and language processing.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Are multilingual models effective in code-switching?", |
| "authors": [ |
| { |
| "first": "Genta", |
| "middle": [ |
| "Indra" |
| ], |
| "last": "Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Cahyawijaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Genta Indra Winata, Samuel Cahyawijaya, Zihan Liu, Zhaojiang Lin, Andrea Madotto, and Pascale Fung. 2021. Are multilingual models effective in code-switching?", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Hierarchical meta-embeddings for code-switching named entity recognition", |
| "authors": [ |
| { |
| "first": "Genta", |
| "middle": [ |
| "Indra" |
| ], |
| "last": "Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamin", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.08504" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Genta Indra Winata, Zhaojiang Lin, Jamin Shin, Zihan Liu, and Pascale Fung. 2019. Hierarchical meta-embeddings for code-switching named entity recognition. arXiv preprint arXiv:1909.08504.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "Remi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Davison", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Shleifer", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "von Platen", |
| "suffix": "" |
| }, |
| { |
| "first": "Clara", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Yacine", |
| "middle": [], |
| "last": "Jernite", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Plu", |
| "suffix": "" |
| }, |
| { |
| "first": "Canwen", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Teven", |
| "middle": [ |
| "Le" |
| ], |
| "last": "Scao", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Gugger", |
| "suffix": "" |
| }, |
| { |
| "first": "Mariama", |
| "middle": [], |
| "last": "Drame", |
| "suffix": "" |
| }, |
| { |
| "first": "Quentin", |
| "middle": [], |
| "last": "Lhoest", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-demos.6" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "38--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Customizable components in our toolkit. Marked in dashed box is an optional component.", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Command line interface for utilizing fine-tuned models. We provide several functionality compatible with the popular Huggingface and Fairseq libraries. Marked in boxes are customizable input arguments.", |
| "uris": null |
| }, |
| "TABREF1": { |
| "html": null, |
| "content": "<table><tr><td colspan=\"3\">HIN-ENG2 (Patra et al., 2018), SPA-ENG(Aguilar et al., 2020)), Aggression Identification (Kumar et al., 2018), Hate Speech</td></tr><tr><td colspan=\"3\">Identification (Bohra et al., 2018), Offensiveness Identification (Chakravarthi et al., 2020), Youtube Comments Classification</td></tr><tr><td colspan=\"3\">(Kaur et al., 2019), Language Identification (Aguilar et al., 2020), Named Entity Recognition and Parts of Speech Tagging</td></tr><tr><td colspan=\"3\">(Khanuja et al., 2020). \u2020 Sentiment</td></tr><tr><td/><td colspan=\"2\">Classification</td></tr><tr><td>Model</td><td colspan=\"2\">HIN-ENG1 HIN-ENG2</td></tr><tr><td>XLM-RoBERTa</td><td>68.9 / 69.1</td><td>61.5 / 61.5</td></tr><tr><td>w/ multi-view integration</td><td>71.1 / 71.3</td><td>62.0 / 62.8</td></tr><tr><td>w/ language-tag informed</td><td>68.9 / 69.3</td><td>62.8 / 63.1</td></tr><tr><td>w/ fasttext-BiLSTM fusion</td><td>69.9 / 70.0</td><td>61.2 / 62.1</td></tr><tr><td>w/ char-BiLSTM fusion</td><td>69.3 / 69.1</td><td>62.0 / 62.3</td></tr><tr><td>w/ semi-char-BiLSTM fusion</td><td>69.4 / 68.9</td><td>60.0 / 60.8</td></tr><tr><td>w/ data noising</td><td>70.5 / 70.5</td><td>61.9 / 62.2</td></tr><tr><td>w/ monolingual corpora</td><td>68.9 / 69.3</td><td>68.2 / 68.3</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "text": "Results are reported for eight different tasks, namely, Sentiment Classification (HIN-ENG1(Patwa et al., 2020), Implies results on dev split, otherwise on test splits." |
| }, |
| "TABREF2": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "" |
| } |
| } |
| } |
| } |