| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T06:35:09.304290Z" |
| }, |
| "title": "TATL at WNUT-2020 Task 2: A Transformer-based Baseline System for Identification of Informative COVID-19 English Tweets", |
| "authors": [ |
| { |
| "first": "Anh", |
| "middle": [ |
| "Tuan" |
| ], |
| "last": "Nguyen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NVIDIA", |
| "location": { |
| "region": "Santa Clara", |
| "country": "USA" |
| } |
| }, |
| "email": "tuananhn@nvidia.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "As the COVID-19 outbreak continues to spread throughout the world, more and more information about the pandemic has been shared publicly on social media. For example, there are a huge number of COVID-19 English Tweets daily on Twitter. However, the majority of those Tweets are uninformative, and hence it is important to be able to automatically select only the informative ones for downstream applications. In this short paper, we present our participation in the W-NUT 2020 Shared Task 2: Identification of Informative COVID-19 English Tweets. Inspired by the recent advances in pretrained Transformer language models, we propose a simple yet effective baseline for the task. Despite its simplicity, our proposed approach shows very competitive results in the leaderboard as we ranked 8th out of the 55 teams that participated in total.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "As the COVID-19 outbreak continues to spread throughout the world, more and more information about the pandemic has been shared publicly on social media. For example, there are a huge number of COVID-19 English Tweets daily on Twitter. However, the majority of those Tweets are uninformative, and hence it is important to be able to automatically select only the informative ones for downstream applications. In this short paper, we present our participation in the W-NUT 2020 Shared Task 2: Identification of Informative COVID-19 English Tweets. Inspired by the recent advances in pretrained Transformer language models, we propose a simple yet effective baseline for the task. Despite its simplicity, our proposed approach shows very competitive results in the leaderboard as we ranked 8th out of the 55 teams that participated in total.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The COVID-19 pandemic has been spreading rapidly across the globe and has infected more than 20 million men and women. As a result, more and more people have been sharing a wide variety of information related to COVID-19 publicly on social media. For example, there are a huge number of COVID-19 English Tweets daily on Twitter. However, the majority of those Tweets are uninformative and do not contain useful information, therefore, systems which can automatically filter out uninformative tweets are needed by the community. Tweets are generally different from traditional written-text such as Wikipedia or news articles due to their short length and informal use of words and grammar (e.g., abbreviations, hashtags, markers). These special characteristics of Tweets may pose a challenge for many NLP techniques that focus solely on formally written texts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we present our participation in the W-NUT 2020 Shared Table 1 : Statistics of Shared task 2 dataset. \"#training\", \"#valid\" and \"#test\" denote the size of the training, validation and test sets, listed by categories, respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 69, |
| "end": 76, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Informative COVID-19 English Tweets (Nguyen et al., 2020b) . Inspired by the recent success of Transformer-based pre-trained language models in many NLP tasks (Devlin et al., 2019; Nguyen and Nguyen, 2020; Lai et al., 2020) , we propose a simple yet effective baseline for the task. Despite its simplicity, our proposed approach shows very competitive results. In the following sections, we first describe the task definitions in Section 2 and proposed methods in Section 3. We then describe the experiments and their results in Section 4. Finally, in Section 5, we conclude this work and discuss potential future research directions.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 58, |
| "text": "COVID-19 English Tweets (Nguyen et al., 2020b)", |
| "ref_id": null |
| }, |
| { |
| "start": 159, |
| "end": 180, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 181, |
| "end": 205, |
| "text": "Nguyen and Nguyen, 2020;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 206, |
| "end": 223, |
| "text": "Lai et al., 2020)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The goal of Shared task 2 is to identify whether a COVID-19 English Tweet is informative or not. Such informative Tweets provide information about recovered, suspected, confirmed and death cases as well as location and history of each case. The dataset introduced in this Shared task consists of 10K COVID-19 English Tweets. Dataset statistics can be found in Table 1 3 Method", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 360, |
| "end": 367, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Task Definitions", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The task is formulated as a binary classification of Tweets into informative or uninformative classes. Figure 1 gives a high-level overview of our proposed approach. Given a Tweet consisting of n tokens x = {x 1 , x 2 , ..., x n }, we first form a contextualized representation for each token using a Transformer-based encoder such as BERT (Devlin et al., 2019) . Following common conventions, we append special tokens to the beginning and end of the input Tweet before feeding it to the Transformer model. For example, if we use BERT, x 1 will be the special [CLS] token and x n will be the special [SEP] token. Let H = {h 1 , h 2 , ..., h n } denote the contextualized representations produced by the Transformer model. We then use h 1 as an aggregate representation of the original input and feed it to a linear layer to calculate the final output:", |
| "cite_spans": [ |
| { |
| "start": 340, |
| "end": 361, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 103, |
| "end": 111, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baseline Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y = \u03c3(Wh 1 + b) \u2208 R", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Baseline Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where the transformation matrix W and the bias term b are model parameters. \u03c3 denotes the sigmoid function. It squashes the score to a probability between 0 and 1. y is the predicted probability of the input Tweet being informative. In this work, we experiment with various state-of-the-art Transformer models including BERTweet (Nguyen et al., 2020a), XLM-RoBERTa (Conneau et al., 2020) , RoBERTa (Liu et al., 2019) , and ELECTRA (Clark et al., 2020) . In the following subsections, we will briefly describe these Transformer models.", |
| "cite_spans": [ |
| { |
| "start": 365, |
| "end": 387, |
| "text": "(Conneau et al., 2020)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 398, |
| "end": 416, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 431, |
| "end": 451, |
| "text": "(Clark et al., 2020)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "RoBERTa (Liu et al., 2019) improved over BERT (Devlin et al., 2019) by leveraging different training objectives which leads to more robust optimization i.e removing next sentence prediction and using dynamic masking for masked language modelling. Liu et al. (2019) also shows that training the language model longer and with more data hugely benefits the performance on downstream tasks.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 26, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 46, |
| "end": 67, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 247, |
| "end": 264, |
| "text": "Liu et al. (2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RoBERTa", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "Inspired by the success of multilingual language model (Devlin et al., 2019; Lample and Conneau, 2019) , XLM-RoBERTa (Conneau et al., 2020) significantly scaled up the amount of multilingual training data used in unsupervised MLM pretraining compared to previous work (Lample and Conneau, 2019) and achieved state-of-the-art performance in both monolingual and cross-lingual benchmarks.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 76, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 77, |
| "end": 102, |
| "text": "Lample and Conneau, 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 117, |
| "end": 139, |
| "text": "(Conneau et al., 2020)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 268, |
| "end": 294, |
| "text": "(Lample and Conneau, 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "XLM-RoBERTa", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "BERTweet (Nguyen et al., 2020a) is a domainspecific language model pre-trained on a large corpus of English Tweets. Similar to the success of BioBERT in BioNLP domain and the success of SciBERT (Beltagy et al., 2019) in ScientificNLP domain, BERTweet achieved stateof-the-art performance across many TweetNLP tasks, outperformed its counterparts RoBERTa (Liu et al., 2019) and XLM-RoBERTa (Conneau et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 216, |
| "text": "(Beltagy et al., 2019)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 354, |
| "end": 372, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 389, |
| "end": 411, |
| "text": "(Conneau et al., 2020)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BERTweet", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "ELECTRA (Clark et al., 2020) proposed a new pretraining objective which is different from Masked Language Modelling (Devlin et al., 2019; Liu et al., 2019) . Instead of masking input tokens, ELEC-TRA corrupts the tokens using a small generator network to produce distributions over tokens, while the discriminator tries to guess which tokens are actually corrupted by the generator. ELECTRA achieved state-of-the-art results across many tasks in the GLUE benchmark while using much less compute resources compared to other pre-training methods (Devlin et al., 2019; Liu et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 28, |
| "text": "(Clark et al., 2020)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 116, |
| "end": 137, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 138, |
| "end": 155, |
| "text": "Liu et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 544, |
| "end": 565, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 566, |
| "end": 583, |
| "text": "Liu et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ELECTRA", |
| "sec_num": "3.1.4" |
| }, |
| { |
| "text": "To further boost the performance of our baseline models, we leverage ensemble learning technique. We performed ensemble learning over all of the Transformer models mentioned in the previous section and employed two different ensemble schemes, namely Unweighted Averaging and Majority Voting.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ensemble Learning", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this approach, the final prediction is estimated from the unweighted average of the posterior probabilities from all of our models. Thus, the final prediction is given by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unweighted Averaging", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "p = arg max c 1 M M n=1 p i , p i \u2208 R C (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unweighted Averaging", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "where C is the number of classes, M is the number of models, and p i is the probability vector computed using the softmax function of model i.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unweighted Averaging", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Majority Voting counts the votes of all the models and selects the class with the most votes as the prediction. Formally, the final prediction is given by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Majority Voting", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "v c = M n=1 F i (c), p = arg max c v c", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Majority Voting", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "where v c denotes the votes of class c from all different models, F i is the binary decision of model i, which is either 0 or 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Majority Voting", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "To fine-tune our baseline models, we employ transformers library (Wolf et al., 2019) . We use AdamW optimizer (Loshchilov and Hutter, 2019) with a fixed batch size of 32 and learning rates in the set {1e \u2212 5, 2e \u2212 5, 5e \u2212 5}. We finetune the models for 30 epochs and select the best checkpoint based on performance of the model on the validation set. Table 2 shows the overall results on the validation set. The large version of RoBERTa achieves the highest F1 score on the validation set (compared to other individual models). To our surprise, we find that BERTweet does not outperform the base version of RoBERTa on the validation set, even though BERTweet was trained on English Tweets using the same training procedure of RoBERTa. Finally, XLM-RoBERTa achieves lower F1 score than both RoBERTa and ELECTRA, suggesting that using multilingual pretrained language models may not improve the performance since the shared task is mainly about English Tweets. We also evaluate the performance of our ensemble models. The results show that ensemble learning improves the F1 score compared to each individual model and Unweighted Averaging performs better than Majority Voting on the validation set. We also submitted the predictions of both ensemble schemes to the competition and final results on the leaderboard are shown in Table 3. We notice that Majority Voting performs slightly better than Unweighted Averaging on the hidden test set.", |
| "cite_spans": [ |
| { |
| "start": 65, |
| "end": 84, |
| "text": "(Wolf et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 110, |
| "end": 139, |
| "text": "(Loshchilov and Hutter, 2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 351, |
| "end": 358, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments 4.1 Finetuning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this paper, we introduce a simple but effective approach for identifying informative COVID-19 English Tweets. Despite the simplicity of our approach, it achieves very competitive results in the leaderboard as we ranked 8th of 56 teams participating in total. In future work, we will conduct thorough error analysis and apply visualization techniques to gain more understandings of our models (Murugesan et al., 2019) . Furthermore, we will also extend our approach to other languages. Finally, we will investigate the use of advanced techniques such as transfer learning, few-shot learning, and self-training to improve the performance of our system further (Pan et al., 2017; Huang et al., 2018; Lai et al., 2018; Xie et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 395, |
| "end": 419, |
| "text": "(Murugesan et al., 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 661, |
| "end": 679, |
| "text": "(Pan et al., 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 680, |
| "end": 699, |
| "text": "Huang et al., 2018;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 700, |
| "end": 717, |
| "text": "Lai et al., 2018;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 718, |
| "end": 735, |
| "text": "Xie et al., 2020)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Scibert: Pretrained language model for scientific text", |
| "authors": [ |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Arman", |
| "middle": [], |
| "last": "Cohan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. Scib- ert: Pretrained language model for scientific text. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Bert for joint intent classification and slot filling", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhu", |
| "middle": [], |
| "last": "Zhuo", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Chen, Zhu Zhuo, and W. Wang. 2019. Bert for joint intent classification and slot filling. ArXiv, abs/1902.10909.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Electra: Pre-training text encoders as discriminators rather than generators", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Clark, Minh-Thang Luong, Quoc V. Le, and Christo- pher D. Manning. 2020. Electra: Pre-training text encoders as discriminators rather than generators. ArXiv, abs/2003.10555.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Unsupervised cross-lingual representation learning at scale", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartikay", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Wenzek", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8440--8451", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.747" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 8440- 8451, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of NAACL, pages 4171- 4186.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Zero-shot transfer learning for event extraction", |
| "authors": [ |
| { |
| "first": "Lifu", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Clare", |
| "middle": [], |
| "last": "Voss", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2160--2170", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1201" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lifu Huang, Heng Ji, Kyunghyun Cho, Ido Dagan, Se- bastian Riedel, and Clare Voss. 2018. Zero-shot transfer learning for event extraction. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 2160-2170, Melbourne, Australia. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Supervised transfer learning for product information question answering", |
| "authors": [ |
| { |
| "first": "Tuan", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Trung", |
| "middle": [], |
| "last": "Bui", |
| "suffix": "" |
| }, |
| { |
| "first": "Nedim", |
| "middle": [], |
| "last": "Lipka", |
| "suffix": "" |
| }, |
| { |
| "first": "Sheng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "17th IEEE International Conference on Machine Learning and Applications (ICMLA)", |
| "volume": "", |
| "issue": "", |
| "pages": "1109--1114", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tuan Lai, Trung Bui, Nedim Lipka, and Sheng Li. 2018. Supervised transfer learning for product information question answering. In 2018 17th IEEE Interna- tional Conference on Machine Learning and Appli- cations (ICMLA), pages 1109-1114. IEEE.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A gated self-attention memory network for answer selection", |
| "authors": [ |
| { |
| "first": "Tuan", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [ |
| "Hung" |
| ], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Trung", |
| "middle": [], |
| "last": "Bui", |
| "suffix": "" |
| }, |
| { |
| "first": "Daisuke", |
| "middle": [], |
| "last": "Kihara", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5953--5959", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1610" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tuan Lai, Quan Hung Tran, Trung Bui, and Daisuke Kihara. 2019. A gated self-attention memory net- work for answer selection. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5953-5959, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A simple but effective bert model for dialog state tracking on resource-limited systems", |
| "authors": [ |
| { |
| "first": "Tuan", |
| "middle": [ |
| "Manh" |
| ], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [ |
| "Hung" |
| ], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Trung", |
| "middle": [], |
| "last": "Bui", |
| "suffix": "" |
| }, |
| { |
| "first": "Daisuke", |
| "middle": [], |
| "last": "Kihara", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "8034--8038", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tuan Manh Lai, Quan Hung Tran, Trung Bui, and Daisuke Kihara. 2020. A simple but effective bert model for dialog state tracking on resource-limited systems. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Pro- cessing (ICASSP), pages 8034-8038. IEEE.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Crosslingual language model pretraining", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross- lingual language model pretraining. Advances in Neural Information Processing Systems (NeurIPS).", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "BioBERT: a pre-trained biomedical language representation model for biomedical text mining", |
| "authors": [ |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Wonjin", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungdong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghyeon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunkyu", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [], |
| "last": "Ho So", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaewoo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Bioinformatics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/bioinformatics/btz682" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2019. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv preprint", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A Robustly Optimized BERT Pretrain- ing Approach. arXiv preprint, arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Decoupled Weight Decay Regularization", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Loshchilov", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled Weight Decay Regularization. In Proceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Deepcompare: Visual and interactive comparison of deep learning model performance", |
| "authors": [ |
| { |
| "first": "Sana", |
| "middle": [], |
| "last": "Sugeerth Murugesan", |
| "suffix": "" |
| }, |
| { |
| "first": "Fan", |
| "middle": [], |
| "last": "Malik", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunyee", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Tuan Manh", |
| "middle": [], |
| "last": "Koh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "IEEE computer graphics and applications", |
| "volume": "39", |
| "issue": "5", |
| "pages": "47--59", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sugeerth Murugesan, Sana Malik, Fan Du, Eunyee Koh, and Tuan Manh Lai. 2019. Deepcompare: Visual and interactive comparison of deep learning model performance. IEEE computer graphics and applications, 39(5):47-59.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "PhoBERT: Pre-trained language models for Vietnamese", |
| "authors": [ |
| { |
| "first": "Anh", |
| "middle": [ |
| "Tuan" |
| ], |
| "last": "Dat Quoc Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dat Quoc Nguyen and Anh Tuan Nguyen. 2020. PhoBERT: Pre-trained language models for Viet- namese. Findings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Bertweet: A pre-trained language model for english tweets", |
| "authors": [ |
| { |
| "first": "Thanh", |
| "middle": [], |
| "last": "Dat Quoc Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Vu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "2020", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dat Quoc Nguyen, Thanh Vu, and A. Nguyen. 2020a. Bertweet: A pre-trained language model for english tweets. EMNLP 2020.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "WNUT-2020 Task 2: Identification of Informative COVID-19 English Tweets", |
| "authors": [ |
| { |
| "first": "Thanh", |
| "middle": [], |
| "last": "Dat Quoc Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Afshin", |
| "middle": [], |
| "last": "Vu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mai", |
| "middle": [ |
| "Hoang" |
| ], |
| "last": "Rahimi", |
| "suffix": "" |
| }, |
| { |
| "first": "Linh", |
| "middle": [ |
| "The" |
| ], |
| "last": "Dao", |
| "suffix": "" |
| }, |
| { |
| "first": "Long", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Doan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 6th Workshop on Noisy User-generated Text", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dat Quoc Nguyen, Thanh Vu, Afshin Rahimi, Mai Hoang Dao, Linh The Nguyen, and Long Doan. 2020b. WNUT-2020 Task 2: Identification of Infor- mative COVID-19 English Tweets. In Proceedings of the 6th Workshop on Noisy User-generated Text.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Crosslingual name tagging and linking for 282 languages", |
| "authors": [ |
| { |
| "first": "Xiaoman", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "Boliang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "May", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Nothman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1946--1958", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1178" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaoman Pan, Boliang Zhang, Jonathan May, Joel Nothman, Kevin Knight, and Heng Ji. 2017. Cross- lingual name tagging and linking for 282 languages. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1946-1958, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "7th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In 7th International Conference on Learning Representa- tions, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Morgan Funtowicz, and Jamie Brew. 2019. HuggingFace's Transformers: State-of-the-art Natural Language Processing. arXiv preprint", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "Rémi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1910.03771" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Rémi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. HuggingFace's Trans- formers: State-of-the-art Natural Language Process- ing. arXiv preprint, arXiv:1910.03771.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Self-training with noisy student improves imagenet classification", |
| "authors": [ |
| { |
| "first": "Qizhe", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", |
| "volume": "", |
| "issue": "", |
| "pages": "10684--10695", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qizhe Xie, E. Hovy, Minh-Thang Luong, and Quoc V. Le. 2020. Self-training with noisy student improves imagenet classification. 2020 IEEE/CVF Confer- ence on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "A compareaggregate model with latent clustering for answer selection", |
| "authors": [ |
| { |
| "first": "Seunghyun", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Franck", |
| "middle": [], |
| "last": "Dernoncourt", |
| "suffix": "" |
| }, |
| { |
| "first": "Soon", |
| "middle": [], |
| "last": "Doo", |
| "suffix": "" |
| }, |
| { |
| "first": "Trung", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyomin", |
| "middle": [], |
| "last": "Bui", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 28th ACM International Conference on Information and Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "2093--2096", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seunghyun Yoon, Franck Dernoncourt, Doo Soon Kim, Trung Bui, and Kyomin Jung. 2019. A compare- aggregate model with latent clustering for answer selection. In Proceedings of the 28th ACM Inter- national Conference on Information and Knowledge Management, pages 2093-2096.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "A high level overview of our proposed model for the task." |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td>Model</td><td>Test F1</td></tr><tr><td colspan=\"2\">Ensemble (averaging) 0.8988</td></tr><tr><td>Ensemble (voting)</td><td>0.9008</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Performance of individual models as well as ensemble models on the validation set." |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Performance of our system on the test set." |
| } |
| } |
| } |
| } |