| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T01:11:39.935405Z" |
| }, |
| "title": "Foundation Models of Scientific Knowledge for Chemistry: Opportunities, Challenges and Lessons Learned", |
| "authors": [ |
| { |
| "first": "Sameera", |
| "middle": [], |
| "last": "Horawalavithana", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pacific Northwest National Laboratory", |
| "location": { |
| "settlement": "Richland", |
| "region": "WA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ellyn", |
| "middle": [], |
| "last": "Ayton", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pacific Northwest National Laboratory", |
| "location": { |
| "settlement": "Richland", |
| "region": "WA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Shivam", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pacific Northwest National Laboratory", |
| "location": { |
| "settlement": "Richland", |
| "region": "WA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Howland", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pacific Northwest National Laboratory", |
| "location": { |
| "settlement": "Richland", |
| "region": "WA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Megha", |
| "middle": [], |
| "last": "Subramanian", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pacific Northwest National Laboratory", |
| "location": { |
| "settlement": "Richland", |
| "region": "WA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Vasquez", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pacific Northwest National Laboratory", |
| "location": { |
| "settlement": "Richland", |
| "region": "WA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Cosbey", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pacific Northwest National Laboratory", |
| "location": { |
| "settlement": "Richland", |
| "region": "WA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Glenski", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pacific Northwest National Laboratory", |
| "location": { |
| "settlement": "Richland", |
| "region": "WA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Svitlana", |
| "middle": [], |
| "last": "Volkova", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pacific Northwest National Laboratory", |
| "location": { |
| "settlement": "Richland", |
| "region": "WA" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Foundation models pre-trained on large corpora demonstrate significant gains across many natural language processing tasks and domains e.g., law, healthcare, education, etc. However, only limited efforts have investigated the opportunities and limitations of applying these powerful models to science and security applications. In this work, we develop foundation models of scientific knowledge for chemistry to augment scientists with the advanced ability to perceive and reason at scale previously unimagined. Specifically, we build large-scale (1.47B parameter) general-purpose models for chemistry that can be effectively used to perform a wide range of in-domain and out-of-domain tasks. Evaluating these models in a zero-shot setting, we analyze the effect of model and data scaling, knowledge depth, and temporality on model performance in context of model training efficiency. Our novel findings demonstrate that (1) model size significantly contributes to the task performance when evaluated in a zero-shot setting; (2) data quality (aka diversity) affects model performance more than data quantity; (3) similarly, unlike previous work (Luu et al., 2021) temporal order of the documents in the corpus boosts model performance only for specific tasks, e.g., SciQ; and (4) models pre-trained from scratch perform better on in-domain tasks than those tuned from general-purpose models like Open AI's GPT-2.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Foundation models pre-trained on large corpora demonstrate significant gains across many natural language processing tasks and domains e.g., law, healthcare, education, etc. However, only limited efforts have investigated the opportunities and limitations of applying these powerful models to science and security applications. In this work, we develop foundation models of scientific knowledge for chemistry to augment scientists with the advanced ability to perceive and reason at scale previously unimagined. Specifically, we build large-scale (1.47B parameter) general-purpose models for chemistry that can be effectively used to perform a wide range of in-domain and out-of-domain tasks. Evaluating these models in a zero-shot setting, we analyze the effect of model and data scaling, knowledge depth, and temporality on model performance in context of model training efficiency. Our novel findings demonstrate that (1) model size significantly contributes to the task performance when evaluated in a zero-shot setting; (2) data quality (aka diversity) affects model performance more than data quantity; (3) similarly, unlike previous work (Luu et al., 2021) temporal order of the documents in the corpus boosts model performance only for specific tasks, e.g., SciQ; and (4) models pre-trained from scratch perform better on in-domain tasks than those tuned from general-purpose models like Open AI's GPT-2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The emergence of foundation models (Bommasani et al., 2021) such as large-scale autoencoding models (e.g., BERT (Devlin et al., 2018) , RoBERTa ) and autoregressive language models (e.g., GPT-2 (Radford et al., 2019) , GPT-3 (Brown et al., 2020) , Megatron-Turing (Smith et al., 2022) and Gopher (Rae et al., 2021) ) as well as multimodal vision and language models, such as FLAVA (Singh et al., 2021) and Perceiver (Jaegle et al., 2021) , established a paradigm shift in Artificial Intelligence (AI). These foundation models, also called neural platforms, are built using self-supervised pretraining at scale. They are then able to be easily adapted to a wide range of downstream tasks via transfer learning (Bommasani et al., 2021) and finetuning .", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 59, |
| "text": "(Bommasani et al., 2021)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 112, |
| "end": 133, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 188, |
| "end": 216, |
| "text": "GPT-2 (Radford et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 219, |
| "end": 245, |
| "text": "GPT-3 (Brown et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 264, |
| "end": 284, |
| "text": "(Smith et al., 2022)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 289, |
| "end": 314, |
| "text": "Gopher (Rae et al., 2021)", |
| "ref_id": null |
| }, |
| { |
| "start": 381, |
| "end": 401, |
| "text": "(Singh et al., 2021)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 416, |
| "end": 437, |
| "text": "(Jaegle et al., 2021)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 709, |
| "end": 733, |
| "text": "(Bommasani et al., 2021)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The wide community adoption of foundation models can be explained by their key properties, two of which are emergent behavior and homogenization -which also make foundation models appealing for adaption across science and security domains. Emergence, or emergent behavior, reflect new behaviors that a model introduces or is capable of that it was not explicitly trained to perform. Homogenization is the consolidation of methods for building machine learning systems across a wide range of tasks. Another key advantage of scaling language models is that they perform competitively on language tasks using in-context learning without fine-tuning or gradient updates. Thus, in-context learning allows foundation models to be effectively used across new downstream tasks with only simple instructions and a few optional examples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work we focus on a science domain (chemistry) and demonstrate the value and limitations of large-scale language models evaluated across a wide range of in-domain (science-focused) and out-of-domain tasks. Unlike the majority of work on foundation models that focuses on pretraining these models on book corpora, web pages, Wikipedia and mixed sources, e.g., the Pile , we pretrain our models on scientific literature. Using scientific literature presents unique opportunities and challenges. Opportunities include the scale and diversity of scientific literature, the explicit structure, and explicit alignment across different modalities in the papers, e.g., table and figure references. Challenges include limited benchmarks that can be used to perform model evaluation, model prompting and interactions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There are three major contributions of this work: (1) we collect and release a 0.67TB dataset covering research publication data across 10+ sources for chemistry; (2) we release 28 auto-regressive foundation models for chemistry that have been pretrained from scratch; and (3) we present a rigorous evaluation of model performance on 15+ indomain and out-of-domain tasks that investigates the effects of model and data scaling, knowledge depth (aka diversity), and temporal order on performance as described in research questions below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(RQ1) Science-Focused Benchmarks What are the strengths and weaknesses of foundation models pretrained on scientific literature when evaluated on out-of-domain vs. in-domain tasks?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(RQ2) Scaling Effect How does model scale affect the downstream performance? Do neural scaling laws presented in (Kaplan et al., 2020) hold for the foundation models for science?", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 134, |
| "text": "(Kaplan et al., 2020)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(RQ3) Diversity Effect How does the depth of scientific knowledge, e.g., from paper abstracts vs. full text, affect downstream performance? (RQ4) Temporal Effect How does the recency of scientific knowledge, e.g., when manipulating the temporal order of the documents processed by the model, affect downstream performance?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section we summarize previous efforts in two categories: mixed-domain continual pretraining that continues pretraining of a base model on domain data and in-domain pretraining from scratch that pretrains a from scratch on domain data. We present a model summary in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 273, |
| "end": 280, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Many efforts have focused on continual pretraining of a BERT (Devlin et al., 2018) base model. Several models have been developed for the biomedical domain and the most frequently used corpora for domain-specific continual preraining are PubMed abstracts and PubMed Central full-text articles (PMC) (Lee et al., 2020; Peng et al., 2019; Phan et al., 2021) . In the Chemistry domain, Guo et al. (2021) performed continual pretraining of a base BERT model on 200K chemistry journal articles for product extraction (ChemBERT) and reaction role labeling (ChemRxnBERT).", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 82, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 299, |
| "end": 317, |
| "text": "(Lee et al., 2020;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 318, |
| "end": 336, |
| "text": "Peng et al., 2019;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 337, |
| "end": 355, |
| "text": "Phan et al., 2021)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mixed-Domain Continual Pretraining", |
| "sec_num": null |
| }, |
| { |
| "text": "In-Domain Pretraining from Scratch Previous work has shown that pretraining models from scratch on domain-specific data has a significant benefit over continual pretraining of generaldomain language models (Gu et al., 2021) . This is mainly due to the availability of in-domain data for both generating the vocabulary and pretraining. SciBERT (Beltagy et al., 2019) is pretrained according to this procedure using the vocabulary generated from computer science and biomedical domains. PubMedBERT (Gu et al., 2021) is another example of pretraining the base BERT model from scratch using PubMed. Unlike any previous work, we use both continual and from scratch pretraining to build the largest foundation model for Chemistry (1.47B) on the largest (0.67TB) and the most diverse corpus (10+ sources) collected to date.", |
| "cite_spans": [ |
| { |
| "start": 206, |
| "end": 223, |
| "text": "(Gu et al., 2021)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 343, |
| "end": 365, |
| "text": "(Beltagy et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 496, |
| "end": 513, |
| "text": "(Gu et al., 2021)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mixed-Domain Continual Pretraining", |
| "sec_num": null |
| }, |
| { |
| "text": "Unlike the majority of related models that rely on a base BERT (or variant) model, we adapt the Open-AI's GPT-2 transformer decoder architecture (Radford et al., 2019) to train autoregressive language models for Chemistry. To understand the impact of model size (RQ2), we experiment with four different Transformer sizes: small (S), medium (M), large (L), and extra-large (XL). These models differ in the number of decoder layers, hidden size of the model, and the number of attention heads in transformer blocks as shown in Table 2 .", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 167, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 525, |
| "end": 532, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Pretraining", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our experiments leverage the GPT-NeoX Python library (Andonian et al., 2021) developed with Megatron (Shoeybi et al., 2019) and Deep-Speed (Rasley et al., 2020) . We optimize the autoregressive log-likelihood (i.e., cross-entropy loss) averaged over a 2048-token context. We set the micro batch size per GPU as 4, and the learning rate to 2 \u00d7 10 \u22124 , and rely on the cosine decay. We use an Adam optimizer with \u03b2 1 = 0.9, \u03b2 2 = 0.99, and \u03c3 = 10 \u22128 and clip the gradient norm at 1.0. In addition, ZeRO optimizer (Rajbhandari et al., 2019) was used to reduce memory footprint by distributing optimizer states across several processes.", |
| "cite_spans": [ |
| { |
| "start": 53, |
| "end": 76, |
| "text": "(Andonian et al., 2021)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 101, |
| "end": 123, |
| "text": "(Shoeybi et al., 2019)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 139, |
| "end": 160, |
| "text": "(Rasley et al., 2020)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Pretraining", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To reduce memory and increase training throughput, we use mixed-precision training (Rasley et al., 2020) and the parallel attention and feed-forward implementations available in GPT-NeoX (Black et al., 2022) . We also use the Rotary positional embeddings instead of the learned positional embeddings used in the GPT-2 model (Radford et al., 2019) because they offer performance advantages in tasks with longer texts by capturing relative position dependency in self-attention. Our models are pretrained across multiple workers with data parallelism. As the largest model in our experiments fit on a single GPU, we didn't use the model (tensor) or pipeline parallelism. Models are pretrained from scratch for a total of 320K steps. The original GPT-2 models are fine-tuned for 150K steps. We perform experiments in a single DGX-A100 machine with 8 80Gb GPUs.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 104, |
| "text": "(Rasley et al., 2020)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 187, |
| "end": 207, |
| "text": "(Black et al., 2022)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Pretraining", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We collected a large corpus of 53.45 million chemistry-focused scientific articles and abstracts, resulting in 670GB of text data. As shown in Table 3, our corpus was collected from 10 different data sources: Arxiv, Aminer (AMiner), CORD-19 (Wang et al., 2020b) , CORE (Pontika et al., 2016) , Microsoft Academic Graph (MAG) (Wang et al., 2020a) , OSTI, PubMed (abstracts and fulltexts), and the Web of Science (WoS). See Appendix A for full data descriptions. Because the data sources we relied on comprise research publications from many science domains, we sampled articles using a list of domain-specific keywords for chemistry to create the dataset summarized in Table 3 . These keywords were extracted by using a Correlation Explanation (Gallagher et al., 2017) topic model followed by manual filtering by subject matter experts. This resulted in a list of more than 1K chemistry-related entities, ranging from compound names like ethyl acetate, methyl methacrylate, sulfoxide, etc. to experiment and procedures like tunneling microscopy, neutralization, enzymatic hydrolysis, etc. Data Cleaning Recent research has shown that duplicates in training data can significantly impact the downstream task performance of LLMs (Lee et al., 2021; Carlini et al., 2022) . To this end, we performed deduplication of our corpus based on overlap of titles within and across data sources. We processed titles to strip punctuation and casefold and considered two articles A 1 and A 2 to be duplicates if they had the same processed title. With this technique, we were able to remove significant amounts of duplicate scientific articles both within and across sources. The deduplication process reduced our corpus from 875GB to 670GB (67.8M to 53.5M publications), removing 14.3M duplicates.", |
| "cite_spans": [ |
| { |
| "start": 233, |
| "end": 261, |
| "text": "CORD-19 (Wang et al., 2020b)", |
| "ref_id": null |
| }, |
| { |
| "start": 269, |
| "end": 291, |
| "text": "(Pontika et al., 2016)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 325, |
| "end": 345, |
| "text": "(Wang et al., 2020a)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 1226, |
| "end": 1244, |
| "text": "(Lee et al., 2021;", |
| "ref_id": null |
| }, |
| { |
| "start": 1245, |
| "end": 1266, |
| "text": "Carlini et al., 2022)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 668, |
| "end": 675, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Collection and Processing", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Tokenization As used in GPT-2 model, we use a Byte Pair Encoding (BPE) tokenizer. We train BPE tokenizers for each data sample with a vocabulary size of 64K as preliminary experiments varying vocabulary sizes from 64K to 256K for smaller scale model pretraining did not show significant differences in performance. We compare the GPT-2 vocabulary generated from the WebText and the in-domain vocabularies generated from our corpora and find that the in-domain vocabulary breaks chemical entities into fewer tokens. For example, dimethylnitroxide was tokenized into #dimethyl, #nitr, #oxide using the in-domain vocabulary and #dim, #ethyl, #nit, #rox, #ide using the GPT-2 vocabulary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Collection and Processing", |
| "sec_num": "4" |
| }, |
| { |
| "text": "This section presents the analysis of 28 pretrained models evaluated on 15+ in-domain and out-ofdomain downstream tasks (RQ1, Section 5.1). We investigate the effects of model and data scaling (RQ2, Section 5.2), knowledge diversity (RQ3, Section 5.3), and temporal order (RQ4, Section 5.4) on the downstream performance. We also compare the results from continual vs. from scratch pretraining (Section 5.5) and present the analysis of large-scale training efficiency (Section 5.6).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Baseline Models As we use a similar model architecture, we identify Open AI's GPT-2 (Radford et al., 2019) as a baseline comparison model. We compare our performance with four variants of the original GPT-2 models, corresponding to small (S), medium (M), large (L), and extra-large (XL) sized transformer architectures shown in Table 2 . We note that GPT-2 models were pretrained on Web-Text -8 million web documents (40Gb). Thus, we also include a base GPT-2 model (medium) that has been updated with continual pretraining using our Combined \u2329A+FT\u232a dataset.", |
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 106, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 328, |
| "end": 335, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our Models We pretrained models with individual datasets (AMiner, CORE, MAG, PubMed, S2ORC, WOS) and combined abstracts and fulltexts. Our goal is to systematically study data biases in the model performance when pretraining models with individual datasets. For example, PubMed publications cover mostly bio-medicinal terms (Gu et al., 2021) , while the majority of S2ORC publications are from medicine, biology, physics, and mathematics (Lo et al., 2020) . We only use 4 GPUs for the models pretrained with individual datasets and 8 GPUs for the rest. This is to control the number of tokens seen during model pretraining (320,000 steps * 4 GPUs * 4 micro batch size * 2,048 context size = 10B tokens) relative to the maximum number of tokens available in the respective datasets (as reported in Table 3 ). We also trained one XL (4x) model with 4x larger batch size than what used in XL model to evaluate the impact of the number of training tokens.", |
| "cite_spans": [ |
| { |
| "start": 324, |
| "end": 341, |
| "text": "(Gu et al., 2021)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 438, |
| "end": 455, |
| "text": "(Lo et al., 2020)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 797, |
| "end": 804, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We evaluate our models using several benchmarks to assess the effectiveness in both in-domain and out-of-domain tasks. The benchmarks we include are described in Appendix B. We use the lmevaluation-harness Python repository (Gao et al., Table 4 : Downstream Zero-shot In-Domain Task Performance. We use \u2021 to indicate the baseline model tuned from the base GPT-2 model. Pile performance is reported using perplexity, with all other tasks reported using accuracy. We highlight the top-4 performance per task in bold, with top performance indicated with an underline. XL (4x) model is trained with 4x larger batch size that used in other models. 2021) for the benchmark implementation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 237, |
| "end": 244, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Zero-shot Performance", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In-domain Evaluation We consider five existing chemistry benchmarks, specifically Hendryck-sTest (Hendrycks et al., 2020) for high school (HT-HC) and college (HT-CC) levels, and sciencefocused -ARC , SciQ (Welbl et al., 2017) , OpenBookQA (Mihaylov et al., 2018) , Pile-PubMed-Abstracts ). As shown in Table 4 , one or more of our models outperform baseline GPT-2 models for the two chemistry tasks, general science QA (SciQ) and the sciencefocused language modelling. Of the remaining tasks, our models perform within 1-4% of GPT-2 baselines.", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 121, |
| "text": "(Hendrycks et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 205, |
| "end": 225, |
| "text": "(Welbl et al., 2017)", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 239, |
| "end": 262, |
| "text": "(Mihaylov et al., 2018)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 302, |
| "end": 309, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Zero-shot Performance", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Out-of-domain Evaluation We evaluate outof-domain performance using 9 commonly used LLM benchmarks: BoolQ (Clark et al., 2019) , CB (De Marneffe et al., 2019) , WIC (Pilehvar and Camacho-Collados, 2018), WSC (Levesque et al., 2012 ), MathQA (Amini et al., 2019) , PIQA (Bisk et al., 2020) , PubMedQA , Lambada (Paperno et al., 2016) and WikiText (Merity et al., 2016) . As shown in Table 5 , our models outperform baseline GPT-2 models for CB, WIC and WSC and match the best accuracy for BoolQ but the GPT-2 baselines outperform on the remaining tasks, particularly Lambada and Wikitext -the two general language modeling tasks.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 126, |
| "text": "(Clark et al., 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 136, |
| "end": 158, |
| "text": "Marneffe et al., 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 208, |
| "end": 230, |
| "text": "(Levesque et al., 2012", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 241, |
| "end": 261, |
| "text": "(Amini et al., 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 269, |
| "end": 288, |
| "text": "(Bisk et al., 2020)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 310, |
| "end": 332, |
| "text": "(Paperno et al., 2016)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 346, |
| "end": 367, |
| "text": "(Merity et al., 2016)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 382, |
| "end": 389, |
| "text": "Table 5", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Zero-shot Performance", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Previous work (Kaplan et al., 2020) has shown that upstream cross entropy loss scales as a power-law with model size, dataset size, and the amount of compute. In this section, we revisit these claims on scaling Transformer architectures.", |
| "cite_spans": [ |
| { |
| "start": 14, |
| "end": 35, |
| "text": "(Kaplan et al., 2020)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scaling Effect", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Analyzing upstream cross entropy loss During pretraining, we group each dataset into training/validation/test (949/50/1) splits. We report the model performance on validation data using cross entropy loss in nats. This measure will be averaged over the 2048-token context. We find that the cross entropy loss decreases as we increase the model size (as shown in Figure 2 ). Larger models reach a given loss value in a higher rate than the smaller models. This observation illustrates the relationship between model performance (as measured by the upstream cross entropy loss) and model size, confirming (Kaplan et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 603, |
| "end": 624, |
| "text": "(Kaplan et al., 2020)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 362, |
| "end": 370, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Scaling Effect", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Analyzing downstream task performance Can we speculate downstream task performance of a model from the pretraining performance? First, we find that the models perform considerably well on Pile in comparison to the Lambada or WikiText.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scaling Effect", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "There is a 48% performance advantage in this task over the best performing baseline GPT-2 model. This may be due to the models capturing scientific language better than general language. It is im- portant to note that we exclude PubMed Abstracts in the individual data collection to avoid potential contamination between the training and Pile testing data. As shown in Table 4 , larger models perform well on these language modeling tasks. Second, we noticed that the XL (4x) model trained for more tokens performs significantly better than the similar sized XL model. Specifically, XL (4x) model was trained with 128 total batch size compared to the 32 total batch size used in XL model. XL (4x) model achieves the lowest Lambada and WikiText perplexity values across all our models trained from scratch (as shown in Table 5 ). The same model also achieves the best SciQ performance with 0.84 accuracy and comparable in other tasks performance with the XL model. This experiment highlights the importance of training models with larger batch size. We note that the baseline models (Radford et al., 2019) were trained with 4x larger batch size (total batch size 512) than what used in XL (4x) model. We believe that the XL (4x) model can reach the similar perplexity values when trained for this data scale.", |
| "cite_spans": [ |
| { |
| "start": 1082, |
| "end": 1104, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 369, |
| "end": 376, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 818, |
| "end": 825, |
| "text": "Table 5", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Scaling Effect", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Third, we find that zero-shot task performance in SciQ, HT-CC and ARC-E increases as we increase the model size (see Table 5 ). However, there is no clear relationship between the task performance and the model sizes in the rest of benchmark datasets. We suggest that pretraining performance may not be the ideal indicator to speculate the overall downstream task performance, especially in the zero-shot setting. However, model size significantly contributes to the task performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 117, |
| "end": 124, |
| "text": "Table 5", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Scaling Effect", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "While abstracts often provide a summary of scientific publications, the full text contains more details.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Diversity Effect", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "In this section, we analyze the performance of models trained on paper abstracts versus full texts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Diversity Effect", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "First, the XL models trained with the combined abstract dataset achieve the lowest perplexity score (22.77) on the Pile -a 45% performance advantage over the full text version. There are might be several factors that contribute to this, but one may be the focused language in abstracts. Second, the model trained with the combined abstracts achieves the second best accuracy (0.83 in comparison to 0.79 for the full text model) in SciQ. Some of the models pretrained on individual abstract data achieve comparable performance in SciQ, e.g., MAG and AMiner models achieve 0.8 and 0.78 accuracy, respectively. We believe the diversity of scientific knowledge provided from the abstract data is useful since SciQ questions span biology, chemistry, earth science, and physics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Diversity Effect", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Third, we compare model performance trained with abstracts vs. full texts in the HT task and see that the best accuracy is achieved using the MAG and S2ORC datasets rather than the combined abstracts. This suggests the importance of contextual knowledge provided by different data sources.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Diversity Effect", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Finally, combined full text model performs better than the model trained with the abstracts in all outof-domain tasks except PIQA. This performance difference may be due to the more expressive and diverse language presented in the full texts than in the abstracts. Thus, expanding full text coverage may improve out-of-domain task generalization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Diversity Effect", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Scientific knowledge evolves over time reflecting new research ideas, innovations, and findings. In this section, we test how continual pretraining on temporal-aligned scientific publications impacts downstream performance. For this experiment, we maintain two variants of the MAG dataset with random-ordered and temporal-ordered articles, splitting each into ten equal subsets. We continue pretraining a base medium (M) sized model iteratively with the subsets in the order they appeared in the respective data variant. For example, in the temporally-aligned experiments, we first pretrain a model with 3.4M (10%) articles from before 1978, and then use it as the base model to continue pretraining with another 3.4M (10%) articles from between 1978 and 1989. We train the initial model for 150K steps and each subsequent model for 10K steps with additional data. Figure 3 shows the performance of model checkpoints across in-domain and out-of-domain tasks.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 865, |
| "end": 873, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Temporal Effect", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "There are two key findings. First, SciQ and ARC-E zero-shot task performances improve over time with the models trained with temporally-ordered scientific texts (as shown in Figure 3b ). For example, SciQ accuracy improves from 0.64 to 0.73 from the base model checkpoint to the final model checkpoint. Similarly, ARC-E accuracy improves from 0.43 to 0.45. This is due to the temporal order of the knowledge acquired by the model. When the model was pretrained with random-ordered data subsets, we observe only a slight (< 1%) performance increase (as shown in Figure 3a) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 174, |
| "end": 183, |
| "text": "Figure 3b", |
| "ref_id": null |
| }, |
| { |
| "start": 561, |
| "end": 571, |
| "text": "Figure 3a)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Temporal Effect", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "There are mixed patterns in performance across out-of-domain tasks. For example, a slight performance increase in the PIQA, CB, PubMedQA, and WIC over time with the models trained with temporally-ordered scientific texts. On the other hand, there is a performance drop in the BoolQ and WSC over time. This may be due to the catastrophic forgetting prevalent in continual learning (Ramasesh et al., 2021) . Future work will in- Figure 3 : The effect of temporal order of publications during pretraining. We align publications in the MAG corpus by year and split them into ten equal subsets. We repeat the process in a randomly-ordered corpus for comparison, recording model checkpoints after performing continual pretraining on each data subset.", |
| "cite_spans": [ |
| { |
| "start": 380, |
| "end": 403, |
| "text": "(Ramasesh et al., 2021)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 427, |
| "end": 435, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Temporal Effect", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "vestigate other confounding factors that may contribute to this performance patterns.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Temporal Effect", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "In this section, we test whether the continual pretraining of a base GPT model with additional domain-specific data is helpful in the downstream task performance. We report the zero-shot performance of the tuned model across in-domain (Table 4) and out-of-domain (Table 5 ) tasks. We have two main observations from this experiment.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 263, |
| "end": 271, |
| "text": "(Table 5", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Continual vs. From Scratch Pretraining", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "First, fine-tuned models fall behind other baselines in a majority of in-domain tasks. HT-CC is the only in-domain task that the tuned model outperforms the rest of models, yet fails to outperform the best performing model trained from scratch.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual vs. From Scratch Pretraining", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "Second, fine-tuned models have a significant performance drop in the general language modeling tasks (Lambada and Wikitext). For example, the tuned model records 6x performance drop in the Wikitext compared to the best performing model. There are several factors in the continual pretraining that may contribute to this. As the tuned model uses the original GPT-2 vocabulary, it must use the fragmented general subwords to tokenize the chemistry terms available in our corpora. On the other hand, the tuned model starts with the suboptimal initialization from the general-domain language model (Gu et al., 2021) . This initialization may diverge the model in the optimization process that may not be recovered.", |
| "cite_spans": [ |
| { |
| "start": 594, |
| "end": 611, |
| "text": "(Gu et al., 2021)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual vs. From Scratch Pretraining", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "We use several dimensions to describe the training efficiency, i.e., #FLOPs, throughput (speed), and memory. We compare these compute dimensions across the four model sizes described in the Table 2. The smallest (S) model has 59% FLOPs of the largest (XL) model, twice the speed (steps/s), 32% per device GPU memory savings, and 76% total parameter savings (see Figure 4) . With such compute budget, small (S) models only outperforms the XL model in 21% in-domain and 34% out-of-domain evaluation tasks. This suggests the importance of compute budget required in scaling foundation models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 362, |
| "end": 371, |
| "text": "Figure 4)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training Efficiency", |
| "sec_num": "5.6" |
| }, |
| { |
| "text": "In this paper, we collected and released 0.67TB of research publication data collected across 10+ sources for chemistry. We pretrained and released 25+ foundation models for chemistry. We rigorously analyzed model performance on 15+ indomain and out-of-domain tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "AMiner ArnetMiner (AMiner) is a service that crawls research publications, performs profile extraction of scientists, models academic networks by integrating publication data from the existing libraries. For the experiments described in this work, we use a sub-sampled version of the data presented in the Open Academic Graph (OAG) version of the AMiner dataset, which originally consisted of more that 172M articles, with 18.5M chemistry-related abstracts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Data Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "CORE COnnecting REpositories (CORE) (Pontika et al., 2016) is a large-scale aggregation system which provides an open access to the global network of scientific journals and publications. CORE currently contains more than 207M openaccess articles collected from over 10 thousand data providers, out of which more than 92M are open access full-text research papers. We sub-sampled the original collect into our chemistry-specific corpus consisting of more than 7M full-text articles.", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 58, |
| "text": "(Pontika et al., 2016)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Data Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "CORD-19 CORD-19 corpus contains COVID-19 (Cord19) and other coronavirus-related publications (e.g. SARS, MERS, etc.) from PubMed's PMC open access corpus, bioRxiv, and medRxiv pre-prints, in addition to COVID-19 articles maintained by the World Health Organization (WHO).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Data Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "MAG Microsoft Academic Graph (MAG) is a heterogeneous graph created by extracting knowledge from scholarly publications on the web (Wang et al., 2020a) . The data used in this work is a subsample from the OAG version of the MAG dataset, which originally consisted of > 208M articles, with 34M chemistry-related articles with abstracts.", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 151, |
| "text": "(Wang et al., 2020a)", |
| "ref_id": "BIBREF54" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Data Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "PubMed PubMed is a domain-specific data source that allows for search and retrieval of the biomedical and life sciences literature. It is maintained by the National Centre for Biotechnology Information (NCBI) at the U.S. National Library of Medicine (NLM). For this work we utilized the PubMed Central data provided in the Pile corpus . As presented in Table 3 the sub-sampled data consists of documents with more than 280K abstracts and 700K full text articles.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 353, |
| "end": 360, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Data Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "The Semantic Scholar Open Research Corpus (S2ORC) (Lo et al., 2020 ) is a large academic corpus consisting of 81.1M documents. The data includes the metadata, abstracts, bibliographical references and full-text publications for over 8M open access research articles. In this work, we utilize the sub-sampled version of the original data specific to chemistry, which includes more than 10M abstracts.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 66, |
| "text": "(Lo et al., 2020", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ORC", |
| "sec_num": null |
| }, |
| { |
| "text": "WoS The Web of Science (WoS) is a multidiscipline citation database produced by the Institute of Scientific Information. The platform hosts over 171M records across various disciplines, which, when sub-sampled for our chemistry domain, rounded to more than 7M records with abstracts available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ORC", |
| "sec_num": null |
| }, |
| { |
| "text": "HendrycksTest-Chemistry The Hendrycks Test (Hendrycks et al., 2020 ) is a large scale collection of multiple choice questions covering 57 subjects. In our experiments, we subsampled college chemistry (HT-CC) and high school chemistry (HT-HC). HT-CC contains 100 questions related to analytical, organic, inorganic, physical, etc. and HT-HC contains 203 questions related chemical reactions, ions, acids and bases, etc.", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 66, |
| "text": "(Hendrycks et al., 2020", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "ARC The ARC dataset contains 7,787 genuine grade-school level, science MCQs and is partitioned into a Challenge Set (ARC-C) and an Easy Set (ARC-E). Additionally, 14M science-related sentences are provided with relevant knowledge to answer the ARC questions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "SciQ The SciQ dataset (Welbl et al., 2017) contains 13,679 crowdsourced multiple-choice science exam questions about Physics, Chemistry and Biology, among others.", |
| "cite_spans": [ |
| { |
| "start": 22, |
| "end": 42, |
| "text": "(Welbl et al., 2017)", |
| "ref_id": "BIBREF56" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "OpenBookQA The OpenBookQA (Mihaylov et al., 2018) dataset consists of 5,957 multiple choice questions and 1,326 elementary-level science facts. The facts alone do not contain enough information to correctly answer the multiple choice questions, therefore the task is designed to evaluate systems beyond paraphrase matching.", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 49, |
| "text": "(Mihaylov et al., 2018)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Pile PubMed Abstracts The Pile dataset contains 800GB of diverse text sources for benchmarking language models. We limit this task to only include abstracts from the Pile's PubMed collection. As this is framed as a language modeling task, we report word level perplexity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "BoolQ BoolQ (Clark et al., 2019 ) is a reading comprehension dataset comprised of 16k real, naturally formed queries to the Google search engine with a yes or no answer. Each question-answer pair is accompanied by a Wikipedia article providing evidence to support the correct answer.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 31, |
| "text": "(Clark et al., 2019", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "CB Commitment Bank (CB) (De Marneffe et al., 2019 ) is a 3-way classification of textual entailment (true, false, unknown) from 1,200 short text segments where at least one sentence contains an embedded clause. The dataset contains passages from three sources: the Wall Street Journal, the British National Corpus, and Switchboard.", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 49, |
| "text": "Marneffe et al., 2019", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "WIC The Word-in-Context dataset (WIC) (Pilehvar and Camacho-Collados, 2018) is a benchmark for evaluating context-sensitive word embeddings. The task is to classify if a target word has the same meaning in two context sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "WSC The Winograd Schema Challenge (WSC) (Levesque et al., 2012) dataset is a collection of 804 sentences in which the task is to resolve coreferences.", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 63, |
| "text": "(Levesque et al., 2012)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "MathQA MathQA (Amini et al., 2019 ) is a dataset containing 37k multiple choice math word problems built from the existing dataset, AQuA (Ling et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 14, |
| "end": 33, |
| "text": "(Amini et al., 2019", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 137, |
| "end": 156, |
| "text": "(Ling et al., 2017)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "PIQA The Physical Interactions: Question Answering (PIQA) (Bisk et al., 2020) benchmark dataset provides 21k questions about the physical world and plausible interactions encountered by humans. Annotators provided correct and incorrect answers to questions extracted from instructables.com, a website of instructions for completing many everyday tasks.", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 77, |
| "text": "(Bisk et al., 2020)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "PubMedQA The PubMedQA dataset ) is a collection of 273.5k biomedical research questions and related PubMed articles with yes/no/maybe answers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Lambada Lambada (Paperno et al., 2016) contains passages and target sentences from 5,325 novels collected from Book Corpus (Zhu et al., 2015) , and the goal is to predict the last word of the target sentence given the context passage. This task was designed to test genuine language understanding since accurate prediction of the final word would be improbable without the context passage.", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 38, |
| "text": "(Paperno et al., 2016)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 123, |
| "end": 141, |
| "text": "(Zhu et al., 2015)", |
| "ref_id": "BIBREF58" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "WikiText The Wikitext benchmark (Merity et al., 2016 ) is a language modeling dataset of 29k articles from Wikipedia. Only articles classified as Good or Featured by Wikipedia editors are included since they are considered to be well written and neutral in language. All results are reported on Wikitext-2.", |
| "cite_spans": [ |
| { |
| "start": 32, |
| "end": 52, |
| "text": "(Merity et al., 2016", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Task Descriptions", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Publicly available clinical bert embeddings", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Alsentzer", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Willie", |
| "middle": [], |
| "last": "Murphy", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Hung", |
| "middle": [], |
| "last": "Boag", |
| "suffix": "" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "Weng", |
| "suffix": "" |
| }, |
| { |
| "first": "Tristan", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Naumann", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mcdermott", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.03323" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Alsentzer, John R Murphy, Willie Boag, Wei- Hung Weng, Di Jin, Tristan Naumann, and Matthew McDermott. 2019. Publicly available clinical bert embeddings. arXiv preprint arXiv:1904.03323.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Mathqa: Towards interpretable math word problem solving with operation-based formalisms", |
| "authors": [ |
| { |
| "first": "Aida", |
| "middle": [], |
| "last": "Amini", |
| "suffix": "" |
| }, |
| { |
| "first": "Saadia", |
| "middle": [], |
| "last": "Gabriel", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Rik", |
| "middle": [], |
| "last": "Koncel-Kedziorski", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1905.13319" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aida Amini, Saadia Gabriel, Peter Lin, Rik Koncel- Kedziorski, Yejin Choi, and Hannaneh Hajishirzi. 2019. Mathqa: Towards interpretable math word problem solving with operation-based formalisms. arXiv preprint arXiv:1905.13319.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "GPT-NeoX: Large scale autoregressive language modeling in pytorch", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Andonian", |
| "suffix": "" |
| }, |
| { |
| "first": "Quentin", |
| "middle": [], |
| "last": "Anthony", |
| "suffix": "" |
| }, |
| { |
| "first": "Stella", |
| "middle": [], |
| "last": "Biderman", |
| "suffix": "" |
| }, |
| { |
| "first": "Sid", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Preetham", |
| "middle": [], |
| "last": "Gali", |
| "suffix": "" |
| }, |
| { |
| "first": "Leo", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Hallahan", |
| "suffix": "" |
| }, |
| { |
| "first": "Josh", |
| "middle": [], |
| "last": "Levy-Kramer", |
| "suffix": "" |
| }, |
| { |
| "first": "Connor", |
| "middle": [], |
| "last": "Leahy", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucas", |
| "middle": [], |
| "last": "Nestler", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Andonian, Quentin Anthony, Stella Biderman, Sid Black, Preetham Gali, Leo Gao, Eric Hallahan, Josh Levy-Kramer, Connor Leahy, Lucas Nestler, Kip Parker, Michael Pieler, Shivanshu Purohit, Tri Songz, Phil Wang, and Samuel Weinbach. 2021. GPT-NeoX: Large scale autoregressive language modeling in py- torch.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Scibert: A pretrained language model for scientific text", |
| "authors": [ |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Arman", |
| "middle": [], |
| "last": "Cohan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1903.10676" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. Scibert: A pretrained language model for scientific text. arXiv preprint arXiv:1903.10676.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Piqa: Reasoning about physical commonsense in natural language", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| }, |
| { |
| "first": "Rowan", |
| "middle": [], |
| "last": "Zellers", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI conference on artificial intelligence", |
| "volume": "34", |
| "issue": "", |
| "pages": "7432--7439", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Bisk, Rowan Zellers, Jianfeng Gao, Yejin Choi, et al. 2020. Piqa: Reasoning about physical com- monsense in natural language. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 7432-7439.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Gpt-neox-20b: An open-source autoregressive language model", |
| "authors": [ |
| { |
| "first": "Sid", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Stella", |
| "middle": [], |
| "last": "Biderman", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Hallahan", |
| "suffix": "" |
| }, |
| { |
| "first": "Quentin", |
| "middle": [], |
| "last": "Anthony", |
| "suffix": "" |
| }, |
| { |
| "first": "Leo", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurence", |
| "middle": [], |
| "last": "Golding", |
| "suffix": "" |
| }, |
| { |
| "first": "Horace", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Connor", |
| "middle": [], |
| "last": "Leahy", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Mcdonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Phang", |
| "suffix": "" |
| } |
| ], |
| "year": 2022, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, et al. 2022. Gpt-neox-20b: An open-source autoregressive language model.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "On the opportunities and risks of foundation models", |
| "authors": [ |
| { |
| "first": "Rishi", |
| "middle": [], |
| "last": "Bommasani", |
| "suffix": "" |
| }, |
| { |
| "first": "Drew", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hudson", |
| "suffix": "" |
| }, |
| { |
| "first": "Ehsan", |
| "middle": [], |
| "last": "Adeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Russ", |
| "middle": [], |
| "last": "Altman", |
| "suffix": "" |
| }, |
| { |
| "first": "Simran", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "Sydney", |
| "middle": [], |
| "last": "von Arx", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "S" |
| ], |
| "last": "Bernstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeannette", |
| "middle": [], |
| "last": "Bohg", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bosselut", |
| "suffix": "" |
| }, |
| { |
| "first": "Emma", |
| "middle": [], |
| "last": "Brunskill", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2108.07258" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosse- lut, Emma Brunskill, et al. 2021. On the opportuni- ties and risks of foundation models. arXiv preprint arXiv:2108.07258.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Language models are few-shot learners", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Ryder", |
| "suffix": "" |
| }, |
| { |
| "first": "Melanie", |
| "middle": [], |
| "last": "Subbiah", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [ |
| "D" |
| ], |
| "last": "Kaplan", |
| "suffix": "" |
| }, |
| { |
| "first": "Prafulla", |
| "middle": [], |
| "last": "Dhariwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Arvind", |
| "middle": [], |
| "last": "Neelakantan", |
| "suffix": "" |
| }, |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Shyam", |
| "suffix": "" |
| }, |
| { |
| "first": "Girish", |
| "middle": [], |
| "last": "Sastry", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanda", |
| "middle": [], |
| "last": "Askell", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Advances in neural information processing systems", |
| "volume": "33", |
| "issue": "", |
| "pages": "1877--1901", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Quantifying memorization across neural language models", |
| "authors": [ |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Carlini", |
| "suffix": "" |
| }, |
| { |
| "first": "Daphne", |
| "middle": [], |
| "last": "Ippolito", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Jagielski", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Tramer", |
| "suffix": "" |
| }, |
| { |
| "first": "Chiyuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2022, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2202.07646" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramer, and Chiyuan Zhang. 2022. Quantifying memorization across neural lan- guage models. arXiv preprint arXiv:2202.07646.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Boolq: Exploring the surprising difficulty of natural yes/no questions", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. 2019. Boolq: Exploring the surprising difficulty of natural yes/no questions. In NAACL.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Think you have solved question answering? try arc, the ai2 reasoning challenge", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Isaac", |
| "middle": [], |
| "last": "Cowhey", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Carissa", |
| "middle": [], |
| "last": "Schoenick", |
| "suffix": "" |
| }, |
| { |
| "first": "Oyvind", |
| "middle": [], |
| "last": "Tafjord", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.05457" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. 2018. Think you have solved question an- swering? try arc, the ai2 reasoning challenge. arXiv preprint arXiv:1803.05457.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The commitmentbank: Investigating projection in naturally occurring discourse", |
| "authors": [ |
| { |
| "first": "Marie-Catherine De", |
| "middle": [], |
| "last": "Marneffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandy", |
| "middle": [], |
| "last": "Simons", |
| "suffix": "" |
| }, |
| { |
| "first": "Judith", |
| "middle": [], |
| "last": "Tonhauser", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "proceedings of Sinn und Bedeutung", |
| "volume": "23", |
| "issue": "", |
| "pages": "107--124", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marie-Catherine De Marneffe, Mandy Simons, and Ju- dith Tonhauser. 2019. The commitmentbank: Inves- tigating projection in naturally occurring discourse. In proceedings of Sinn und Bedeutung, volume 23, pages 107-124.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Anchored correlation explanation: Topic modeling with minimal domain knowledge", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gallagher", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Reing", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Kale", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Ver Steeg", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "529--542", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan J Gallagher, Kyle Reing, David Kale, and Greg Ver Steeg. 2017. Anchored correlation explanation: Topic modeling with minimal domain knowledge. Transactions of the Association for Computational Linguistics, 5:529-542.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The pile: An 800gb dataset of diverse text for language modeling", |
| "authors": [ |
| { |
| "first": "Leo", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Stella", |
| "middle": [], |
| "last": "Biderman", |
| "suffix": "" |
| }, |
| { |
| "first": "Sid", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurence", |
| "middle": [], |
| "last": "Golding", |
| "suffix": "" |
| }, |
| { |
| "first": "Travis", |
| "middle": [], |
| "last": "Hoppe", |
| "suffix": "" |
| }, |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Phang", |
| "suffix": "" |
| }, |
| { |
| "first": "Horace", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Anish", |
| "middle": [], |
| "last": "Thite", |
| "suffix": "" |
| }, |
| { |
| "first": "Noa", |
| "middle": [], |
| "last": "Nabeshima", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2101.00027" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leo Gao, Stella Biderman, Sid Black, Laurence Gold- ing, Travis Hoppe, Charles Foster, Jason Phang, Ho- race He, Anish Thite, Noa Nabeshima, et al. 2020. The pile: An 800gb dataset of diverse text for lan- guage modeling. arXiv preprint arXiv:2101.00027.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Domain-specific language model pretraining for biomedical natural language processing", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Tinn", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Lucas", |
| "suffix": "" |
| }, |
| { |
| "first": "Naoto", |
| "middle": [], |
| "last": "Usuyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tristan", |
| "middle": [], |
| "last": "Naumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoifung", |
| "middle": [], |
| "last": "Poon", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "ACM Transactions on Computing for Healthcare", |
| "volume": "3", |
| "issue": "1", |
| "pages": "1--23", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Gu, Robert Tinn, Hao Cheng, Michael Lucas, Naoto Usuyama, Xiaodong Liu, Tristan Naumann, Jianfeng Gao, and Hoifung Poon. 2021. Domain-specific lan- guage model pretraining for biomedical natural lan- guage processing. ACM Transactions on Computing for Healthcare (HEALTH), 3(1):1-23.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Automated chemical reaction extraction from scientific literature", |
| "authors": [ |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "Santiago" |
| ], |
| "last": "Ibanez-Lopez", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanyu", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Quach", |
| "suffix": "" |
| }, |
| { |
| "first": "Connor", |
| "middle": [ |
| "W" |
| ], |
| "last": "Coley", |
| "suffix": "" |
| }, |
| { |
| "first": "Klavs", |
| "middle": [ |
| "F" |
| ], |
| "last": "Jensen", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Journal of Chemical Information and Modeling", |
| "volume": "0", |
| "issue": "0", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1021/acs.jcim.1c00284" |
| ], |
| "PMID": [ |
| "34115937" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiang Guo, A. Santiago Ibanez-Lopez, Hanyu Gao, Vic- tor Quach, Connor W. Coley, Klavs F. Jensen, and Regina Barzilay. 2021. Automated chemical reaction extraction from scientific literature. Journal of Chem- ical Information and Modeling, 0(0):null. PMID: 34115937.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2020. Measuring massive multitask language understanding", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Hendrycks", |
| "suffix": "" |
| }, |
| { |
| "first": "Collin", |
| "middle": [], |
| "last": "Burns", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Basart", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Zou", |
| "suffix": "" |
| }, |
| { |
| "first": "Mantas", |
| "middle": [], |
| "last": "Mazeika", |
| "suffix": "" |
| }, |
| { |
| "first": "Dawn", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Steinhardt", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2009.03300" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2020. Measuring massive multitask language under- standing. arXiv preprint arXiv:2009.03300.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Perceiver: General perception with iterative attention", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Jaegle", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Gimeno", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Brock", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Zisserman", |
| "suffix": "" |
| }, |
| { |
| "first": "Joao", |
| "middle": [], |
| "last": "Carreira", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "4651--4664", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Jaegle, Felix Gimeno, Andy Brock, Oriol Vinyals, Andrew Zisserman, and Joao Carreira. 2021. Perceiver: General perception with iterative attention. In International Conference on Machine Learning, pages 4651-4664. PMLR.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Pubmedqa: A dataset for biomedical research question answering", |
| "authors": [ |
| { |
| "first": "Qiao", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Bhuwan", |
| "middle": [], |
| "last": "Dhingra", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengping", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "W" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinghua", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.06146" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qiao Jin, Bhuwan Dhingra, Zhengping Liu, William W Cohen, and Xinghua Lu. 2019. Pubmedqa: A dataset for biomedical research question answering. arXiv preprint arXiv:1909.06146.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Bioelectra: pretrained biomedical text encoder using discriminators", |
| "authors": [ |
| { |
| "first": "Kamal", |
| "middle": [], |
| "last": "Kanakarajan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bhuvana", |
| "middle": [], |
| "last": "Kundumani", |
| "suffix": "" |
| }, |
| { |
| "first": "Malaikannan", |
| "middle": [], |
| "last": "Sankarasubbu", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 20th Workshop on Biomedical Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "143--154", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kamal Kanakarajan, Bhuvana Kundumani, and Malaikannan Sankarasubbu. 2021. Bioelectra: pre- trained biomedical text encoder using discriminators. In Proceedings of the 20th Workshop on Biomedical Language Processing, pages 143-154.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "and Dario Amodei. 2020. Scaling laws for neural language models", |
| "authors": [ |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Kaplan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "McCandlish", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Henighan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [ |
| "B" |
| ], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Chess", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Gray", |
| "suffix": "" |
| }, |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2001.08361" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. 2020. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Mixout: Effective regularization to finetune large-scale pretrained language models", |
| "authors": [ |
| { |
| "first": "Cheolhyoung", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanmo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.11299" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cheolhyoung Lee, Kyunghyun Cho, and Wanmo Kang. 2019. Mixout: Effective regularization to fine- tune large-scale pretrained language models. arXiv preprint arXiv:1909.11299.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
| "authors": [ |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Wonjin", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungdong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghyeon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunkyu", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [], |
| "last": "Ho So", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaewoo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Bioinformatics", |
| "volume": "36", |
| "issue": "4", |
| "pages": "1234--1240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2020. Biobert: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics, 36(4):1234-1240.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Chris Callison-Burch, and Nicholas Carlini. 2021. Deduplicating training data makes language models better", |
| "authors": [ |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Daphne", |
| "middle": [], |
| "last": "Ippolito", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Nystrom", |
| "suffix": "" |
| }, |
| { |
| "first": "Chiyuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Douglas", |
| "middle": [], |
| "last": "Eck", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Carlini", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2107.06499" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. 2021. Deduplicating training data makes language models better. arXiv preprint arXiv:2107.06499.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "The winograd schema challenge", |
| "authors": [ |
| { |
| "first": "Hector", |
| "middle": [ |
| "J" |
| ], |
| "last": "Levesque", |
| "suffix": "" |
| }, |
| { |
| "first": "Ernest", |
| "middle": [], |
| "last": "Davis", |
| "suffix": "" |
| }, |
| { |
| "first": "Leora", |
| "middle": [], |
| "last": "Morgenstern", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "12", |
| "issue": "", |
| "pages": "552--561", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hector J. Levesque, Ernest Davis, and Leora Morgen- stern. 2012. The winograd schema challenge. KR'12, page 552-561. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Pretrained language models for biomedical and clinical tasks: Understanding and extending the state-of-the-art", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 3rd Clinical Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "146--157", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Lewis, Myle Ott, Jingfei Du, and Veselin Stoy- anov. 2020. Pretrained language models for biomedi- cal and clinical tasks: Understanding and extending the state-of-the-art. In Proceedings of the 3rd Clini- cal Natural Language Processing Workshop, pages 146-157.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Program induction by rationale generation: Learning to solve and explain algebraic word problems", |
| "authors": [ |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Dani", |
| "middle": [], |
| "last": "Yogatama", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1705.04146" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang Ling, Dani Yogatama, Chris Dyer, and Phil Blun- som. 2017. Program induction by rationale genera- tion: Learning to solve and explain algebraic word problems. arXiv preprint arXiv:1705.04146.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Oag-bert: Pretrain heterogeneous entity-augmented academic language models", |
| "authors": [ |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Da", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| }, |
| { |
| "first": "Xingjian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Kan", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongxia", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2103.02410" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiao Liu, Da Yin, Xingjian Zhang, Kai Su, Kan Wu, Hongxia Yang, and Jie Tang. 2021. Oag-bert: Pre- train heterogeneous entity-augmented academic lan- guage models. arXiv preprint arXiv:2103.02410.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "S2orc: The semantic scholar open research corpus", |
| "authors": [ |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucy", |
| "middle": [ |
| "Lu" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Rodney", |
| "middle": [ |
| "Michael" |
| ], |
| "last": "Kinney", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyle Lo, Lucy Lu Wang, Mark Neumann, Rod- ney Michael Kinney, and Daniel S. Weld. 2020. S2orc: The semantic scholar open research corpus. In ACL.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Time waits for no one! analysis and challenges of temporal misalignment", |
| "authors": [ |
| { |
| "first": "Kelvin", |
| "middle": [], |
| "last": "Luu", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Khashabi", |
| "suffix": "" |
| }, |
| { |
| "first": "Suchin", |
| "middle": [], |
| "last": "Gururangan", |
| "suffix": "" |
| }, |
| { |
| "first": "Karishma", |
| "middle": [], |
| "last": "Mandyam", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2111.07408" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kelvin Luu, Daniel Khashabi, Suchin Gururangan, Kar- ishma Mandyam, and Noah A Smith. 2021. Time waits for no one! analysis and challenges of temporal misalignment. arXiv preprint arXiv:2111.07408.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Pointer sentinel mixture models", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Merity", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.07843" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. 2016. Pointer sentinel mixture mod- els. arXiv preprint arXiv:1609.07843.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Can a suit of armor conduct electricity? a new dataset for open book question answering", |
| "authors": [ |
| { |
| "first": "Todor", |
| "middle": [], |
| "last": "Mihaylov", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1809.02789" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Todor Mihaylov, Peter Clark, Tushar Khot, and Ashish Sabharwal. 2018. Can a suit of armor conduct elec- tricity? a new dataset for open book question answer- ing. arXiv preprint arXiv:1809.02789.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Electramed: a new pre-trained language representation model for biomedical nlp", |
| "authors": [ |
| { |
| "first": "Giacomo", |
| "middle": [], |
| "last": "Miolo", |
| "suffix": "" |
| }, |
| { |
| "first": "Giulio", |
| "middle": [], |
| "last": "Mantoan", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlotta", |
| "middle": [], |
| "last": "Orsenigo", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2104.09585" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giacomo Miolo, Giulio Mantoan, and Carlotta Orsenigo. 2021. Electramed: a new pre-trained language repre- sentation model for biomedical nlp. arXiv preprint arXiv:2104.09585.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Benchmarking for biomedical natural language processing tasks with a domain specific albert", |
| "authors": [ |
| { |
| "first": "Usman", |
| "middle": [], |
| "last": "Naseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [ |
| "G" |
| ], |
| "last": "Dunn", |
| "suffix": "" |
| }, |
| { |
| "first": "Matloob", |
| "middle": [], |
| "last": "Khushi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinman", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2107.04374" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Usman Naseem, Adam G Dunn, Matloob Khushi, and Jinman Kim. 2021. Benchmarking for biomedical natural language processing tasks with a domain spe- cific albert. arXiv preprint arXiv:2107.04374. OAG. https://www.microsoft.com/en- us/research/project/open-academic-graph/.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "The lambada dataset: Word prediction requiring a broad discourse context", |
| "authors": [ |
| { |
| "first": "Denis", |
| "middle": [], |
| "last": "Paperno", |
| "suffix": "" |
| }, |
| { |
| "first": "Germ\u00e1n", |
| "middle": [], |
| "last": "Kruszewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Lazaridou", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [ |
| "Ngoc" |
| ], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Raffaella", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandro", |
| "middle": [], |
| "last": "Pezzelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Gemma", |
| "middle": [], |
| "last": "Boleda", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Fern\u00e1ndez", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.06031" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Denis Paperno, Germ\u00e1n Kruszewski, Angeliki Lazari- dou, Quan Ngoc Pham, Raffaella Bernardi, Sandro Pezzelle, Marco Baroni, Gemma Boleda, and Raquel Fern\u00e1ndez. 2016. The lambada dataset: Word pre- diction requiring a broad discourse context. arXiv preprint arXiv:1606.06031.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Transfer learning in biomedical natural language processing: an evaluation of bert and elmo on ten benchmarking datasets", |
| "authors": [ |
| { |
| "first": "Yifan", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Shankai", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.05474" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yifan Peng, Shankai Yan, and Zhiyong Lu. 2019. Trans- fer learning in biomedical natural language process- ing: an evaluation of bert and elmo on ten bench- marking datasets. arXiv preprint arXiv:1906.05474.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Erol Bahadroglu, Alec Peltekian, and Gr\u00e9goire Altan-Bonnet. 2021. Scifive: a text-to-text transformer model for biomedical literature", |
| "authors": [ |
| { |
| "first": "Long", |
| "middle": [ |
| "N" |
| ], |
| "last": "Phan", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "T" |
| ], |
| "last": "Anibal", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaurya", |
| "middle": [], |
| "last": "Chanana", |
| "suffix": "" |
| }, |
| { |
| "first": "Erol", |
| "middle": [], |
| "last": "Bahadroglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Peltekian", |
| "suffix": "" |
| }, |
| { |
| "first": "Gr\u00e9goire", |
| "middle": [], |
| "last": "Altan-Bonnet", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2106.03598" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Long N Phan, James T Anibal, Hieu Tran, Shaurya Chanana, Erol Bahadroglu, Alec Peltekian, and Gr\u00e9- goire Altan-Bonnet. 2021. Scifive: a text-to-text transformer model for biomedical literature. arXiv preprint arXiv:2106.03598.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Wic: the word-in-context dataset for evaluating context-sensitive meaning representations", |
| "authors": [ |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1808.09121" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Taher Pilehvar and Jose Camacho-Collados. 2018. Wic: the word-in-context dataset for evaluat- ing context-sensitive meaning representations. arXiv preprint arXiv:1808.09121.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Developing infrastructure to support closer collaboration of aggregators with open repositories", |
| "authors": [ |
| { |
| "first": "Nancy", |
| "middle": [], |
| "last": "Pontika", |
| "suffix": "" |
| }, |
| { |
| "first": "Petr", |
| "middle": [], |
| "last": "Knoth", |
| "suffix": "" |
| }, |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Cancellieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Pearce", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "LIBER Quarterly", |
| "volume": "25", |
| "issue": "4", |
| "pages": "172--188", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nancy Pontika, Petr Knoth, Matteo Cancellieri, and Samuel Pearce. 2016. Developing infrastructure to support closer collaboration of aggregators with open repositories. LIBER Quarterly, 25(4):172-188.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Scaling language models: Methods, analysis & insights from training gopher", |
| "authors": [ |
| { |
| "first": "Jack", |
| "middle": [ |
| "W" |
| ], |
| "last": "Rae", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Borgeaud", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Katie", |
| "middle": [], |
| "last": "Millican", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Francis", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Aslanides", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| }, |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Ring", |
| "suffix": "" |
| }, |
| { |
| "first": "Susannah", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2112.11446" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susan- nah Young, et al. 2021. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Zero: memory optimization towards training a trillion parameter models", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Rajbhandari", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Rasley", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Ruwase", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S Rajbhandari, J Rasley, O Ruwase, and Y He. 2019. Zero: memory optimization towards training a trillion parameter models. arxiv e-prints arxiv: 11910.02054 (2019).", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Effect of scale on catastrophic forgetting in neural networks", |
| "authors": [ |
| { |
| "first": "Aitor", |
| "middle": [], |
| "last": "Vinay Venkatesh Ramasesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Ethan", |
| "middle": [], |
| "last": "Lewkowycz", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vinay Venkatesh Ramasesh, Aitor Lewkowycz, and Ethan Dyer. 2021. Effect of scale on catastrophic forgetting in neural networks. In International Con- ference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Rasley", |
| "suffix": "" |
| }, |
| { |
| "first": "Samyam", |
| "middle": [], |
| "last": "Rajbhandari", |
| "suffix": "" |
| }, |
| { |
| "first": "Olatunji", |
| "middle": [], |
| "last": "Ruwase", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxiong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "3505--3506", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. 2020. Deepspeed: System optimiza- tions enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD International Conference on Knowl- edge Discovery & Data Mining, pages 3505-3506.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Biomegatron: Larger biomedical domain language model", |
| "authors": [ |
| { |
| "first": "Hoo-Chang", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Evelina", |
| "middle": [], |
| "last": "Bakhturina", |
| "suffix": "" |
| }, |
| { |
| "first": "Raul", |
| "middle": [], |
| "last": "Puri", |
| "suffix": "" |
| }, |
| { |
| "first": "Mostofa", |
| "middle": [], |
| "last": "Patwary", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Shoeybi", |
| "suffix": "" |
| }, |
| { |
| "first": "Raghav", |
| "middle": [], |
| "last": "Mani", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.06060" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hoo-Chang Shin, Yang Zhang, Evelina Bakhturina, Raul Puri, Mostofa Patwary, Mohammad Shoeybi, and Raghav Mani. 2020. Biomegatron: Larger biomedical domain language model. arXiv preprint arXiv:2010.06060.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Megatron-lm: Training multi-billion parameter language models using model parallelism", |
| "authors": [ |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Shoeybi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mostofa", |
| "middle": [], |
| "last": "Patwary", |
| "suffix": "" |
| }, |
| { |
| "first": "Raul", |
| "middle": [], |
| "last": "Puri", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Legresley", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Casper", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Catanzaro", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.08053" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catan- zaro. 2019. Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Flava: A foundational language and vision alignment model", |
| "authors": [ |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Ronghang", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Vedanuj", |
| "middle": [], |
| "last": "Goswami", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Couairon", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Galuba", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcus", |
| "middle": [], |
| "last": "Rohrbach", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2112.04482" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 2021. Flava: A founda- tional language and vision alignment model. arXiv preprint arXiv:2112.04482.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Using deepspeed and megatron to train megatron-turing nlg 530b, a large-scale generative language model", |
| "authors": [ |
| { |
| "first": "Shaden", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Mostofa", |
| "middle": [], |
| "last": "Patwary", |
| "suffix": "" |
| }, |
| { |
| "first": "Brandon", |
| "middle": [], |
| "last": "Norick", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Legresley", |
| "suffix": "" |
| }, |
| { |
| "first": "Samyam", |
| "middle": [], |
| "last": "Rajbhandari", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Casper", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrimai", |
| "middle": [], |
| "last": "Prabhumoye", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Zerveas", |
| "suffix": "" |
| }, |
| { |
| "first": "Vijay", |
| "middle": [], |
| "last": "Korthikanti", |
| "suffix": "" |
| } |
| ], |
| "year": 2022, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2201.11990" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shaden Smith, Mostofa Patwary, Brandon Norick, Patrick LeGresley, Samyam Rajbhandari, Jared Casper, Zhun Liu, Shrimai Prabhumoye, George Zerveas, Vijay Korthikanti, et al. 2022. Using deep- speed and megatron to train megatron-turing nlg 530b, a large-scale generative language model. arXiv preprint arXiv:2201.11990.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Roformer: Enhanced transformer with rotary position embedding", |
| "authors": [ |
| { |
| "first": "Jianlin", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shengfeng", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yunfeng", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2104.09864" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yun- feng Liu. 2021. Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Microsoft academic graph: When experts are not enough", |
| "authors": [ |
| { |
| "first": "Kuansan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhihong", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Chiyuan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chieh-Han", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxiao", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Anshul", |
| "middle": [], |
| "last": "Kanakia", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Quantitative Science Studies", |
| "volume": "1", |
| "issue": "1", |
| "pages": "396--413", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kuansan Wang, Zhihong Shen, Chiyuan Huang, Chieh- Han Wu, Yuxiao Dong, and Anshul Kanakia. 2020a. Microsoft academic graph: When experts are not enough. Quantitative Science Studies, 1(1):396-413.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "CORD-19: The COVID-19 open research dataset", |
| "authors": [ |
| { |
| "first": "Lucy", |
| "middle": [ |
| "Lu" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoganand", |
| "middle": [], |
| "last": "Chandrasekhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Russell", |
| "middle": [], |
| "last": "Reas", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiangjiang", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Doug", |
| "middle": [], |
| "last": "Burdick", |
| "suffix": "" |
| }, |
| { |
| "first": "Darrin", |
| "middle": [], |
| "last": "Eide", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathryn", |
| "middle": [], |
| "last": "Funk", |
| "suffix": "" |
| }, |
| { |
| "first": "Yannis", |
| "middle": [], |
| "last": "Katsis", |
| "suffix": "" |
| }, |
| { |
| "first": "Rodney", |
| "middle": [ |
| "Michael" |
| ], |
| "last": "Kinney", |
| "suffix": "" |
| }, |
| { |
| "first": "Yunyao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziyang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Merrill", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| }, |
| { |
| "first": "Dewey", |
| "middle": [ |
| "A" |
| ], |
| "last": "Murdick", |
| "suffix": "" |
| }, |
| { |
| "first": "Devvret", |
| "middle": [], |
| "last": "Rishi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jerry", |
| "middle": [], |
| "last": "Sheehan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhihong", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Brandon", |
| "middle": [], |
| "last": "Stilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "D" |
| ], |
| "last": "Wade", |
| "suffix": "" |
| }, |
| { |
| "first": "Kuansan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nancy", |
| "middle": [ |
| "Xin", |
| "Ru" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Wilhelm", |
| "suffix": "" |
| }, |
| { |
| "first": "Boya", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Douglas", |
| "middle": [ |
| "M" |
| ], |
| "last": "Raymond", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Kohlmeier", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the 1st Workshop on NLP for COVID-19 at ACL 2020, Online", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucy Lu Wang, Kyle Lo, Yoganand Chandrasekhar, Russell Reas, Jiangjiang Yang, Doug Burdick, Darrin Eide, Kathryn Funk, Yannis Katsis, Rodney Michael Kinney, Yunyao Li, Ziyang Liu, William Merrill, Paul Mooney, Dewey A. Murdick, Devvret Rishi, Jerry Sheehan, Zhihong Shen, Brandon Stilson, Alex D. Wade, Kuansan Wang, Nancy Xin Ru Wang, Christopher Wilhelm, Boya Xie, Douglas M. Ray- mond, Daniel S. Weld, Oren Etzioni, and Sebastian Kohlmeier. 2020b. CORD-19: The COVID-19 open research dataset. In Proceedings of the 1st Work- shop on NLP for COVID-19 at ACL 2020, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Crowdsourcing multiple choice science questions", |
| "authors": [ |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Welbl", |
| "suffix": "" |
| }, |
| { |
| "first": "Nelson", |
| "middle": [ |
| "F" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1707.06209" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johannes Welbl, Nelson F Liu, and Matt Gardner. 2017. Crowdsourcing multiple choice science questions. arXiv preprint arXiv:1707.06209.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Songfang Huang, and Fei Huang. 2021. Improving biomedical pretrained language models with knowledge", |
| "authors": [ |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yijia", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chuanqi", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Songfang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2104.10344" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zheng Yuan, Yijia Liu, Chuanqi Tan, Songfang Huang, and Fei Huang. 2021. Improving biomedical pre- trained language models with knowledge. arXiv preprint arXiv:2104.10344.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Aligning books and movies: Towards story-like visual explanations by watching movies and reading books", |
| "authors": [ |
| { |
| "first": "Yukun", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Rich", |
| "middle": [], |
| "last": "Zemel", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Urtasun", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Torralba", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanja", |
| "middle": [], |
| "last": "Fidler", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE international conference on computer vision", |
| "volume": "", |
| "issue": "", |
| "pages": "19--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhut- dinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE in- ternational conference on computer vision, pages 19-27.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Sources with <1% (upper right, black box) Figure 1: Summary of data source representation within the Combined A+F data sample. Coloring illustrates whether a data source contains peer reviewed (Blue), mixed (Purple), or not peer reviewed (Red) articles.", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Distribution of validation loss by model size: performance improves as the model size increases.", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "num": null, |
| "type_str": "figure", |
| "text": "(a) GPU computation in #Floating Point Operations (b) GPU Memory Allocation Figure 4: GPU system performance during pretraining.", |
| "uris": null |
| }, |
| "TABREF0": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Model</td><td/><td>Data Source</td><td>Pretraining</td><td>Corpus</td><td>#Params (B)</td></tr><tr><td colspan=\"5\">Lee et al. 2020 Alsentzer et al. 2019 Peng et al. 2019 Liu et al. 2021 Guo et al. 2021 Chem(Rxn)BERT \u2020 Wiki + Books continual pretraining Chemistry Journals BioBERT Wiki + Books continual pretraining PubMed ClinicalBERT Wiki + Books continual pretraining MIMIC 1 BlueBERT Wiki + Books continual pretraining PubMed + MIMIC MATH-BERT Arxiv continual pretraining Arxiv Phan et al. 2021 SciFive C4 continual pretraining PubMed Naseem et al. 2021 BioALBERT Wiki + Books continual pretraining PMC + MIMIC-II Lewis et al. 2020 BioRoBERTa Wiki + Books continual pretraining PMC + MIMIC-III Yuan et al. 2021 KeBioLM PubMed continual pretraining PubMed + UMLS 2</td><td>0.11 0.11 0.11 0.11 0.11 0.22 0.77 0.02 0.30 0.34</td></tr><tr><td>Shin et al. 2020 Kanakarajan et al. 2021 Miolo et al. 2021 Beltagy et al. 2019 Liu et al. 2021 Gu et al. 2021</td><td>BioMegatron BioELECTRA ELECTRAMed SciBERT OAG-BERT PubMedBERT</td><td>PubMed PubMed PubMed PMC + CS OAG PubMed</td><td>from scratch continual pretraining from scratch from scratch from scratch from scratch from scratch</td><td>PubMed PubMed PubMed PMC + CS OAG PubMed</td><td>0.80 1.20 0.11 0.11 0.11 0.11 0.34</td></tr><tr><td colspan=\"2\">Our Work (autoregressive) \u2020</td><td>10+ sources (Chemistry)</td><td>from scratch continual pretraining</td><td>10+ sources (Chemistry)</td><td>1.47</td></tr></table>", |
| "text": "Foundation models for science focus on the biomedical, math, computer science and chemistry domains. We use \u2020 to indicate models trained for chemistry.", |
| "num": null |
| }, |
| "TABREF1": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"2\">We compare model configurations between GPT-NeoX and OpenAI's GPT-2. GPT-NeoX architecture is originally from GPT-3 (Brown et al., 2020)</td></tr><tr><td>Size S M L XL</td><td>Model GPT-NeoX 12 dL d dim d heads #Params (B) 768 12 0.18 GPT-2 12 768 12 GPT-NeoX 24 1024 16 0.40 GPT-2 24 1024 16 GPT-NeoX 24 1536 16 0.80 GPT-2 36 1280 20 GPT-NeoX 24 2048 16 1.47 GPT-2 48 1600 25</td></tr></table>", |
| "text": "Our model configurations: d L is the number of decoder layers, d dim is the hidden size of the model, d heads is the number of attention heads.", |
| "num": null |
| }, |
| "TABREF2": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Source MAG \u2329A\u232a Aminer \u2329A\u232a S2ORC \u2329A\u232a WoS \u2329A\u232a CORD-19 \u2329A\u232a OSTI \u2329A\u232a Arxiv \u2329A\u232a PubMed \u2329A\u232a PubMed \u2329FT\u232a CORE \u2329FT\u232a Combined \u2329A\u232a Combined \u2329FT\u232a Combined \u2329A+FT\u232a</td><td>#Articles (M) #Tokens (B) Size (Gb) 34.26 7.43 46 18.50 5.80 35 10.44 2.05 32 7.90 3.31 18 < 0.01 < 0.01 0.2 0.05 < 0.01 0.1 0.38 0.04 0.4 0.28 0.08 0.5 0.70 7.34 32 7.27 215.50 743 46.94 16.18 67 6.52 184.42 603 53.45 200.61 670</td></tr></table>", |
| "text": "Dataset statistics: combined datasets are after the de-duplication process. We split datasets to those that include abstracts \u2329A\u232a vs. full texts \u2329FT\u232a.", |
| "num": null |
| }, |
| "TABREF4": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Model</td><td>Size</td><td>BoolQ</td><td colspan=\"6\">CB WIC WSC MathQA PIQA PubMedQA</td><td>Lambada Wikitext</td></tr><tr><td colspan=\"2\">Baseline AMiner CORE MAG PubMed-F S2ORC WoS Combined-A Combined-F Combined-A+F XL S M L XL M \u2021 S M L XL S M L XL S M L XL S M L XL S M L XL S M L XL XL XL Combined-A+F XL (4x)</td><td colspan=\"2\">0.49 0.41 0.59 0.43 0.60 0.45 0.61 0.39 0.62 0.34 0.41 0.39 0.40 0.39 0.61 0.48 0.50 0.39 0.62 0.41 0.62 0.41 0.61 0.41 0.61 0.38 0.41 0.23 0.38 0.07 0.51 0.14 0.40 0.11 0.58 0.41 0.61 0.39 0.57 0.41 0.60 0.41 0.38 0.41 0.38 0.43 0.38 0.46 0.38 0.50 0.38 0.39 0.38 0.45 0.41 0.36 0.57 0.34 0.56 0.16 0.62 0.38 0.61 0.41 0.61 0.41</td><td>0.49 0.50 0.50 0.50 0.50 0.50 0.51 0.50 0.50 0.50 0.50 0.50 0.50 0.50 0.50 0.50 0.51 0.50 0.50 0.50 0.50 0.50 0.50 0.50 0.50 0.50 0.50 0.47 0.50 0.50 0.50 0.50 0.50</td><td>0.43 0.40 0.46 0.50 0.36 0.44 0.41 0.47 0.37 0.37 0.37 0.37 0.37 0.40 0.37 0.35 0.62 0.45 0.38 0.38 0.39 0.63 0.63 0.63 0.63 0.63 0.63 0.54 0.37 0.37 0.37 0.39 0.37</td><td>0.21 0.23 0.23 0.24 0.20 0.22 0.21 0.22 0.21 0.20 0.21 0.21 0.22 0.21 0.21 0.22 0.22 0.21 0.20 0.21 0.22 0.20 0.22 0.21 0.20 0.21 0.19 0.21 0.20 0.21 0.22 0.23 0.24</td><td>0.63 0.68 0.70 0.71 0.55 0.56 0.57 0.58 0.58 0.55 0.56 0.57 0.58 0.56 0.57 0.59 0.59 0.57 0.58 0.59 0.59 0.57 0.56 0.56 0.56 0.55 0.54 0.56 0.55 0.60 0.57 0.59 0.60</td><td>0.44 0.53 0.54 0.59 0.55 0.46 0.43 0.36 0.43 0.55 0.55 0.51 0.45 0.43 0.41 0.39 0.34 0.54 0.49 0.42 0.49 0.34 0.34 0.34 0.33 0.34 0.34 0.42 0.56 0.50 0.55 0.48 0.56</td><td>40.06 18.25 12.97 10.63 2834.51 2825.84 1802.35 661.81 786.22 671.43 273.06 173.15 79.95 1142.83 628.72 282.39 364.54 2670.39 1742.00 843.83 679.80 122739.30 80151.10 89136.68 107065.48 140552.69 182967.37 148609.73 192970.64 250.88 72.50 71.43 30.40</td><td>37.37 26.75 22.61 20.38 126.55 158.85 116.93 87.23 91.28 100.53 77.96 69.62 50.47 118.40 91.36 67.74 70.71 148.88 119.74 95.75 90.38 403.48 330.56 327.53 351.81 556.00 498.36 480.91 509.06 61.07 48.96 48.65 33.05</td></tr></table>", |
| "text": "Downstream Out-of-domain Task Performance. We use \u2021 to indicate the baseline model tuned from the base GPT-2 model. Performance on Lambada and Wikitext is reported using perplexity, all other tasks report accuracy . Top-4 performance highlighted in bold, with best performance indicated with underlines. XL (4x) model is trained with 4x larger batch size that used in other models.", |
| "num": null |
| } |
| } |
| } |
| } |