| { |
| "paper_id": "U19-1022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:07:45.820963Z" |
| }, |
| "title": "Investigating the Effect of Lexical Segmentation in Transformer-based Models on Medical Datasets", |
| "authors": [ |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Australian National University CSIRO Data61", |
| "location": {} |
| }, |
| "email": "vincent.nguyen@anu.edu.au" |
| }, |
| { |
| "first": "Sarvnaz", |
| "middle": [], |
| "last": "Karimi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "CSIRO", |
| "location": { |
| "addrLine": "Data61 Sydney", |
| "country": "Australia" |
| } |
| }, |
| "email": "sarvnaz.karimi@csiro.au" |
| }, |
| { |
| "first": "Zhenchang", |
| "middle": [], |
| "last": "Xing", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Australian National University", |
| "location": { |
| "settlement": "Canberra", |
| "country": "Australia" |
| } |
| }, |
| "email": "zhenchang.xing@anu.edu.au" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Transformer-based models have been popular recently and have improved performance for many Natural Language Processing (NLP) Tasks, including those in the biomedical field. Previous research suggests that, when using these models, an in-domain vocabulary is more suitable than using an open-domain vocabulary. We investigate the effects of a specialised in-domain vocabulary trained from scratch on a biomedical corpus. Our research suggests that, although the in-domain vocabulary is useful, it is usually constrained by the corpora size because these models need to be trained from scratch. Instead, it is more useful to have more data and perform additional pretraining steps with a corpus-specific vocabulary. 1", |
| "pdf_parse": { |
| "paper_id": "U19-1022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Transformer-based models have been popular recently and have improved performance for many Natural Language Processing (NLP) Tasks, including those in the biomedical field. Previous research suggests that, when using these models, an in-domain vocabulary is more suitable than using an open-domain vocabulary. We investigate the effects of a specialised in-domain vocabulary trained from scratch on a biomedical corpus. Our research suggests that, although the in-domain vocabulary is useful, it is usually constrained by the corpora size because these models need to be trained from scratch. Instead, it is more useful to have more data and perform additional pretraining steps with a corpus-specific vocabulary. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In the natural language processing domain, there is a requirement for a fixed-sized vocabulary during training which could lead to Out-Of-Vocabulary (OOV) problem (Luong et al., 2015) . This problem is when the fixed vocabulary model encounters an unseen word during inference, and the model is unable to handle it appropriately. Word-Piece tokenisation, initially used in machine translation systems (Wu et al., 2016) , has been widely successful in addressing the OOV problem by segmenting unseen words into word pieces as a representation for the unknown word. Previous research has either replaced unseen words with a special token (Luong et al., 2015) , used character word embeddings (Labeau and Allauzen, 2017) as a fall-back, or ignored these words completely. These techniques have shortcomings as they do not attempt to represent the unseen word or require additional processing and memory as with character embeddings. WordPiece tokenisation is WordPiece: arthralgias \u2192 art-hra-al-gia-s Ideal: arthralgias \u2192 arthr-algias arthr-means joints, -algias means pain a trade-off, where there is no need for special handling of out-of-vocabulary, as unseen words are segmented into sub-word units. It allows a limited vocabulary to represent an infinitely sized vocabulary space.", |
| "cite_spans": [ |
| { |
| "start": 163, |
| "end": 183, |
| "text": "(Luong et al., 2015)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 401, |
| "end": 418, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 636, |
| "end": 656, |
| "text": "(Luong et al., 2015)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 690, |
| "end": 717, |
| "text": "(Labeau and Allauzen, 2017)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Models that successfully use WordPiece tokenisation include the transformer-based architectures: BERT (Devlin et al., 2019) , RoBERTa (Liu et al., 2019) , and XLNet (Yang et al., 2019) . BERT uses WordPieces as morphemes to aid the contextual representation of words. BERT performs at a state-of-the-art level on the GLUE tasks due to its ability to fine-tune specifically to each task. Given this success, the model has also been applied to the biomedical domain through models such as BioBERT , which applies additional pretraining on the MEDLINE and PubMed corpora for biomedical text representation. However, these BioBERT models sometimes do not perform well on biomedical tasks, and in some instances are even worse than vanilla BERT (Zhu et al., 2019; Peng et al., 2019; Nguyen et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 123, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 134, |
| "end": 152, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 165, |
| "end": 184, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 740, |
| "end": 758, |
| "text": "(Zhu et al., 2019;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 759, |
| "end": 777, |
| "text": "Peng et al., 2019;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 778, |
| "end": 798, |
| "text": "Nguyen et al., 2019)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We hypothesise that a reason for this failure could be due to the vocabulary limitation of BioBERT, where the authors keep the opendomain vocabulary of BERT. This is problematic because the original BERT vocabulary is not suited for the biomedical domain due to the lack of medical suffixes and prefixes in its vocabulary leading to incorrect segmentation of the words (see the example in Figure 1 ). This is important because the suffixes and prefixes (the morphemes) in medical terminology carry distinct meanings, and almost the entire medical vocabulary can be constructed from prefix and suffix combinations (Stanfield et al., 2008) . Thus, we aim to validate the importance of having the additional biomedical vocabulary for downstream tasks.", |
| "cite_spans": [ |
| { |
| "start": 613, |
| "end": 637, |
| "text": "(Stanfield et al., 2008)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 389, |
| "end": 397, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most of biomedical natural language processing is adapted from open-domain state-of-the-art techniques, from word embeddings (Chiu et al., 2016) , to BiLSTM-CRF (Kalyan and Sangeetha, 2019) . BERT is a deeply bidirectional encoder that is based on the transformer architecture. It uses self-attention as a mechanism of encoding input contextually by attending to different aspects of the sentence using multi-headed selfattention head, passed through layer normalisation and a Multi-Layer Perceptrons (MLP). The BERT model has been successful in the open-domain as it scored State-of-the-Art (SOTA) performance on the SQUAD (Rajpurkar et al., 2016) and GLUE datasets because it addresses the polysemy problem (Molla and Gonzlez, 2007) , through contextual clues, for richer representations.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 144, |
| "text": "(Chiu et al., 2016)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 161, |
| "end": 189, |
| "text": "(Kalyan and Sangeetha, 2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 624, |
| "end": 648, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 709, |
| "end": 734, |
| "text": "(Molla and Gonzlez, 2007)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "However, it was realised that directly applying the model to a closed domain can be problematic due to two factors: (1) The BERT model is trained on Wikipedia and BookCorpus meaning that the internal representations for specialised words may not be properly learned for a specialised domain; and, (2) The internal vocabulary that BERT has learned is suitable for tasks in the open-domain and a separate or additional vocabulary is needed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To address the first problem, BioBERT takes the original BERT model and performs additional pretraining steps on academic biomedical literature, PubMed and MEDLINE, to improve downstream medical tasks. However, BioBERT does not change the open-domain vocabulary to a medically-focused one. Furthermore, the language in academic corpora is different to clinical text and patient language. These issues resulted in lower than expected performance on biomedical datasets, such the MEDIQA (Ben Abacha et al., 2019; Nguyen et al., 2019) and in some cases worse than the original BERT models (Zhu et al., 2019) from which they were trained from.", |
| "cite_spans": [ |
| { |
| "start": 490, |
| "end": 510, |
| "text": "Abacha et al., 2019;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 511, |
| "end": 531, |
| "text": "Nguyen et al., 2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 586, |
| "end": 604, |
| "text": "(Zhu et al., 2019)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Alleviating the dataset problem, Clinical BERT (Alsentzer et al., 2019) , performs further pretraining steps for the BioBERT and BERT models on domain-specific corpora showing marked improvements on downstream clinical tasks. However, they did not change the internal vocabulary of the models as this would require training the models from scratch which may limit performance.", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 71, |
| "text": "(Alsentzer et al., 2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Addressing both the vocabulary and the dataset problem, SciBERT was trained from scratch on the Semantic Scholar corpus and a specialised SentencePiece vocabulary trained on the corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our paper is a first look empirical study into the effect of vocabulary and dataset in applying BERT-based models to downstream tasks. Although our study is limited in scope, it still explores an important problem and our research suggests that some of the previous studies may have drawn incorrect conclusions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The original WordPiece algorithm addresses the OOV problem and handles arbitrary sequences of characters found on the web. This algorithm greedily maximises the likelihood of the vocabulary over the training data. The algorithm is similar to the byte-pair encoding algorithm, which uses frequency rather than likelihood to train the model. By using word pieces, the tokenisation procedure can break down OOV words into their word subunits. For instance, jumped can be broken down into jump ##ed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sub-word Models", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The SentencePiece algorithm (Kudo and Richardson, 2018) is similar to WordPiece except that it performs direct training from raw sentences with language independence. It treats all sentences as a sequence of Unicode characters without a special reliance on spaces, allowing for reliable multi-lingual de-tokenisation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sub-word Models", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We propose vocabulary adaption to investigate the segmentation problems for medical text in BERT models and their variants. We propose two different methods to achieve this: (1) Adding additional vocabulary from a common medical vocabulary of suffixes and prefixes 2 to the existing BERT vocabulary and perform additional pretraining steps; and, (2) Training a separate SentencePiece tokeniser and pretraining a BERT model from scratch on the medical corpus. We compare these methods against BERT, BioBERT and SciBERT models on downstream medical tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We use PubMed Central (PMC) 3 corpus for pretraining our BERT Model. It consists of two million articles, 300 million sentences and one billion tokens at the time of writing. Note that we use the full text of the articles, not just the abstracts, as this was shown to be effective in SciBERT (Beltagy et al., 2019). For fine-tuning, we select the MEDIQA datasets (Ben Abacha et al., 2019) which contains three tasks: MEDical Natural Language Inference (MEDNLI) , Recognising Question Entailment (RQE) (Abacha and Demner-Fushman, 2016) , and Question Answering (QA).", |
| "cite_spans": [ |
| { |
| "start": 501, |
| "end": 534, |
| "text": "(Abacha and Demner-Fushman, 2016)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In order to comply with BERT's formatting for pretraining, for tokenisation and sentence segmentation, we use ScispaCy (Neumann et al., 2019) , with a biomedical model (en core sci sm) for its speed and ability to parse biomedical data. We then train a SentencePiece model with a fixed vocabulary size of 32,000 on a subset of 20 million PubMed text articles to extract a vocabulary that maximises likelihood over the dataset. We then adapt the SentencePiece vocabulary to be compatible with BERT by pruning ' ' characters, replacing them with '##' and removing start and end of sequence tokens.", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 141, |
| "text": "(Neumann et al., 2019)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "3 PubMed Central Dump", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Due to the large size of PMC and time and computing resource limitations, we randomly select a subset of 60 million sentences for pretraining. We use the default settings for pretraining the BERT model as described in the original paper. We also use the same pretraining schedule as the original BERT implementation where the model is first trained on a sequence length of 128, which we call the intermediate model, until convergence before being trained on a sequence length of 512, the final model. We set the learning rate of 1e-4 for the SentencePiece model as this is being trained from scratch and 2e-5 for the Medical Vocabulary model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pretraining", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "After pretraining, we fine-tune our model to each task in the dataset. We use a learning rate of 5e-5 for five epochs. We also use a fixed seed of 42 for all libraries. We train our model on the official training data and report our results on the development and test sets of each task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fine-tuning", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We fine-tune 12 models to three separate tasks and evaluate on both the development and the test sets due to distribution mismatch (the test sets were made much later than the original training/development sets). We fine-tuned the BERT base model plus medical vocabulary with the models pretrained for 10k, 90k (intermediate), 100k (final) steps and a final model without the medical vocabulary. Similarly, we pretrain the BERT model with a PubMed SentencePiece vocabulary on models for 90k (intermediate) and 100k steps (final). We fine-tune all the BioBERT models, where all v1.0 models are trained on abstracts of a specific corpus (e.g., Pubmed or PMC), and the v1.1 model is trained on the full-text corpus. We also fine-tune the SciBERT models with BERT base vocabulary (BaseVocab) and Semantic Scholar SentencePiece vocabulary (SciVocab). Finally, we fine-tune our baseline (BERT base). We report our results in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 919, |
| "end": 926, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Fine-tuning", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Overall, we found that fine-tuned models, with the exception of our SentencePiece model and Medical Vocab model for QA, outperformed the BERT base baseline.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For the NLI task, the SentencePiece models and the Medical Vocab (final) model performed worse on the development set, however the Medical Vocab (final) model performed best on the test set. All other models scored higher than the BERT base model. The BioBERT models, on average, performed best here as the task involved inference from a medical sentence (a clinical note) to a normalised sentence (summary).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "On the RQE dataset, all models performed reasonably on the development set, with the PubMED models scoring the best, with the exception of the final SentencePiece model as the task required interpretation of patient language in addition to academic. However, all models performed poorly on the test set, with no model scoring higher than random guess due to a marginal distribution mismatch between the training, development sets against the test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "On the QA dataset, the task involved interpreting a patient's naturally formed question to a medical answer from medical articles. Here, BioBERT performed the best on the test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In summary, all models performed similarly with only mild discrepancies which we discuss in the following section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We find that the SciVocab model performed better than the BaseVocab model (see Table 1 , rows 11-12). BaseVocab is trained similarly to our medical vocab model where BERT base was finetuned with additional data before further tuned to a downstream task. The reason the SciVocab model performed better is that it had learned better representations during the training phase while the BaseVocab model learned noisier representations due to a vocabulary mismatch between the Semantic Scholar dataset and the BERT vocabulary. However, the SciVocab may not be as beneficial due to the academic nature of the vocabulary as the MEDIQA contains a mix of both academic medical terminology and natural patient questions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 79, |
| "end": 86, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "SciVocab versus BaseVocab", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The BioBERT and SciBERT models are both pretrained/tuned on academic biomedical literature. However, there are two keys differences to note, SciBERT is trained from scratch as it is not possible to completely alter the BERT vocabulary while maintaining the original weights. We found that, contrary to previous research (Zhu et al., 2019) , citing a development accuracy of 43% (RQE) and 68% (NLI), the BioBERT models performed better on the development and test sets of the MEDIQA datasets. We attribute BioBERT's strength to the fact that it was fine-tuned rather than trained from scratch, and thus incorporates both open-domain and biomedical-domain knowledge. Further evaluation with a purely biomedical reasoning task such as clinical term extraction (Si et al., 2019) may be suitable for further comparison.", |
| "cite_spans": [ |
| { |
| "start": 320, |
| "end": 338, |
| "text": "(Zhu et al., 2019)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 757, |
| "end": 774, |
| "text": "(Si et al., 2019)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BioBERT versus SciBERT SciVocab", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We found that the BioBERT models performed better than previously reported and that the size of corpus matters in the performance of the model as the full-text corpus model is generally better.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BioBERT versus SciBERT SciVocab", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We found that, on two of the tasks, the medical vocab model performed better due to the nature of the task. The SentencePiece vocab is adapted only for the PMC corpus, which is academically written without misspellings or colloquialism, in contrast with the datasets. That is, having a corpus specific vocabulary might not be sufficient even within the same domain due to the different nature of writing styles; academic and general audience. Furthermore, we found that the SentencePiece vocab does not contain all the punctuation tokens, which further hurts performance when it comes to understanding questions as '?' is replaced with 'unk'.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Medical versus SentencePiece Vocab", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Consistent with SciBERT and BaseVocab vocabulary overlap, there was a 40% overlap in vocabulary between BERT base vocabulary and the PMC SentencePiece vocabulary, highlighting the vocabulary mismatch between two corpora. Also, there is a 4% overlap in the added medical suffix/prefix vocabulary and the SentencePiece vocabulary suggesting that the PMC corpus was likely not training the representations of the added prefix and suffix tokens correctly because they do not appear frequently enough. Finally, due to the relatively smaller size of pretraining dataset compared with all the other models, the Sentence-Piece model most likely overfit as the performance across all datasets worsened with more training steps (see Table 1 , rows 6-7).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 723, |
| "end": 730, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Medical versus SentencePiece Vocab", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "However, the training with the PMC corpus allowed for better adaption to the downstream tasks. Our models did not perform as well as BioBERT as they are trained on a smaller subset than the original models. We also find that the intermediate SentencePiece model performs better than the final model, and this is because the downstream task had only short sequences, introducing noise and overfitting. The medical vocab model, rather than the SentencePiece model, is more robust against this noise as it is not trained from scratch.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Medical versus SentencePiece Vocab", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Although trained similarly, all the BioBERT models outperformed our pretrained models across all datasets. For a direct comparison, we compare BioBERT v1.1 PubMED as this shares the same dataset and pretraining procedures. The only notable differences between the BioBERT model and ours is that: 1) 3% of the Medical vocab model is augmented with medical suffix/prefix and 2) We trained only a subset of PubMED on the Medical vocab model. We do a preliminary test by removing additional vocabulary (Table 1, row 4) in our model for a comparison against dataset size. We saw that the performance of the model increased slightly on average, leading to the conclusion that the extra vocabulary was hurting performance as they were not well trained. Overall, we also find that the accuracy is still lower than the BioBERT model, suggesting that additional dataset size is crucial to achieving a better performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BioBERT versus Medical Vocab", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "In all the models, although vocabulary helps (e.g., SciVocab vs. BaseVocab), this effect is limited to the pretraining phase when learning representations, but when applying to a downstream task, it is more important to have additional corpus data that is suited to the downstream task. This effect is shown where SciBERT basevocab (fine-tuned from the BERT base model) performed better than the BERT base model. The additional corpus data is useful in the case of BioBERT vs. SciVocab as BioBERT is fine-tuned with additional data on top of the BookCorpus and Wikipedia datasets of the BERT model. We hypothesise that the best way to maximise all these effects is instead of fine-tuning from one corpus to the other, to combine both the open-domain and target domain corpora and pretrain the model from scratch with a well-tuned vocabulary. We leave this to future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Corpus and Vocabulary", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "There are several limitations to our study which we leave as directions for future work: (1) we only trained on a subset of the PMC dataset for pretraining the Medical Vocab and SentencePiece models as it was computationally intensive to use the full set;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitations and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "(2) we only trained and evaluated on BERT base models. For a complete comparison we need to pretrain all the BioBERT models, SciBERT models, our models and also, for completeness, clinical BERT using the BERT large model, and then 3) we would need to train on datasets of varying sizes to see the effect of the corpus. Furthermore, investigation of character embeddings as a segmentation strategy over the use of wordpieces, avoiding the need for a vocabulary could be useful. However, this would require factorisation of the embedding space to reduce the computational cost of increased sequence length (Lan et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 604, |
| "end": 622, |
| "text": "(Lan et al., 2019)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitations and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Furthermore, empirically, we did not conduct a significance test due to the use of a fixed seed for all randomisation to emphasise reproducibility; however, re-running each experiment without a fixed seed multiple times to produce reliable statistics is desirable in future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitations and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Previous research suggests that using opendomain vocabulary in BERT-based models affects downstream tasks compatibility and leads to a loss in effectiveness. However, our research suggests that this is not the case. An open-domain vocabulary is more useful than an in-domain vocabulary trained on less data, if it is additionally trained on an in-domain corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our code is publicly available at Lexical-Segmentation-Transformer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "GlobalRPh Common Medical Suffixes and Prefixes (Accessed Nov 2019)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This research is supported by the Australian Research Training Program and the CSIRO Research Office Postgraduate Scholarship.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Recognizing Question Entailment for Medical Question Answering", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Abacha", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Demner-Fushman", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "American Medical Informatics Association Annual Symposium Proceedings", |
| "volume": "", |
| "issue": "", |
| "pages": "310--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Abacha and Demner-Fushman. 2016. Recogniz- ing Question Entailment for Medical Question An- swering. American Medical Informatics Association Annual Symposium Proceedings, 2016:310-318.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Publicly available clinical BERT embeddings", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Alsentzer", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Murphy", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Boag", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Hung", |
| "middle": [], |
| "last": "Weng", |
| "suffix": "" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "Jindi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tristan", |
| "middle": [], |
| "last": "Naumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Mcdermott", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2nd Clinical Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "72--78", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-1909" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Alsentzer, John Murphy, William Boag, Wei- Hung Weng, Di Jindi, Tristan Naumann, and Matthew McDermott. 2019. Publicly available clin- ical BERT embeddings. In Proceedings of the 2nd Clinical Natural Language Processing Workshop, pages 72-78, Minneapolis, Minnesota, USA. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "SciB-ERT: A pretrained language model for scientific text", |
| "authors": [ |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Arman", |
| "middle": [], |
| "last": "Cohan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3606--3611", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1371" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. SciB- ERT: A pretrained language model for scientific text. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3606- 3611, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Overview of the MEDIQA 2019 shared task on textual inference, question entailment and question answering", |
| "authors": [ |
| { |
| "first": "Asma", |
| "middle": [], |
| "last": "Ben Abacha", |
| "suffix": "" |
| }, |
| { |
| "first": "Chaitanya", |
| "middle": [], |
| "last": "Shivade", |
| "suffix": "" |
| }, |
| { |
| "first": "Dina", |
| "middle": [], |
| "last": "Demner-Fushman", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 18th BioNLP Workshop and Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "370--379", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5039" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Asma Ben Abacha, Chaitanya Shivade, and Dina Demner-Fushman. 2019. Overview of the MEDIQA 2019 shared task on textual inference, question en- tailment and question answering. In Proceedings of the 18th BioNLP Workshop and Shared Task, pages 370-379, Florence, Italy.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "How to train good word embeddings for biomedical NLP", |
| "authors": [ |
| { |
| "first": "Billy", |
| "middle": [], |
| "last": "Chiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Gamal", |
| "middle": [], |
| "last": "Crichton", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 15th Workshop on Biomedical Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "166--174", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-2922" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Billy Chiu, Gamal Crichton, Anna Korhonen, and Sampo Pyysalo. 2016. How to train good word em- beddings for biomedical NLP. In Proceedings of the 15th Workshop on Biomedical Natural Language Processing, pages 166-174, Berlin, Germany.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "MIMIC-III, a freely accessible critical care database", |
| "authors": [ |
| { |
| "first": "Alistair", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Pollard", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Li-Wei", |
| "middle": [], |
| "last": "Lehman", |
| "suffix": "" |
| }, |
| { |
| "first": "Mengling", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Ghassemi", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Moody", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Szolovits", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Celi", |
| "suffix": "" |
| }, |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Mark", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Scientific Data", |
| "volume": "3", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1038/sdata.2016.35" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alistair Johnson, Tom Pollard, Lu Shen, Li-wei Lehman, Mengling Feng, Mohammad Ghassemi, Benjamin Moody, Peter Szolovits, Anthony Celi, and Roger Mark. 2016. MIMIC-III, a freely accessi- ble critical care database. Scientific Data, 3:160035.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "SECNLP: A survey of embeddings in clinical natural language processing", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Kalyan", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Sangeetha", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K.S Kalyan and S. Sangeetha. 2019. SECNLP: A survey of embeddings in clinical natural lan- guage processing. Computing Research Repository, abs/1903.01039.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Sentence-Piece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "66--71", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-2012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taku Kudo and John Richardson. 2018. Sentence- Piece: A simple and language independent subword tokenizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Character and subword-based word representation for neural language modeling prediction", |
| "authors": [ |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Labeau", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Allauzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Subword and Character Level Models in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1--13", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-4101" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthieu Labeau and Alexandre Allauzen. 2017. Char- acter and subword-based word representation for neural language modeling prediction. In Proceed- ings of the First Workshop on Subword and Charac- ter Level Models in NLP, pages 1-13, Copenhagen, Denmark. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", |
| "authors": [ |
| { |
| "first": "Zhenzhong", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Computing Research Repository", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.11942" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. Computing Research Repository, arXiv:1909.11942.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "BioBERT: a pre-trained biomedical language representation model for biomedical text mining", |
| "authors": [ |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Wonjin", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungdong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghyeon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunkyu", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [], |
| "last": "Ho So", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaewoo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Bioinformatics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/bioinformatics/btz682" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2019. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "RoBERTa: A robustly optimized BERT pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretrain- ing approach. Computing Research Repository, abs/1907.11692.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Addressing the rare word problem in neural machine translation", |
| "authors": [ |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "11--19", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P15-1002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thang Luong, Ilya Sutskever, Quoc Le, Oriol Vinyals, and Wojciech Zaremba. 2015. Addressing the rare word problem in neural machine translation. In Pro- ceedings of the 53rd Annual Meeting of the Associ- ation for Computational Linguistics and the 7th In- ternational Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 11-19, Beijing, China. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Question answering in restricted domains: An overview", |
| "authors": [ |
| { |
| "first": "Diego", |
| "middle": [], |
| "last": "Molla", |
| "suffix": "" |
| }, |
| { |
| "first": "Jos", |
| "middle": [], |
| "last": "Gonzlez", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computational Linguistics", |
| "volume": "33", |
| "issue": "", |
| "pages": "41--61", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/coli.2007.33.1.41" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diego Molla and Jos Gonzlez. 2007. Question answer- ing in restricted domains: An overview. Computa- tional Linguistics, 33:41-61.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "ScispaCy: Fast and robust models for biomedical natural language processing", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "King", |
| "suffix": "" |
| }, |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Waleed", |
| "middle": [], |
| "last": "Ammar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 18th BioNLP Workshop and Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "319--327", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5034" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Neumann, Daniel King, Iz Beltagy, and Waleed Ammar. 2019. ScispaCy: Fast and robust models for biomedical natural language processing. In Pro- ceedings of the 18th BioNLP Workshop and Shared Task, pages 319-327, Florence, Italy.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "ANU-CSIRO at MEDIQA 2019: Question answering using deep contextual knowledge", |
| "authors": [ |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarvnaz", |
| "middle": [], |
| "last": "Karimi", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhenchang", |
| "middle": [], |
| "last": "Xing", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 18th BioNLP Workshop and Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "478--487", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5051" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vincent Nguyen, Sarvnaz Karimi, and Zhenchang Xing. 2019. ANU-CSIRO at MEDIQA 2019: Ques- tion answering using deep contextual knowledge. In Proceedings of the 18th BioNLP Workshop and Shared Task, pages 478-487, Florence, Italy.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Transfer learning in biomedical natural language processing: An evaluation of BERT and ELMo on ten benchmarking datasets", |
| "authors": [ |
| { |
| "first": "Yifan", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Shankai", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Workshop on Biomedical Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "58--65", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yifan Peng, Shankai Yan, and Zhiyong Lu. 2019. Transfer learning in biomedical natural language processing: An evaluation of BERT and ELMo on ten benchmarking datasets. In Proceedings of the Workshop on Biomedical Natural Language Pro- cessing, pages 58-65, Florence, Italy.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Squad: 100, 000+ questions for machine comprehension of text. Computing Research Repository", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Konstantin", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100, 000+ questions for machine comprehension of text. Computing Re- search Repository, abs/1606.05250.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Enhancing clinical concept extraction with contextual embedding", |
| "authors": [ |
| { |
| "first": "Yuqi", |
| "middle": [], |
| "last": "Si", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingqi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kirk", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Computing Research Repository", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuqi Si, Jingqi Wang, Hua Xu, and Kirk Roberts. 2019. Enhancing clinical concept extraction with contex- tual embedding. Computing Research Repository, abs/1902.08691.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "the Proceedings of International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "353--355", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In In the Proceedings of International Conference on Learn- ing Representations, pages 353-355, Brussels, Bel- gium.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation. Computing Research Repository", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Klingner", |
| "suffix": "" |
| }, |
| { |
| "first": "Apurva", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaobing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Gouws", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshikiyo", |
| "middle": [], |
| "last": "Kato", |
| "suffix": "" |
| }, |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideto", |
| "middle": [], |
| "last": "Kazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Stevens", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Kurian", |
| "suffix": "" |
| }, |
| { |
| "first": "Nishant", |
| "middle": [], |
| "last": "Patil", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.08144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, \u0141ukasz Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2016. Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Trans- lation. Computing Research Repository, page arXiv:1609.08144.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding. Computing Research Repository", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.08237" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc V. Le. 2019. XLNet: Generalized Autoregressive Pretrain- ing for Language Understanding. Computing Re- search Repository, page arXiv:1906.08237.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "PANLP at MEDIQA 2019: Pre-trained language models, transfer learning and knowledge distillation", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaofeng", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Keqiang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xun", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiepeng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Ni", |
| "suffix": "" |
| }, |
| { |
| "first": "Guotong", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 18th BioNLP Workshop and Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "380--388", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5040" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Zhu, Xiaofeng Zhou, Keqiang Wang, Xun Luo, Xiepeng Li, Yuan Ni, and Guotong Xie. 2019. PANLP at MEDIQA 2019: Pre-trained language models, transfer learning and knowledge distillation. In Proceedings of the 18th BioNLP Workshop and Shared Task, pages 380-388, Florence, Italy.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "Word segmentation in WordPiece and the ideal segmentation using medical morphemes.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF1": { |
| "text": "Comparing accuracy of all models in three tasks using the MEDIQA datasets.", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |