| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:53:24.072775Z" |
| }, |
| "title": "Learning Entity-Likeness with Multiple Approximate Matches for Biomedical NER", |
| "authors": [ |
| { |
| "first": "An", |
| "middle": [ |
| "Nguyen" |
| ], |
| "last": "Le", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Artificial Intelligence Laboratory", |
| "institution": "Nakahara Ward", |
| "location": { |
| "addrLine": "Fujitsu Ltd. 4 Chome-1-1 Kamikodanaka", |
| "settlement": "Kawasaki", |
| "region": "Kanagawa", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hajime", |
| "middle": [], |
| "last": "Morita", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Artificial Intelligence Laboratory", |
| "institution": "Nakahara Ward", |
| "location": { |
| "addrLine": "Fujitsu Ltd. 4 Chome-1-1 Kamikodanaka", |
| "settlement": "Kawasaki", |
| "region": "Kanagawa", |
| "country": "Japan" |
| } |
| }, |
| "email": "hmorita@fujitsu.com" |
| }, |
| { |
| "first": "Tomoya", |
| "middle": [], |
| "last": "Iwakura", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Artificial Intelligence Laboratory", |
| "institution": "Nakahara Ward", |
| "location": { |
| "addrLine": "Fujitsu Ltd. 4 Chome-1-1 Kamikodanaka", |
| "settlement": "Kawasaki", |
| "region": "Kanagawa", |
| "country": "Japan" |
| } |
| }, |
| "email": "iwakura.tomoya@fujitsu.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Biomedical Named Entities are complex, so approximate matching has been used to improve entity coverage. However, the usual approximate matching approach fetches only one matching result, which is often noisy. In this work, we propose a method for biomedical NER that fetches multiple approximate matches for a given phrase to leverage their variations to estimate entity-likeness. The model uses pooling to discard the unnecessary information from the noisy matching results, and learn the entity-likeness of the phrase with multiple approximate matches. Experimental results on three benchmark datasets from the biomedical domain, BC2GM, NCBI-disease, and BC4CHEMD, demonstrate the effectiveness. Our model improves the average Fmeasures by up to 0.21 percentage points compared to a BioBERT-based NER.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Biomedical Named Entities are complex, so approximate matching has been used to improve entity coverage. However, the usual approximate matching approach fetches only one matching result, which is often noisy. In this work, we propose a method for biomedical NER that fetches multiple approximate matches for a given phrase to leverage their variations to estimate entity-likeness. The model uses pooling to discard the unnecessary information from the noisy matching results, and learn the entity-likeness of the phrase with multiple approximate matches. Experimental results on three benchmark datasets from the biomedical domain, BC2GM, NCBI-disease, and BC4CHEMD, demonstrate the effectiveness. Our model improves the average Fmeasures by up to 0.21 percentage points compared to a BioBERT-based NER.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In the biomedical field, obtaining labelled data is very costly. Biomedical Named Entities (NEs) are complex and new NEs are continuously increasing in significant numbers, leading to unknown-word issues in Biomedical Named Entity Recognition (BioNER) tasks. One reason why biomedical NEs are complex is that they have many variations with the interchangeability of Roman numbers and Latin characters, spaces and hyphens, etc. The number of new biomedical research papers is increasing, wherein approximately two papers per minute, resulting in more than 1 million papers each year, are added to the PubMed database (Landhuis, 2016) . With this number of publications, new NEs are constantly being reported.", |
| "cite_spans": [ |
| { |
| "start": 616, |
| "end": 632, |
| "text": "(Landhuis, 2016)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the last few years, NER using pre-trained language models (LMs), such as BERT (Devlin et al., 2018) , ELMo (Peters et al., 2018) , and Flair (Akbik et al., 2019) , has shown state-of-the-art performance. In the biomedical domain, pre-trained LMs such as BioBERT (Lee et al., 2019a) and BioELMo , which are BERT and ELMo trained on a biomedical domain text, have achieved the state-of-the-art performance in many biomedical natural language processing tasks including NER. However, only using previously trained LMs cannot cover the continuously increasing new entities due to complex characteristics of biomedical NEs, lead to unknown words problem. Despite being used as approaches to avoid unknown words problem, subword segmentation (Sennrich et al., 2015; Kudo and Richardson, 2018) methods consider subwords represented as unique IDs, but not words or their synonyms. therefore, it is difficult for subword or character based LMs to cover biomedical NEs , which are complex and contain various of expression described in section 3. Moreover, LM pre-training is costly, time-consuming, and computationally expensive. Training BioBERT on biomedical corpora based on the BERT model requires 10 to 23 days on eight NVIDIA V100 GPUs (Lee et al., 2019a) .", |
| "cite_spans": [ |
| { |
| "start": 81, |
| "end": 102, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 110, |
| "end": 131, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 144, |
| "end": 164, |
| "text": "(Akbik et al., 2019)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 265, |
| "end": 284, |
| "text": "(Lee et al., 2019a)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 739, |
| "end": 762, |
| "text": "(Sennrich et al., 2015;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 763, |
| "end": 789, |
| "text": "Kudo and Richardson, 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1236, |
| "end": 1255, |
| "text": "(Lee et al., 2019a)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To deal with the complex and continuously increasing entities, the use of dictionary-based approaches can be an effective approach in previous works (Collobert et al., 2011; Rijhwani et al., 2020) . In contrast to pre-training models, we can cover new NEs by adding entries to the dictionary, without needing time-consuming pre-training. There are two types of dictionary application methods: exact matching and approximate matching. Exact matching has been incorporated into neural NER (Collobert et al., 2011; Chiu and Nichols, 2016; Wu et al., 2018) and non-neural NER methods (Uchimoto et al., 2000) to improve accuracy.", |
| "cite_spans": [ |
| { |
| "start": 149, |
| "end": 173, |
| "text": "(Collobert et al., 2011;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 174, |
| "end": 196, |
| "text": "Rijhwani et al., 2020)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 487, |
| "end": 511, |
| "text": "(Collobert et al., 2011;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 512, |
| "end": 535, |
| "text": "Chiu and Nichols, 2016;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 536, |
| "end": 552, |
| "text": "Wu et al., 2018)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 580, |
| "end": 603, |
| "text": "(Uchimoto et al., 2000)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Exact matching cannot totally cover all of the complex and newly-created NEs. In the biomedical domain, new NEs are created by modifying the endings of the existing one. For example, the new gene TAAR7P was named by modifying the ending of the existing gene TAAR8. To improve the coverage of entities, approximate matching has been used to manage new NEs in non-neural NER (Cohen and Sarawagi, 2004) . However, the approximate matching approach fetches only one matching result, which cannot cover all variations of NEs. For example, NEs \"Type-1 angiotensin II receptorassociated protein\" have many variations such as \"Type-1 angiotensin II receptor associated protein\", \"Type-1 angiotensin 2 receptor associated protein\", and \"Type 1 angiotensin II receptor-associated protein\". Also, approximate matching results are often noisy.", |
| "cite_spans": [ |
| { |
| "start": 373, |
| "end": 399, |
| "text": "(Cohen and Sarawagi, 2004)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a method to improve neural BioNER by learning the entity-likeness of a given input sentence using multiple approximate matches of the input sentence with a dictionary. We define the entity-likeness as the degree to which a certain input sentence is likely to appear in the dictionary. It is estimated from matching results between the input sentence and entities in the dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We evaluated our method with three biomedical domain benchmarks, i.e., BC2GM, NCBI-disease, and BC4CHEMD dataset. The experimental results show the effectiveness of our approach. It improves F-measures by up to +0.21 points on the biomedical benchmark, and +2.2 points when probing the biomedical ELMo , which is a recent state-of-the-art pre-training method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "For the NER task, previous studies have examined the application of dictionaries in machine learning. Dictionary matching was employed in SVM-based NER (Ratinov and Roth, 2009) and partial matching computed by distance feature between a token and entity in dictionary was considered in semi-Markov extraction processes (Cohen and Sarawagi, 2004) .", |
| "cite_spans": [ |
| { |
| "start": 152, |
| "end": 176, |
| "text": "(Ratinov and Roth, 2009)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 319, |
| "end": 345, |
| "text": "(Cohen and Sarawagi, 2004)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Dictionary matching is also used in Neural NER approaches. Liu et al. added a pre-trained module that softly matches the gazetteers to the semi-Markov CRF-based segmental NER task. Soft matching of gazetteers is also used in the work of Rijhwani et al. (2020) for low-resource NER. Exact matching was used by Collobert et al. (2011) ; they use a network layer to map words of dictionary into feature vectors by a lookup table operation and train the features as input in their model. Chiu and Nichols proposed the use of the longest match-ing, including partial lexicon matching in neural networks. Each word vector has dimensions to express dictionary matching.", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 259, |
| "text": "Rijhwani et al. (2020)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 309, |
| "end": 332, |
| "text": "Collobert et al. (2011)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the CRF-based sequence labeling model for NER, the clustering results of phrases in the search engine query logs were used as features by Lin and Wu (2009) . To improve word representation, a word embedding learning method that leverages information from relevant lexicons to phrase embedding was proposed by Passos et al. (2014) . Handcrafting features obtained from gazetteers were also incorporated to model additional information in the named entity (Wu et al., 2018; .", |
| "cite_spans": [ |
| { |
| "start": 141, |
| "end": 158, |
| "text": "Lin and Wu (2009)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 312, |
| "end": 332, |
| "text": "Passos et al. (2014)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 457, |
| "end": 474, |
| "text": "(Wu et al., 2018;", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Related to approaches employing approximate string matching in Biomedical NER, Tsuruoka and Tsujii proposed a method to recognize entity candidates by approximate searching and filtering out false positives using a binary classifier. Yang et al. used approximate string matching and added preand post-keywords for each bio-entity name to expand the coverage of the dictionary. Xu et al. constructed a dictionary attention layer to incorporate exact dictionary matching and a document-level attention mechanism to improve disease NER.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Approaches based on neural network were also applied for Biomedical NER (Habibi et al., 2017; Crichton et al., 2017; Wang et al., 2018) . For a transformer-based approach, Khan et al. used a shared transformer encoder to capture the embedding vector of each token in input sentence and task specific linear layers to generate representations of multi-tasks including Biomedical NER.", |
| "cite_spans": [ |
| { |
| "start": 72, |
| "end": 93, |
| "text": "(Habibi et al., 2017;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 94, |
| "end": 116, |
| "text": "Crichton et al., 2017;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 117, |
| "end": 135, |
| "text": "Wang et al., 2018)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Differing from these works, we propose a method to learn the entity-likeness of a sentence by leveraging multiple approximate matches of the sentence with one or multiple dictionaries. Recent approaches based on pre-training for specific domains, such as biomedical (Lee et al., 2019a; , clinical (Huang et al., 2019) and scientific (Beltagy et al., 2019) , have shown high levels of accuracy; our method is complementary to these approaches.", |
| "cite_spans": [ |
| { |
| "start": 266, |
| "end": 285, |
| "text": "(Lee et al., 2019a;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 297, |
| "end": 317, |
| "text": "(Huang et al., 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 333, |
| "end": 355, |
| "text": "(Beltagy et al., 2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Biomedical NEs are complex and ambiguous due to the following characteristics:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NEs in Biomedical Domain", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Variation of Expression Biomedical NEs have various synonyms, including abbreviations, interchangeability of Roman numbers and Latin characters, insertions and deletions of hyphens and spaces, and changes in word order. For example, the gene \"Angiotensin II Receptor Type 1\" has the official name \"AGTR1\", as well as more than ten other names, e.g., AGTR -1, Type -1 Angiotensin II Receptor, Angiotensin Receptor 1B, and AT1 Receptor. Even if the dictionary is further expanded, exact matching cannot entirely cover all possible variations of NEs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NEs in Biomedical Domain", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Composite Mentions NEs in the biomedical domain are frequently connected by \"and,\" \"or\" in a single span which refers to more than one entity. For example, \"alpha and beta globin\" refers to \"alpha globin\" and \"beta globin\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NEs in Biomedical Domain", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Nested NEs Nested NEs Ringland et al., 2019) , where one NE is completely contained by the other, are also commonly used in biomedical data. For example, both \"adenylate cyclase activating polypeptide 1\" and \"adenylate cyclase\" are the names of proteins.", |
| "cite_spans": [ |
| { |
| "start": 22, |
| "end": 44, |
| "text": "Ringland et al., 2019)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NEs in Biomedical Domain", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Entity ambiguity The same mention may often refer to many different entities depending on context. For example, \"VHL\" can be either a disease name \"Von Hippel-Lindau (VHL) disease\" or a gene name \"VHL gene\" depending on context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NEs in Biomedical Domain", |
| "sec_num": "3" |
| }, |
| { |
| "text": "NEs in the biomedical domain are continuously increasing in number every year. When using exact matching or pre-trained LMs for BioNER, it is difficult to sufficiently cover all possible combinations of NEs, leading to the omission of NE recognition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NEs in Biomedical Domain", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The concept of our approach is that the entitylikeness of a given input sentence can be estimated by its maximal similarity to entities in a dictionary. Our motivation is to assign the entitylikeness to each word of the input sentence. The overall flow of the proposed approach is as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Entity-likeness with Multiple Approximate Matches", |
| "sec_num": "4" |
| }, |
| { |
| "text": "1. Given an input sentence, we first fetch matching results between the input sentence and a specified dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Entity-likeness with Multiple Approximate Matches", |
| "sec_num": "4" |
| }, |
| { |
| "text": "2. We create matching patterns based on the matching results, and assign them to each word in the input sentence. The matching pattern is a label that indicates how each word matches with the dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Entity-likeness with Multiple Approximate Matches", |
| "sec_num": "4" |
| }, |
| { |
| "text": "3. For each word in the input sentence, we build a vector for predicting entity-likeness from the multiple matching patterns by a pooling operation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Entity-likeness with Multiple Approximate Matches", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We build an NER model learning both vector of entity-likeness and contextual embedding derived from pre-trained LMs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4.", |
| "sec_num": null |
| }, |
| { |
| "text": "Given an input sentence, we first fetch the matching results between the input sentence and entities in a dictionary. Since we cannot specify which part of the input sentence contains the entity, we calculate the string similarity of all continuous word level N -grams (N \u2264 5) in the input sentence with all dictionary entries. The matching returns entries whose similarity with the N -gram is larger than a specified threshold 1 . We regard a match of Ngram with an entity with threshold 1.0 as an exact matching. By employing the multiple approximate matchings of N -gram with the dictionary, it is possible to obtain useful information about the multiple matches for estimating the entity-likeness of the N -grams, especially in the case of predicting a new NE which is similar to the existing one. For example, we can obtain information on the interchangeability of Greek or Roman characters in NEs from dictionary entries \"beta-1 Adrenergic Receptor\", \"\u03b2-1 Adrenergic Receptor\" and other synonyms. The information is useful for recognizing the unknown NE \"\u03b1-1 Adrenergic Receptor\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Creating Multiple Approximate Matches", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Based on the matching results of N -grams (N \u2264 5) with a dictionary obtained in section 4.1, we create a set of dictionary matching patterns that includes the information of the dictionary that is used, the types of matching, and the matching position; this information is assigned to each word in the input sentence. The type of matching is set to \"Exact\" if the N -gram exactly matches the dictionary entry, otherwise it is set to \"Approximate\". There are three types of matching positions (B (Beginning), I (Inside), and E (Ending)) which indicate the position of the word in the N -gram.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Creating Dictionary Matching Patterns", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For example, as shown in Figure 1 , the input sentence \"EGFR is epidermal growth factor receptor\" is matched with a gene/protein dictionary. The gene/protein dictionary includes entries such as \"epidermal growth factor receptor substrate Figure 1 : Method to create matching patterns using a gene/protein dictionary. The blue markers represent N -grams of the input sentence, and the purple, yellow, and red markers represent N -grams matching with the corresponding dictionary entries. The green marker describes the current word and corresponding matching patterns that are created and assigned to the current word.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 25, |
| "end": 33, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 238, |
| "end": 246, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Creating Dictionary Matching Patterns", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "15,\" \"epidermal growth factor receptor GRB-7,\" etc. As shown in Figure 1 , 3-gram N1 with the beginning word w 0 \"epidermal\" exactly matches with gene/protein dictionary entry M1 and it approximately matches with entries M3, M4 and M5. The matching result of the 3-gram N1 assigns matching patterns: \"Gene-Exact-B\" and \"Gene-Approximate-B\" to w 0 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 64, |
| "end": 72, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Creating Dictionary Matching Patterns", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In the same way, 4-gram N2 approximately matches with dictionary entries M6 and M7. In this case, the word \"epidermal\" is inside the N2 and therefore the matching result of the N2 assigns matching patterns:\"Gene-Approximate-I' to w 0 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Creating Dictionary Matching Patterns", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Based on matching results between all N -grams of the input sentence and the dictionary, we can obtain a set of matching patterns for each word in the input sentence. The possible matching patterns for each word are {Number of dictionaries} \u00d7 {Exact, Approximate} \u00d7 {B, I, E}. For example, in Figure 1 , a set of matching patterns with the Gene dictionary for the third word \"epidermal\" are {\"Gene-Exact-B,\" \"Gene-Approximate-B,\" \"Gene-Approximate-I\"}.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 293, |
| "end": 301, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Creating Dictionary Matching Patterns", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "After creating sets of dictionary matching patterns corresponding to each word, we build a representation for the dictionary matching patterns.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Suppose each word w i corresponds to a subset of matching patterns S i \u2282 S, where S is the possible matching patterns, S i is obtained in section 4.2. Here, S i represents the likeliness of that the word forms a part of entities. E i corresponds to embeddings of S i :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "E i = {emb(s)|s \u2208 S i } (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where emb(\u2022) indicates an embedding operation. In experiments, embedding emb(s) is randomly initialized from a normal distribution but not finetuned. Next, we build a vector representation D i of entity-likeness by pooling the embeddings E i ; D i has the same dimension as E i :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "D i = f pool (E i )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where f pool is a pooling operation. The aim of the pooling is to aggregate information for learning from various matching patterns. In order to investigate the effect of various pooling functions, we consider four types of pooling: Sum, Max, Average and Convolution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Sum Pooling It is expected that summarizing all features of the possible matching pattern embeddings gives information for estimating entitylikeness of words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "f sum (E i ) = v\u2208E i v", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "(3) Figure 2 : Illustration of the proposed model architecture. T i and D i are the corresponding contextual word embedding module and dictionary matching pattern module for each word w i in the input sentence, respectively. V i represents interaction between each word and its entity-likeness . The model predicts the token-level NE label, l i . am 1 B, am 1 I,... are embeddings of matching patterns.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 4, |
| "end": 12, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Max Pooling Instead of sum pooling, we use max pooling to compose the set of matching pattern embeddings:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f max (E i ) = max(E i )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Average Pooling In the same way, we consider the average variation of the pooling method:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f avg (E i ) = avg(E i )", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Convolution As a way to combine embeddings, we apply 1-D convolution over the set of matching pattern embeddings to build the dictionary matching embedding:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f conv (E i ) = Conv1d(E i )", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "4.4 Learning Representations of Entity-likeness with NER Figure 2 shows the overview of our method. Given the output of the contextual word embedding T i , and vector representation of entity-likeness D i , the label prediction module predicts the IOB2 labels of input sentence w i . By learning T i and D i together, it is possible to recognize new NEs which were not in the dictionary or training data of LMs. For the pre-trained LMs, we use BioBERT (Lee et al., 2019a) or BioELMo depending on experiments. The layer numbers and the internal details of the label prediction layer vary depending on the used pre-trained LMs. We follow the settings of the original studies (Lee et al., 2019a; . In the case of BioBERT, we use a single linear layer to compute token level IOB2 probabilities. In the case of BioELMo, we follow the probing settings in the work of . We use several linear layers to compute the probabilities.", |
| "cite_spans": [ |
| { |
| "start": 452, |
| "end": 471, |
| "text": "(Lee et al., 2019a)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 673, |
| "end": 692, |
| "text": "(Lee et al., 2019a;", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 57, |
| "end": 65, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Representation of Multiple Matching Patterns", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In this section, we conduct three experiments. Experiment 1 confirms the effectiveness of learning both entity-likeness and contextual embedding for BioNER. Also, we want to confirm if applying appropriate pooling operations can reduce noise in the case of approximate matching. Experiment 2 confirms portability by using our method with different pre-trained LMs. Experiment 3 confirms the effectiveness of our method not only in the biomedical domain but also in the general domain. For pre-trained LMs, we employed BioBERT and BioELMo trained on PubMed and PMC biomedical articles. For experiments on a general domain dataset, we applied the pre-trained BERT base cased LMs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this study, the results were obtained by adopting the proposed and BioBERT-based methods to three benchmark biomedical datasets, BC2GM, NCBIdisease, and BC4CHEMD, which are exclusively annotated with protein, disease, and chemical entities 2 , respectively. For the general domain, we used the CoNLL 2003 dataset (Tjong Kim Sang and De Meulder, 2003) . Table 1 shows the size of the datasets. All datasets are publicly available.", |
| "cite_spans": [ |
| { |
| "start": 316, |
| "end": 353, |
| "text": "(Tjong Kim Sang and De Meulder, 2003)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 356, |
| "end": 363, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We consider the dictionary as a set of names including synonyms of the entities, e.g., Gene, Disease, and Drug. In the biomedical domain, there are several publicly available databases that can be used to create dictionaries. The dictionaries are built from Dataset train dev test BC2GM 12,574 2,519 5,038 NCBI-disease 5,424 923 940 BC4CHEMD 30,682 30,639 26,364 CoNLL 2003 14,987 3,466 3,684 the databases. Therefore, we do not need to create and maintain dictionaries from scratch. We construct dictionaries for genes/proteins, diseases, and drugs, to train the proposed model on the BC2GM, NCBI-disease, and BC4CHEMD datasets, respectively. Further, three dictionaries of person (PER), location (LOC), and organization (ORG) are built to train the proposed model on the CoNLL 2003 dataset.", |
| "cite_spans": [ |
| { |
| "start": 353, |
| "end": 397, |
| "text": "30,682 30,639 26,364 CoNLL 2003 14,987 3,466", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 253, |
| "end": 352, |
| "text": "from Dataset train dev test BC2GM 12,574 2,519 5,038 NCBI-disease 5,424 923 940 BC4CHEMD", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dictionary", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Gene/protein dictionary We created a gene/protein dictionary from public databases: Human Gene Nomenclature (HGNC) and NCBI Entrez Gene (Maglott et al., 2019) . HGNC is a database containing unique names and alias names for human genes. NCBI Entrez Gene is the National Center for Biotechnology Information (NCBI)'s database for gene-specific information (Maglott et al., 2011) . We extracted gene names, their symbols, alias symbols, and alias names to build our gene/protein dictionary. The dictionary contains 292,853 gene entity surfaces.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 158, |
| "text": "(Maglott et al., 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 355, |
| "end": 377, |
| "text": "(Maglott et al., 2011)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dictionary", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We built a disease dictionary based on Human Disease Ontology (LM et al., 2019) . Our disease dictionary is built from disease names and their synonyms based on the ontology with 30,426 disease entities.", |
| "cite_spans": [ |
| { |
| "start": 62, |
| "end": 79, |
| "text": "(LM et al., 2019)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Disease dictionary", |
| "sec_num": null |
| }, |
| { |
| "text": "Drug dictionary For the drug dictionary, we used DrugBank Vocabulary 3 from DrugBank (DS et al., 2019) . We entered common names and synonyms as drug names into the dictionary. The dictionary contains 26,235 drug entities.", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 102, |
| "text": "(DS et al., 2019)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Disease dictionary", |
| "sec_num": null |
| }, |
| { |
| "text": "PER, LOC, and ORG dictionaries We constructed three dictionaries on person (PER), location (LOC), and organization (ORG) from the DBpedia database 4 to train the proposed model on the CoNLL 2003 dataset. We used categories from the 2019-8-30 Version and extracted categories that include keywords such as \"Person,\" \"Organization,\" and \"Places\" to construct the dictionaries. The dictionary consists of 710,492 PER, 37,687 ORG, and 69,028 LOC entities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Disease dictionary", |
| "sec_num": null |
| }, |
| { |
| "text": "To obtain multiple approximate matches of the input sentence and dictionary, we used Simstring (Okazaki and Tsujii, 2010) , an approximate string matching library that searches for similarities between a set of characters (e.g., \"cosine,\" \"jaccard\") with a query string length exceeding a specified threshold. Simstring is known as a fast and efficient algorithm for approximate dictionary matching.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 121, |
| "text": "(Okazaki and Tsujii, 2010)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We used Simstring to obtain matching results for N -gram (N \u2264 5) with the dictionary. The cosine similarity threshold between N -grams of the input sentence and dictionary entries was empirically set to 0.8. This is because the threshold value of 0.8 revealed good results during preliminary experiments. Next, we created a set of matching patterns based on the matching results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "For hyperparameter tuning, entity-likeness representation dimension sizes of 50, 100, and 300, and batch sizes of 16 and 32, were selected. Therein, we set the entity-likeness representation dimension and batch size to 100 and 32, respectively. Contextual word embedding derived from the pre-trained model is concatenated with 100-dimensional entity-likeness representation embeddings, and then fed into a label prediction layer. We applied four types of pooling: Sum, Max, Average, and Convolution. We trained for 20 epochs and the NER results were averaged over five seeds.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "All experiments were conducted using a single NVIDIA GeForce RTX 16 GB GPU. Pytorch version was 1.4.0. We used the HuggingFace PyTorch implementation of (Wolf et al., 2019) 5 to conduct the experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Experiment 1: Learning Entity-likeness with BioBERT We followed the recipe of Lee et al. (2019a) to train the model with the following hyperparameters: learning rates of 1e-5; batch sizes of 32; and weight-decay of 0.001. We used the pre-trained model BioBERT v1.0 (Wiki + Books + PubMed 200K + PMC 270K) 6 as a contextual word embedding.", |
| "cite_spans": [ |
| { |
| "start": 78, |
| "end": 96, |
| "text": "Lee et al. (2019a)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "For the approach using the approximate matching result, we compared our method with . They proposed a pre-training subtagger softdict that softly matches a sentence with gazetteers for NER. This sub-tagger plays the role of an approximate dictionary look-up. Softdict is trained on gazetteers and non-entity N -grams sampled from the corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "They sampled 1 million non-entity N -grams from 14,987 sentences in the CoNLL 2003 training data. For each dataset, we sampled non-entity Ngrams using the same ratio of data size and sample size. Following the settings in their work, we used pre-trained 50-dimensional Glove word embedding (Pennington et al., 2014) , contextualized ELMo embedding, a convolutional character encoder and the pre-trained softdict to train the NER model.", |
| "cite_spans": [ |
| { |
| "start": 290, |
| "end": 315, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setting", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We confirmed the performance of the proposed method with other pre-trained LMs. We conducted experiments using contextual embeddings from pre-trained models BioELMo 7 and Bio word2vec (Pyysalo et al., 2013) 8 . We kept the default hyperparameters settings in Jin et al.'s work, with a batch size of 32, Adam learning rate of 0.002, and training for 10 epochs. The embedding derived from BioELMo or Bio word2vec is concatenated with 100-dimensional entity-likeness representation embeddings and then are fed to four feed-forward layers and a CRF output layer.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 166, |
| "text": "7", |
| "ref_id": null |
| }, |
| { |
| "start": 184, |
| "end": 208, |
| "text": "(Pyysalo et al., 2013) 8", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment 2: Learning Entity-likeness with BioELMo and Bio word2vec", |
| "sec_num": null |
| }, |
| { |
| "text": "Experiment 3: Learning Entity-likeness with BERT For experiments on the CoNLL 2003 dataset, a pre-trained BERT-base-cased model was used instead of BioBERT. Hyperparameters were set the same as those used for learning entity-likeness in Experiment 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment 2: Learning Entity-likeness with BioELMo and Bio word2vec", |
| "sec_num": null |
| }, |
| { |
| "text": "For learning entity-likeness with BioBERT, we evaluated the accuracy of the results with entity-level F-measures. For learning entity-likeness with BioELMo and Bio word2vec, we used the official evaluation codes of BC2GM, which contain multiple ground-truth tags to calculate F-measures, following the work of .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "The experimental results are presented in Tables 2, 3 and 4. In Table 2 , the F-measures were obtained in the experiments conducted based on the Pytorch implementation library of (Wolf et al., 2019) ; the best scores are denoted in bold. The scores are almost the same with scores reported in (Lee et al., 2019b) , which are not the scores reported in the original BioBERT papers (Lee et al., 2019a) .", |
| "cite_spans": [ |
| { |
| "start": 179, |
| "end": 198, |
| "text": "(Wolf et al., 2019)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 293, |
| "end": 312, |
| "text": "(Lee et al., 2019b)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 380, |
| "end": 399, |
| "text": "(Lee et al., 2019a)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 64, |
| "end": 71, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "The difference in scores of the original paper (Lee et al., 2019a) and (Lee et al., 2019b ) is due to the neural network implementation library (Pytorch-based or TensorFlow-based), the implementation framework (HuggingFace, etc.), and the GPU architecture and setting of the random seed. Tables 2, 3 and 4, learning both exact matching and approximate matching outperforms BioBERT-based methods and improves Fmeasures by up to +0.13, +0.21 and +0.06 points on the three biomedical benchmarks BC2GM, NCBIdisease and BC4CHEMD, respectively; BioELMo and Bio word2vec improve F-measures by up to +2.2 and +4.9 points on BC2GM; BERT-based methods improve F-measures by up to +0.25 points on CoNLL 2003.", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 66, |
| "text": "(Lee et al., 2019a)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 71, |
| "end": 89, |
| "text": "(Lee et al., 2019b", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 288, |
| "end": 299, |
| "text": "Tables 2, 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "The experimental results indicate that, in the case of exact matching, F-measures are not highly different for the four types of pooling. As shown in Table 2 , 3 and 4, sum pooling obtains the best results in the case of approximate matching. It is considered to be more informative for summarizing all features of the possible approximate matching patterns to estimate entity-likeness. Precision is improved in exact matching while recall is improved in approximate matching. In approximate matching, even though the matching results are noisy, tuning to select the appropriate pooling can help minimize noise. Our approach has effectiveness for small datasets such as NCBI-disease, and multi-category datasets such as CoNLL 2003, where F-measures improved by up to +0.21 and +0.25 points, respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 150, |
| "end": 158, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In Table 2 , the improvement of F-measures is not significant on the BC4CHEMD dataset. It is thought that this is because approximate matching of N -gram (N \u2264 5) returns only dictionary entries which approximately match with N -grams of up to 5 words, while there are drug names whose lengths are much longer than 5 words in the BC4CHEMD dataset. For datasets containing long NEs, it is necessary to set N to larger values.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our approach has effectiveness for small datasets with complicated NEs. In reality, obtaining large-scale domain-specific data like BC2GM and BC4CHEMD is very costly, while NEs in the biomedical domain are complex and continuously increasing every year.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this paper, we proposed a new approach: learning the entity-likeness of phrases in sentences by using multiple approximate matching results. The experiments show three properties. The approach has portability with various pre-trained LMs. Our Sum pooling methods efficiently filter noisy approximate matching results for learning entity-likeness. Our approach effectively works particularly on small datasets, not only in the biomedical area but also in more general domains. Moreover, our approach does not require expensive computation. We hope that the proposed approach can contribute to identifying NEs in such cases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Note that an N -gram can be matched with one or multiple dictionaries when we have two or more dictionaries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/cambridgeltl/ MTL-Bioinformatics-2016", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.drugbank.ca/releases/ 5-1-4/downloads/all-drugbank-vocabulary 4 https://downloads.dbpedia.org/repo/ lts/generic/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/huggingface/ transformers 6 https://github.com/naver/ biobert-pretrained", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/Andy-jqa/bioelmo 8 http://bio.nlplab.org", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "FLAIR: An easy-to-use framework for state-of-theart NLP", |
| "authors": [ |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Akbik", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Bergmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Duncan", |
| "middle": [], |
| "last": "Blythe", |
| "suffix": "" |
| }, |
| { |
| "first": "Kashif", |
| "middle": [], |
| "last": "Rasul", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Schweter", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Vollgraf", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
| "volume": "", |
| "issue": "", |
| "pages": "54--59", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-4010" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan Akbik, Tanja Bergmann, Duncan Blythe, Kashif Rasul, Stefan Schweter, and Roland Vollgraf. 2019. FLAIR: An easy-to-use framework for state-of-the- art NLP. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations), pages 54-59, Minneapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "SciB-ERT: A pretrained language model for scientific text", |
| "authors": [ |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Arman", |
| "middle": [], |
| "last": "Cohan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3613--3618", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. SciB- ERT: A pretrained language model for scientific text. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3613- 3618.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Named entity recognition with bidirectional lstm-cnns", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "C" |
| ], |
| "last": "Jason", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Chiu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nichols", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "TACL", |
| "volume": "4", |
| "issue": "", |
| "pages": "357--370", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason P. C. Chiu and Eric Nichols. 2016. Named en- tity recognition with bidirectional lstm-cnns. TACL, 4:357-370.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Exploiting dictionaries in named entity extraction: combining semi-markov extraction processes and data integration methods", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunita", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sarawagi", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Tenth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "89--98", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William W. Cohen and Sunita Sarawagi. 2004. Exploit- ing dictionaries in named entity extraction: com- bining semi-markov extraction processes and data integration methods. In Proceedings of the Tenth ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining, Seattle, Washing- ton, USA, August 22-25, 2004, pages 89-98.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Natural language processing (almost) from scratch", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Karlen", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kuksa", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "J. Mach. Learn. Res", |
| "volume": "12", |
| "issue": "", |
| "pages": "2493--2537", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel P. Kuksa. 2011. Natural language processing (almost) from scratch. J. Mach. Learn. Res., 12:2493-2537.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A neural network multi-task learning approach to biomedical named entity recognition", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Crichton", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Chiu", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "BMC Bioinformatics", |
| "volume": "18", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G. Crichton, S. Pyysalo, B. Chiu, and A. Korho- nen. 2017. A neural network multi-task learning approach to biomedical named entity recognition. BMC Bioinformatics, 18(1):368.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: pre-training of deep bidirectional transformers for language under- standing. CoRR, abs/1810.04805.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Drugbank: a comprehensive resource for in silico drug discovery and exploration", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "S" |
| ], |
| "last": "Wishart", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Knox", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "C" |
| ], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Shrivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hassanali", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Stothard", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Woolsey", |
| "middle": [ |
| "J" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wishart DS, Knox C, Guo AC, Shrivastava S, Hassanali M, Stothard P, Chang Z, and Woolsey J. 2019. Drug- bank: a comprehensive resource for in silico drug discovery and exploration.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Deep learning with word embeddings improves biomedical named entity recognition", |
| "authors": [ |
| { |
| "first": "Maryam", |
| "middle": [], |
| "last": "Habibi", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| }, |
| { |
| "first": "Mariana", |
| "middle": [], |
| "last": "Neves", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "Luis" |
| ], |
| "last": "Wiegandt", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulf", |
| "middle": [], |
| "last": "Leser", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Bioinformatics", |
| "volume": "33", |
| "issue": "14", |
| "pages": "37--48", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/bioinformatics/btx228" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maryam Habibi, Leon Weber, Mariana Neves, David Luis Wiegandt, and Ulf Leser. 2017. Deep learning with word embeddings improves biomed- ical named entity recognition. Bioinformatics, 33(14):i37-i48.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "CB10 1SD, and United Kingdom www.genenames.org. 2019. Hgnc gene name database", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "HUGO Gene Nomenclature Committee (HGNC), European Molecular Biology Laboratory, Euro- pean Bioinformatics Institute (EMBL-EBI), Well- come Genome Campus, Hinxton, Cambridge CB10 1SD, and United Kingdom www.genenames.org. 2019. Hgnc gene name database.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Clinicalbert: Modeling clinical notes and predicting hospital readmission", |
| "authors": [ |
| { |
| "first": "Kexin", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaan", |
| "middle": [], |
| "last": "Altosaar", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajesh", |
| "middle": [], |
| "last": "Ranganath", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kexin Huang, Jaan Altosaar, and Rajesh Ran- ganath. 2019. Clinicalbert: Modeling clinical notes and predicting hospital readmission. CoRR, abs/1904.05342.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Probing biomedical embeddings from language models", |
| "authors": [ |
| { |
| "first": "Qiao", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Bhuwan", |
| "middle": [], |
| "last": "Dhingra", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "W" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinghua", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qiao Jin, Bhuwan Dhingra, William W. Cohen, and Xinghua Lu. 2019. Probing biomedical embeddings from language models. CoRR, abs/1904.02181.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Mt-bioner: Multi-task learning for biomedical named entity recognition using deep bidirectional transformers", |
| "authors": [ |
| { |
| "first": "Morteza", |
| "middle": [], |
| "last": "Muhammad Raza Khan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohamed", |
| "middle": [], |
| "last": "Ziyadi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Abdelhady", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Raza Khan, Morteza Ziyadi, and Mo- hamed AbdelHady. 2020. Mt-bioner: Multi-task learning for biomedical named entity recognition us- ing deep bidirectional transformers.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Genia corpus-a semantically annotated corpus for bio-textmining", |
| "authors": [ |
| { |
| "first": "Jin-Dong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomoko", |
| "middle": [], |
| "last": "Ohta", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuka", |
| "middle": [], |
| "last": "Tateisi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Bioinformatics", |
| "volume": "19", |
| "issue": "1", |
| "pages": "180--182", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/bioinformatics/btg1023" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jin-Dong Kim, Tomoko Ohta, Yuka Tateisi, and Jun'ichi Tsujii. 2003. Genia corpus-a semantically annotated corpus for bio-textmining. Bioinformatics (Oxford, England), 19 Suppl 1:i180-2.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "SentencePiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "66--71", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-2012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taku Kudo and John Richardson. 2018. SentencePiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Scientific literature: Information overload", |
| "authors": [ |
| { |
| "first": "Esther", |
| "middle": [], |
| "last": "Landhuis", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Nature", |
| "volume": "535", |
| "issue": "", |
| "pages": "457--458", |
| "other_ids": { |
| "DOI": [ |
| "10.1038/nj7612-457a" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Esther Landhuis. 2016. Scientific literature: Informa- tion overload. Nature, 535:457-458.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
| "authors": [ |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Wonjin", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungdong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghyeon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunkyu", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [], |
| "last": "Ho So", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaewoo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2019a. Biobert: a pre-trained biomedical language representation model for biomedical text mining. CoRR, abs/1901.08746.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
| "authors": [ |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Wonjin", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungdong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghyeon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunkyu", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [], |
| "last": "Ho So", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaewoo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2019b. Biobert: a pre-trained biomedical language representation model for biomedical text mining.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Phrase clustering for discriminative learning", |
| "authors": [ |
| { |
| "first": "Dekang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyun", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1030--1038", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dekang Lin and Xiaoyun Wu. 2009. Phrase cluster- ing for discriminative learning. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP, pages 1030-1038, Suntec, Singapore. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Towards improving neural named entity recognition with gazetteers", |
| "authors": [ |
| { |
| "first": "Tianyu", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jin-Ge", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5301--5307", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1524" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianyu Liu, Jin-Ge Yao, and Chin-Yew Lin. 2019. To- wards improving neural named entity recognition with gazetteers. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 5301-5307, Florence, Italy. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Human disease ontology 2018 update: classification, content and workflow expansion", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "M" |
| ], |
| "last": "Schriml", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Mitraka", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Munro", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Tauber", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Schor", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Nickle", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Felix", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Jeng", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Bearer", |
| "suffix": "" |
| }, |
| { |
| "first": "Lichenstein", |
| "middle": [ |
| "R" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/nar/gky1032" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Schriml LM, Mitraka E, Munro J, Tauber B, Schor M, Nickle L, Felix V, Jeng L, Bearer C, and Lichen- stein R. 2019. Human disease ontology 2018 update: classification, content and workflow expansion.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Entrez gene: gene-centered information at ncbi", |
| "authors": [ |
| { |
| "first": "Donna", |
| "middle": [], |
| "last": "Maglott", |
| "suffix": "" |
| }, |
| { |
| "first": "Jim", |
| "middle": [], |
| "last": "Ostell", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Tatiana", |
| "middle": [], |
| "last": "Pruitt", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tatusova", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Nucleic acids research", |
| "volume": "39", |
| "issue": "", |
| "pages": "52--57", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/nar/gkq1237" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Donna Maglott, Jim Ostell, Kim D Pruitt, and Tatiana Tatusova. 2011. Entrez gene: gene-centered infor- mation at ncbi. Nucleic acids research, 39(Database issue), D52-D57.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Entrez gene: gene-centered information at ncbi", |
| "authors": [ |
| { |
| "first": "Donna", |
| "middle": [], |
| "last": "Maglott", |
| "suffix": "" |
| }, |
| { |
| "first": "Jim", |
| "middle": [], |
| "last": "Ostell", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Tatiana", |
| "middle": [], |
| "last": "Pruitt", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tatusova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Donna Maglott, Jim Ostell, Kim D Pruitt, and Tatiana Tatusova. 2019. Entrez gene: gene-centered infor- mation at ncbi.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Simple and efficient algorithm for approximate dictionary matching", |
| "authors": [ |
| { |
| "first": "Naoaki", |
| "middle": [], |
| "last": "Okazaki", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jun'ichi Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "COLING 2010, 23rd International Conference on Computational Linguistics, Proceedings of the Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "851--859", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naoaki Okazaki and Jun'ichi Tsujii. 2010. Simple and efficient algorithm for approximate dictionary matching. In COLING 2010, 23rd International Conference on Computational Linguistics, Proceed- ings of the Conference, 23-27 August 2010, Beijing, China, pages 851-859.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Lexicon infused phrase embeddings for named entity resolution", |
| "authors": [ |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Passos", |
| "suffix": "" |
| }, |
| { |
| "first": "Vineet", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Eighteenth Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "78--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexandre Passos, Vineet Kumar, and Andrew McCal- lum. 2014. Lexicon infused phrase embeddings for named entity resolution. In Proceedings of the Eigh- teenth Conference on Computational Natural Lan- guage Learning, pages 78-86.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "GloVe: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/D14-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Confer- ence on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. CoRR, abs/1802.05365.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Distributional semantics resources for biomedical text processing", |
| "authors": [ |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Ginter", |
| "suffix": "" |
| }, |
| { |
| "first": "Hans", |
| "middle": [], |
| "last": "Moen", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Salakoski", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ananiadou", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sampo Pyysalo, Filip Ginter, Hans Moen, T. Salakoski, and S. Ananiadou. 2013. Distributional semantics resources for biomedical text processing.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Design challenges and misconceptions in named entity recognition", |
| "authors": [ |
| { |
| "first": "Lev", |
| "middle": [], |
| "last": "Ratinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Thirteenth Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "147--155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lev Ratinov and Dan Roth. 2009. Design chal- lenges and misconceptions in named entity recog- nition. In Proceedings of the Thirteenth Confer- ence on Computational Natural Language Learning (CoNLL-2009), pages 147-155.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Soft gazetteers for lowresource named entity recognition", |
| "authors": [ |
| { |
| "first": "Shruti", |
| "middle": [], |
| "last": "Rijhwani", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuyan", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shruti Rijhwani, Shuyan Zhou, Graham Neubig, and Jaime Carbonell. 2020. Soft gazetteers for low- resource named entity recognition.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "NNE: A dataset for nested named entity recognition in english newswire", |
| "authors": [ |
| { |
| "first": "Nicky", |
| "middle": [], |
| "last": "Ringland", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Hachey", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarvnaz", |
| "middle": [], |
| "last": "Karimi", |
| "suffix": "" |
| }, |
| { |
| "first": "C\u00e9cile", |
| "middle": [], |
| "last": "Paris", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "R" |
| ], |
| "last": "Curran", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nicky Ringland, Xiang Dai, Ben Hachey, Sarvnaz Karimi, C\u00e9cile Paris, and James R. Curran. 2019. NNE: A dataset for nested named entity recognition in english newswire. CoRR, abs/1906.01359.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015. Neural machine translation of rare words with subword units. CoRR, abs/1508.07909.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Learning named entity tagger using domain-specific dictionary", |
| "authors": [ |
| { |
| "first": "Jingbo", |
| "middle": [], |
| "last": "Shang", |
| "suffix": "" |
| }, |
| { |
| "first": "Liyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaotao", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Teng", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiawei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jingbo Shang, Liyuan Liu, Xiang Ren, Xiaotao Gu, Teng Ren, and Jiawei Han. 2018. Learning named entity tagger using domain-specific dictio- nary. CoRR, abs/1809.03599.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition", |
| "authors": [ |
| { |
| "first": "Erik", |
| "middle": [ |
| "F" |
| ], |
| "last": "Tjong", |
| "suffix": "" |
| }, |
| { |
| "first": "Kim", |
| "middle": [], |
| "last": "Sang", |
| "suffix": "" |
| }, |
| { |
| "first": "Fien", |
| "middle": [], |
| "last": "De Meulder", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003", |
| "volume": "", |
| "issue": "", |
| "pages": "142--147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of the Seventh Conference on Natu- ral Language Learning at HLT-NAACL 2003, pages 142-147.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Improving the performance of dictionary-based approaches in protein name recognition", |
| "authors": [ |
| { |
| "first": "Yoshimasa", |
| "middle": [], |
| "last": "Tsuruoka", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Journal of Biomedical Informatics", |
| "volume": "37", |
| "issue": "6", |
| "pages": "461--470", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.jbi.2004.08.003" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshimasa Tsuruoka and Jun'ichi Tsujii. 2004. Im- proving the performance of dictionary-based ap- proaches in protein name recognition. Journal of Biomedical Informatics, 37(6):461 -470. Named Entity Recognition in Biomedicine.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Named entity extraction based on A maximum entropy model and transformation rules", |
| "authors": [ |
| { |
| "first": "Kiyotaka", |
| "middle": [], |
| "last": "Uchimoto", |
| "suffix": "" |
| }, |
| { |
| "first": "Qing", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Masaki", |
| "middle": [], |
| "last": "Murata", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiromi", |
| "middle": [], |
| "last": "Ozaku", |
| "suffix": "" |
| }, |
| { |
| "first": "Hitoshi", |
| "middle": [], |
| "last": "Isahara", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "38th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kiyotaka Uchimoto, Qing Ma, Masaki Murata, Hiromi Ozaku, and Hitoshi Isahara. 2000. Named entity ex- traction based on A maximum entropy model and transformation rules. In 38th Annual Meeting of the Association for Computational Linguistics, Hong Kong, China, October 1-8, 2000.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Cross-type biomedical named entity recognition with deep multi-task learning", |
| "authors": [ |
| { |
| "first": "Xuan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuhao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Marinka", |
| "middle": [], |
| "last": "Zitnik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingbo", |
| "middle": [], |
| "last": "Shang", |
| "suffix": "" |
| }, |
| { |
| "first": "Curtis", |
| "middle": [], |
| "last": "Langlotz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiawei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuan Wang, Yu Zhang, Xiang Ren, Yuhao Zhang, Marinka Zitnik, Jingbo Shang, Curtis Langlotz, and Jiawei Han. 2018. Cross-type biomedical named entity recognition with deep multi-task learning. CoRR, abs/1801.09851.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Huggingface's transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "R'emi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Brew", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R'emi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing. ArXiv, abs/1910.03771.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Evaluating the utility of hand-crafted features in sequence labelling", |
| "authors": [ |
| { |
| "first": "Minghao", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2850--2856", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1310" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minghao Wu, Fei Liu, and Trevor Cohn. 2018. Evalu- ating the utility of hand-crafted features in sequence labelling. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 2850-2856, Brussels, Belgium. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Document-level attention-based bilstm-crf incorporating disease dictionary for disease named entity recognition", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhenguo", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Peipei", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenyin", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Computers in Biology and Medicine", |
| "volume": "108", |
| "issue": "", |
| "pages": "122--132", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.compbiomed.2019.04.002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Xu, Zhenguo Yang, Peipei Kang, Qi Wang, and Wenyin Liu. 2019. Document-level attention-based bilstm-crf incorporating disease dictionary for dis- ease named entity recognition. Computers in Biol- ogy and Medicine, 108:122 -132.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Exploiting the performance of dictionary-based bioentity name recognition in biomedical literature", |
| "authors": [ |
| { |
| "first": "Zhihao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongfei", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanpeng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Computational Biology and Chemistry", |
| "volume": "32", |
| "issue": "4", |
| "pages": "287--291", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.compbiolchem.2008.03.008" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhihao Yang, Hongfei Lin, and Yanpeng Li. 2008. Ex- ploiting the performance of dictionary-based bio- entity name recognition in biomedical literature. Computational Biology and Chemistry, 32(4):287 - 291.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "text": "Size of NER datasets used in the experiments. The numbers are in sentences.", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF1": { |
| "text": "\u00b10.02 84.82 \u00b10.02 83.56 \u00b10.02 Liu et al. 79.63 \u00b10.002 81.09 \u00b10.009 80.35 \u00b10.004 Exa-Sum 82.58 \u00b10.05 84.65 \u00b10.05 83.60 \u00b10.02 Exa-Max 82.54 \u00b10.01 84.61 \u00b10.02 83.56 \u00b10.00 Exa-Avg 82.52 \u00b10.01 84.61 \u00b10.02 83.55 \u00b10.00 Exa-Conv 82.56 \u00b10.03 84.61 \u00b10.06 83.57 \u00b10.04 App-Sum 82.69 \u00b10.01 84.71 \u00b10.01 83.69 \u00b10.02 App-Max 82.57 \u00b10.01 84.66 \u00b10.03 83.65 \u00b10.03 App-Avg 82.51 \u00b10.04 84.60 \u00b10.02 83.58 \u00b10.00 App-Conv 82.54 \u00b10.04 84.66 \u00b10.01 83.58 \u00b10.02 NCBI-disease BioBERT 86.67 \u00b10.06 90.28 \u00b10.02 88.44 \u00b10.03 Liu et al. 85.21 \u00b10.006 87.01 \u00b10.005 86.10 \u00b10.003 Exa-Sum 86.40 \u00b10.02 90.37 \u00b10.02 88.34 \u00b10.03 Exa-Max 86.67 \u00b10.06 90.30 \u00b10.06 88.44 \u00b10.02 Exa-Avg 86.68 \u00b10.04 90.38 \u00b10.05 88.49 \u00b10.10 Exa-Con 86.57 \u00b10.05 90.26 \u00b10.07 88.38 \u00b10.06 App-Sum 86.74 \u00b10.06 90.64 \u00b10.06 88.65 \u00b10.05 App-Max 86.39 \u00b10.02 90.58 \u00b10.02 88.43 \u00b10.03 App-Avg 86.73 \u00b10.04 90.51 \u00b10.05 88.58 \u00b10.01 App-Con 86.46 \u00b10.06 90.51 \u00b10.12 88.49 \u00b10.08 BC4CHEMD BioBERT 91.89 \u00b10.06 90.95 \u00b10.04 91.41 \u00b10.02 Liu et al. 88.78 \u00b10.06 89.02 \u00b10.02 88.89 \u00b10.03 Exa-Sum 91.79 \u00b10.10 91.08 \u00b10.05 91.43 \u00b10.02 Exa-Max 91.92 \u00b10.06 90.93 \u00b10.10 91.43 \u00b10.05 Exa-Avg 91.90 \u00b10.08 91.00 \u00b10.10 91.44 \u00b10.00 Exa-Con 91.86 \u00b10.06 91.04 \u00b10.03 91.45 \u00b10.01 App-Sum 91.81 \u00b10.10 91.11 \u00b10.05 91.45 \u00b10.02 App-Max 91.94 \u00b10.06 91.01 \u00b10.08 91.47 \u00b10.01 App-Avg 91.88 \u00b10.10 91.06 \u00b10.10 91.47 \u00b10.00 App-Con 91.85 \u00b10.10 91.03 \u00b10.08 91.44 \u00b10.00", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Model</td><td>P</td><td>R</td><td>F</td></tr><tr><td/><td/><td>BC2GM</td><td/></tr><tr><td colspan=\"2\">BioBERT 82.34</td><td/><td/></tr></table>" |
| }, |
| "TABREF2": { |
| "text": "Experimental results of the proposed method with BioBERT-base model on three biomedical datasets BC2GM, NCBI-disease, and BC4CHEMD. Cells represent Precision, Recall and F-measure with standard deviation on each test set, respectively. Exa and App denote Exact and Approximate, respectively. Sum 89.6 \u00b10.61 90.4 \u00b10.56 90.0 \u00b10.06 Exa-Max 90.4 \u00b10.18 89.9 \u00b10.33 90.1 \u00b10.24 Exa-Avg 90.6 \u00b10.38 89.7 \u00b10.53 90.2 \u00b10.35 Exa-Con 89.5 \u00b10.72 89.9 \u00b10.78 89.7 \u00b10.02 App-Sum 91.1 \u00b10.68 90.0 \u00b10.67 90.6 \u00b10.25 App-Max 90.0 \u00b10.34 90.4 \u00b10.50 90.2 \u00b10.29 App-Avg 89.3 \u00b10.55 90.3 \u00b10.18 89.8 \u00b10.19 App-Con 89.1 \u00b10.10 90.0 \u00b10.30 89.5 \u00b10.10 \u00b10.30 80.3 \u00b10.24 83.2 \u00b10.16 Exa-Max 86.2 \u00b10.28 79.9 \u00b10.11 82.9 \u00b10.10 Exa-Avg 86.2 \u00b10.06 80.2 \u00b10.30 83.1 \u00b10.07 Exa-Con 84.9 \u00b10.36 80.7 \u00b10.38 82.7 \u00b10.06 App-Sum 85.7 \u00b10.10 81.3 \u00b10.33 83.4 \u00b10.09 App-Max 85.4 \u00b10.51 80.7 \u00b10.13 83.0 \u00b10.11 App-Avg 85.9 \u00b10.28 80.9 \u00b10.51 83.3 \u00b10.13 App-Con 85.2 \u00b10.29 81.2 \u00b10.07 83.2 \u00b10.10", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Model</td><td>P</td><td>R</td><td>F</td></tr><tr><td/><td/><td>BioELMo</td><td/></tr><tr><td colspan=\"2\">BioELMo -</td><td>-</td><td>88.4</td></tr><tr><td colspan=\"3\">Exa-Bio word2vec</td><td/></tr><tr><td>Bio w2v</td><td>-</td><td>-</td><td>78.5</td></tr><tr><td colspan=\"2\">Exa-Sum 86.3</td><td/><td/></tr></table>" |
| }, |
| "TABREF3": { |
| "text": "Results of learning entity-likeness by probing BioELMo and Bio word2vec on the BC2GM dataset. Cells represent Precision, Recall and F-measure with standard deviation. Exa and App denote Exact and Approximate, respectively.", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "text": "\u00b10.06 92.00 \u00b10.05 91.36 \u00b10.03 Exa-Sum 90.96 \u00b10.02 92.16 \u00b10.02 91.56 \u00b10.01 Exa-Max 90.90 \u00b10.06 92.10 \u00b10.06 91.50 \u00b10.00 Exa-Avg 90.89 \u00b10.04 92.17 \u00b10.05 91.52 \u00b10.03 Exa-Con 90.87 \u00b10.05 92.09 \u00b10.07 91.48 \u00b10.03 App-Sum 91.01 \u00b10.02 92.23 \u00b10.02 91.61 \u00b10.01 App-Max 90.91 \u00b10.06 92.12 \u00b10.06 91.51 \u00b10.00 App-Avg 90.91 \u00b10.04 92.17 \u00b10.05 91.53 \u00b10.03 App-Con 90.86 \u00b10.06 92.11 \u00b10.12 91.48 \u00b10.03", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Model</td><td>P</td><td>R</td><td>F</td></tr><tr><td/><td colspan=\"2\">CoNLL 2003</td><td/></tr><tr><td>BERT</td><td>90.73</td><td/><td/></tr></table>" |
| }, |
| "TABREF5": { |
| "text": "Experimental results of the proposed method with BERT on CoNLL 2003.", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |