| { |
| "paper_id": "S14-2007", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:32:57.705746Z" |
| }, |
| "title": "SemEval-2014 Task 7: Analysis of Clinical Text", |
| "authors": [ |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Harvard University", |
| "location": { |
| "settlement": "Boston", |
| "region": "MA" |
| } |
| }, |
| "email": "sameer.pradhan@childrens.harvard.edu" |
| }, |
| { |
| "first": "No\u00e9mie", |
| "middle": [], |
| "last": "Elhadad", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Columbia University", |
| "location": { |
| "settlement": "New York", |
| "region": "NY" |
| } |
| }, |
| "email": "noemie.elhadad@columbia.edu" |
| }, |
| { |
| "first": "Wendy", |
| "middle": [], |
| "last": "Chapman", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Utah", |
| "location": { |
| "settlement": "Salt Lake City" |
| } |
| }, |
| "email": "wendy.chapman@utah.edu" |
| }, |
| { |
| "first": "Suresh", |
| "middle": [], |
| "last": "Manandhar", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of York", |
| "location": { |
| "settlement": "York", |
| "country": "UK" |
| } |
| }, |
| "email": "suresh@cs.york.ac.uk" |
| }, |
| { |
| "first": "Guergana", |
| "middle": [], |
| "last": "Savova", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Harvard University", |
| "location": { |
| "settlement": "Boston", |
| "region": "MA" |
| } |
| }, |
| "email": "guergana.savova@childrens.harvard.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes the SemEval-2014, Task 7 on the Analysis of Clinical Text and presents the evaluation results. It focused on two subtasks: (i) identification (Task A) and (ii) normalization (Task B) of diseases and disorders in clinical reports as annotated in the Shared Annotated Resources (ShARe) 1 corpus. This task was a follow-up to the ShARe/CLEF eHealth 2013 shared task, subtasks 1a and 1b, 2 but using a larger test set. A total of 21 teams competed in Task A, and 18 of those also participated in Task B. For Task A, the best system had a strict F 1-score of 81.3, with a precision of 84.3 and recall of 78.6. For Task B, the same group had the best strict accuracy of 74.1. The organizers have made the text corpora, annotations, and evaluation tools available for future research and development at the shared task website. 3 evaluation 3", |
| "pdf_parse": { |
| "paper_id": "S14-2007", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes the SemEval-2014, Task 7 on the Analysis of Clinical Text and presents the evaluation results. It focused on two subtasks: (i) identification (Task A) and (ii) normalization (Task B) of diseases and disorders in clinical reports as annotated in the Shared Annotated Resources (ShARe) 1 corpus. This task was a follow-up to the ShARe/CLEF eHealth 2013 shared task, subtasks 1a and 1b, 2 but using a larger test set. A total of 21 teams competed in Task A, and 18 of those also participated in Task B. For Task A, the best system had a strict F 1-score of 81.3, with a precision of 84.3 and recall of 78.6. For Task B, the same group had the best strict accuracy of 74.1. The organizers have made the text corpora, annotations, and evaluation tools available for future research and development at the shared task website. 3 evaluation 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "A large amount of very useful information-both for medical researchers and patients-is present in the form of unstructured text within the clinical notes and discharge summaries that form a patient's medical history. Adapting and extending natural language processing (NLP) techniques to mine this information can open doors to better, novel, clinical studies on one hand, and help patients understand the contents of their clinical records on the other. Organization of this shared task helps establish state-of-the-art benchmarks and paves the way for further explorations. It tackles two important sub-problems in NLPnamed entity recognition and word sense disambiguation. Neither of these problems are new to NLP. Research in general-domain NLP goes back to about two decades. For an overview of the development in the field through roughly 2009, we refer the refer to Nadeau and Sekine (2007) . NLP has also penetrated the field of bimedical informatics and has been particularly focused on biomedical literature for over the past decade. Advances in that sub-field has also been documented in surveys such as one by Leaman and Gonzalez (2008) . Word sense disambiguation also has a long history in the general NLP domain (Navigli, 2009) . In spite of word sense annotations in the biomedical literature, recent work by Savova et al. (2008) highlights the importance of annotating them in clinical notes. This is true for many other clinical and linguistic phenomena as the various characteristics of the clinical narrative present a unique challenge to NLP. Recently various initiatives have led to annotated corpora for clinical NLP research. Probably the first comprehensive annotation performed on a clinical corpora was by Roberts et al. (2009) , but unfortunately that corpus is not publicly available owing to privacy regulations. \nThe i2b2 initiative 4 challenges have focused on such topics as concept recognition (Uzuner et al., 2011) , coreference resolution (Uzuner et al., 2012) , temporal relations (Sun et al., 2013) and their datasets are available to the community. More recently, the Shared Annotated Resources (ShARe) 1 project has created a corpus annotated with disease/disorder mentions in clinical notes as well as normalized them to a concept unique identifier (CUI) within the SNOMED-CT subset of the Unified Medical Language System 5 (Campbell et al., 1998) . The task of normalization is a combination of word/phrase sense disambiguation and semantic similarity where a phrase is mapped to a unique concept in an ontology (based on the description of that concept in the ontology) after disambiguating potential ambiguous surface words, or phrases. This is especially true with abbreviations and acronyms which are much more common in clinical text (Moon et al., 2012) . The SemEval-2014 task 7 was one of nine shared tasks organized at the SemEval-2014. It was designed as a follow up to the shared tasks organized during the ShARe/CLEF eHealth 2013 evaluation Pradhan et al., 2014) . Like the previous shared task, we relied on the ShARe corpus, but with more data for training and a new test set. Furthermore, in this task, we provided the options to participants to utilize a large corpus of unlabeled clinical notes. The rest of the paper is organized as follows. Section 2 describes the characteristics of the data used in the task. Section 3 describes the tasks in more detail. Section 4 explains the evaluation criteria for the two tasks. Section 5 lists the participants of the task. Section 6 discusses the results on this task and also compares them with the ShARe/CLEF eHealth 2013 results, and Section 7 concludes.", |
| "cite_spans": [ |
| { |
| "start": 873, |
| "end": 897, |
| "text": "Nadeau and Sekine (2007)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1122, |
| "end": 1148, |
| "text": "Leaman and Gonzalez (2008)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1227, |
| "end": 1242, |
| "text": "(Navigli, 2009)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1325, |
| "end": 1345, |
| "text": "Savova et al. (2008)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1733, |
| "end": 1754, |
| "text": "Roberts et al. (2009)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1927, |
| "end": 1948, |
| "text": "(Uzuner et al., 2011)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1974, |
| "end": 1995, |
| "text": "(Uzuner et al., 2012)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 2017, |
| "end": 2035, |
| "text": "(Sun et al., 2013)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 2364, |
| "end": 2387, |
| "text": "(Campbell et al., 1998)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 2780, |
| "end": 2799, |
| "text": "(Moon et al., 2012)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 2993, |
| "end": 3014, |
| "text": "Pradhan et al., 2014)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The ShARe corpus comprises annotations over de-identified clinical reports from a US intensive care department (version 2.5 of the MIMIC II database 6 ) (Saeed et al., 2002) . It consists of discharge summaries, electrocardiogram, echocardiogram, and radiology reports. Access to data was carried out following MIMIC user agreement requirements for access to de-identified medical 6 http://mimic.physionet.org -Multiparameter Intelligent Monitoring in Intensive Care data. Hence, all participants were required to register for the evaluation, obtain a US human subjects training certificate 7 , create an account to the password-protected MIMIC site, specify the purpose of data usage, accept the data use agreement, and get their account approved. The annotation focus was on disorder mentions, their various attributes and normalizations to an UMLS CUI. As such, there were two parts to the annotation: identifying a span of text as a disorder mention and normalizing (or mapping) the span to a UMLS CUI. The UMLS represents over 130 lexicons/thesauri with terms from a variety of languages and integrates resources used world-wide in clinical care, public health, and epidemiology. A disorder mention was defined as any span of text which can be mapped to a concept in SNOMED-CT and which belongs to the Disorder semantic group 8 . It also provided a semantic network in which every concept is represented by its CUI and is semantically typed (Bodenreider and Mc-Cray, 2003 In example (E1), lower extremity DVT is marked as the disorder. It corresponds to CUI C0340708 (preferred term: Deep vein thrombosis of lower limb). The span DVT can be mapped to CUI C0149871 (preferred term: Deep Vein Thrombosis), but this mapping would be incorrect because it is part of a more specific disorder in the sentence, namely lower extremity DVT.", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 173, |
| "text": "(Saeed et al., 2002)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 381, |
| "end": 382, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 1446, |
| "end": 1476, |
| "text": "(Bodenreider and Mc-Cray, 2003", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A tumor was found in the left ovary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In example (E2), tumor ... ovary is annotated as a discontiguous disorder mention. This is the best method of capturing the exact disorder mention in clinical notes and its novelty is in the fact that either such phenomena have not been seen frequently enough in the general domain to gather particular attention, or the lack of a manually curated general domain ontology parallel to the UMLS.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Patient admitted with low blood pressure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There are some disorders that do not have a representation to a CUI as part of the SNOMED CT within the UMLS. However, if they were deemed important by the annotators then they were annotated as CUI-less mentions. In example (E3), low blood pressure is a finding and is normalized as a CUI-less disorder. We constructed the annotation guidelines to require that the disorder be a reasonable synonym of the lexical description of a SNOMED-CT disorder. There are a few instances where the disorders are abbreviated or shortened in the clinical note. One example is w/r/r, which is an abbreviation for concepts wheezing (CUI C0043144), rales (CUI C0034642), and ronchi (CUI C0035508). This abbreviation is also sometimes written as r/w/r and r/r/w. Another is gsw for gunshot wound and tachy for tachycardia. More details on the annotation scheme is detailed in the guidelines 9 and in a forthcoming manuscript. The annotations covered about 336K words. Table 1 shows the quantity of the data and the split across the training, development and test sets as well as in terms of the number of notes and the number of words.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 951, |
| "end": 958, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Each note in the training and development set was annotated by two professional coders trained for this task, followed by an open adjudication step. By the time we reached annotating the test data, the annotators were quite familiar with the annotation and so, in order to save time, we decided to perform a single annotation pass using a senior annotator. This was followed by a correction pass by the same annotator using a checklist of frequent annotation issues faced earlier. Table 2 shows the inter-annotator agreement (IAA) statistics for the adjudicated data. For the disorders we measure the agreement in terms of the F 1 -score as traditional agreement measures such as Cohen's kappa and Krippendorf's alpha are not applicable for measuring agreement for entity mention annotation. We computed agreements between the two annotators as well as between each annotator and the final adjudicated gold standard. The latter is to give a sense of the fraction of corrections made in the process of adjudication. The strict criterion considers two mentions correct if they agree in terms of the class and the exact string, whereas the relaxed criteria considers overlapping strings of the same class as correct. The reason for checking the class is as follows. Although we only use the disorder mention in this task, the corpus has been annotated with some other UMLS types as well and therefore there are instances where a different UMLS type is assigned to the same character span in the text by the second annotator. If exact boundaries are not taken into account then the IAA agreement score is in the mid-90s. For the task of normalization to CUIs, we used accuracy to assess agreement. For the relaxed criterion, all overlapping disorder spans with the same CUI were considered correct. For the strict criterion, only disorder spans with identical spans and the same CUI were considered correct.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 481, |
| "end": 488, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Quality", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The participants were evaluated on the following two tasks:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 Task A -Identification of the character spans of disorder mentions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 Task B -Normalizing disorder mentions to SNOMED-CT subset of UMLS CUIs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For Task A, participants were instructed to develop a system that predicts the spans for disorder mentions. For Tasks B, participants were instructed to develop a system that predicts the UMLS CUI within the SNOMED-CT vocabulary. The input to Task B were the disorder mention predictions from Task A. Task B was optional. System outputs adhered to the annotation format. Each participant was allowed to submit up to three runs. The en-tire set of unlabeled MIMIC clinical notes (excluding the test notes) were made available to the participants for potential unsupervised approaches to enhance the performance of their systems. They were allowed to use additional annotations in their systems, but this counted towards the total allowable runs; systems that used annotations outside of those provided were evaluated separately. The evaluation for all tasks was conducted using the blind, withheld test data. The participants were provided a training set containing clinical text as well as pre-annotated spans and named entities for disorders (Tasks A and B).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The following evaluation criteria were used:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u2022 Task A -The system performance was evaluated against the gold standard using the F 1 -score of the Precision and Recall values. There were two variations: (i) Strict; and (ii) Relaxed. The formulae for computing these metrics are mentioned below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "P recision = P = D tp D tp + D f p", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "(1) in the relaxed case, a span overlapping with the gold standard span was also considered correct.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Recall = R = D tp D tp + D f n", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u2022 Task B -Accuracy was used as the performance measure for Task 1b. It was defined as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Accuracy strict = D tp \u2229 N correct T g", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Accuracy", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "relaxed = D tp \u2229 N correct D tp (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Where, D tp = Number of true positive disorder mentions with identical spans as in the gold standard; N correct = Number of correctly normalized disorder mentions; and T g = Total number of disorder mentions in the gold standard. For Task B, the systems were only evaluated on annotations they identified in Task A. Relaxed accuracy only measured the ability to normalize correct spans. Therefore, it was possible to obtain very high values for this measure by simply dropping any mention with a low confidence span.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Criteria", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A total of 21 participants from across the world participated in Task A and out of them 18 also participated in Task B. Unfortunately, although interested, the ThinkMiners team (Parikh et al., 2014) could not participate in Task B owing to some UMLS licensing issues. The participating organizations along with the contact user's User ID and their chosen Team ID are mentioned in Table 3 . Eight teams submitted three runs, six submitted two runs and seven submitted just one run. Out of these, only 13 submitted system description papers. We based our analysis on those system descriptions.", |
| "cite_spans": [ |
| { |
| "start": 177, |
| "end": 198, |
| "text": "(Parikh et al., 2014)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 380, |
| "end": 387, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Participants", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Tables 4 and 6 show the performance of the systems on Tasks A and B. None of the systems used any additional annotated data so we did not have to compare them separately. Both tables mention performance of all the different runs that the systems submitted. Given the many variables, we deliberately left the decision on how many and how to define these runs to the individual participant. They used various different ways to differentiate their runs. Some, for example, UTU (Kaewphan et al., 2014) , did it based on the composition of training data, i.e., whether they used just the training data or both the training and the development data for training the final system, which highlighted the fact that adding development data to training bumped the F 1 -score on Task A by about 2 percent points. Some participants, however, did not make use of the development data in training their systems. This was partially due to the fact that we had not explicitly mentioned in the task description that participants were allowed to use the development data for training their final models. In order to be fair, we allowed some users an opportunity to submit runs post evaluation where they used the exact same system that they used for evaluation but used the development data as well. We added a column to the results tables showing whether the participant used only the training data (T) or both training and development data (T+D) for training their system. It can be seen that even though the addition of development data helps, there are still systems that perform in the lower percentile who have used both training and development data for training, indicating that both the features and the machine learning classifier contribute to the models. \nA novel aspect of the SemEval-2014 shared task that differentiates it from the ShARE/CLEF task-other than the fact that it used more data and a new test set-is the fact that SemEval-2014 allowed the use of a much larger set of unlabeled MIMIC notes to inform the models. Surprisingly, only two of the systems (ULisboa (Leal et al., 2014) and UniPi (Attardi et al., 2014) ) used the unlabeled MIMIC corpus to generalize the lexical features. Another team-UTH CCB (Zhang et al., 2014) -used off-the-shelf Brown clusters 10 as opposed to training them on the unlabeled MIMIC II data. For Task B, the accuracy of a system using the strict metric was positively correlated with its recall on the disorder mentions that were input to it (i.e., recall for Task A), and did not get penalized for lower precision. Therefore one could essentially gain higher accuracy in Task B by tuning a system to provide the highest mention recall in Task A potentially at the cost of precision and the overall F 1 -score and using those mentions as input for Task B. This can be seen from the fact that the run 2 for UTH CCB (Zhang et al., 2014) system with the lowest F 1 -score has the best accuracy for Task B and vice-versa for run 0 with run 1 in between the two. In order to fairly compare the performance between two systems one would have to provide perfect mentions as input to Task B. One of the systems-UWM Ghiasvand and Kate (2014)-did run some ablation experiments using gold standard mentions as input to Task B and obtained a best performance of 89.5F 1 -score (Table 5 of Ghiasvand and Kate (2014) ) as opposed to 62.3 F 1 -score (Table 7) in the more realistic setting which is a huge difference. In the upcoming SemEval-2014 where this same evaluation is going to carried out under Task 14, we plan to perform supplementary evaluation where gold disorder mentions would be input to the system while attempting Task B. \nAn interesting outcome of planning a follow-on evaluation to the ShARe/CLEF eHealth 2013 task was that we could, and did, use the test data from the ShARe/CLEF eHealth 2013 task as the development set for this evaluation. After the main evaluation we asked participants to provide the system performance on the development set using the same number and run convention that they submitted for the main evaluation. These results are presented in Tables 5 and 7 . We have inserted the best performing system score from the ShARe/CLEF eHealth 2013 task in these tables. For Task A, referring to Tables 4 and 5 , there is a boost of 3.7 absolute percent points for the F 1 -score over the same task (Task 1a) in the ShARe/CLEF eHealth 2013. For Task B, referring to Tables 6 and 7 , there is a boost of 13.7 percent points for the F 1 -score over the same task (Task 1b) in the ShARe/CLEF eHealth 2013 evaluation. The participants used various approaches for tackling the tasks, ranging from purely rule-based/unsupervised (RelAgent (Ramanan and Nathan, 2014) , (Matos et al., 2014) , KUL 11 ) to a hybrid of rules and machine learning classifiers. The top performing systems typically used the latter. Various versions of the IOB formulation were used for tagging the disorder mentions. None of the standard variations on the IOB formulation were explicitly designed or used to handle discontiguous mentions. Some systems used novel variations on this approach. Probably the simplest variation was applied by the UWM team (Ghiasvand and Kate, 2014) . In this formulation the following labeled sequence \"the/O left/B atrium/I is/O moderately/O dilated/I\" can be used to represent the discontiguous mention left atrium...dilated, and can be constructed as such from the output of the classification. The most complex variation was the one used by the UTH CCB team (Zhang et al., 2014) where they used the following set of tags-B, I, O, DB, DI, HB, HI. \nThis variation encodes discontiguous mentions by adding four more tags to the I, O and B tags. These are variations of the B and I tags with either a D or a H prefix. The prefix H indicates that the word or word sequence is the shared head, and the prefix D indicates otherwise. Another intermediate approach used by the ULisboa team (Leal et al., 2014) with the tagset-S, B, I, O, E and N. Here, S represents the single token entity to be recognized, E represents the end of an entity (which is part of one of the prior IOB variations) and an N tag to identify non-contiguous mentions. They don't provide an explicit example usage of this tag set in their paper. Yet another variation was used by the SZTE-NLP team (Katona and Farkas, 2014) . This used tags B, I, L, O and U. Here, L is used for the last token similar to E earlier, and U is used for a unit-token mention, similar to S earlier. We believe that the only approach that can distinguish between discontiguous disorders that share the same head word/phrase is the one used by the UTH CCB team (Zhang et al., 2014) . The participants used various machine learning classifiers such as MaxEnt, SVM, CRF in combination with rich syntactic and semantic features to capture the disorder mentions. As mentioned earlier, a few participants used the available unlabeled data and also off-the-shelf clusters to better generalize features. The use of vector space models such as cosine similarities as well as continuous distributed word vector representations was useful in the normalization task. They also availed of tools such as MetaMap and cTakes to generate features as well as candidate CUIs during normalizations.", |
| "cite_spans": [ |
| { |
| "start": 470, |
| "end": 497, |
| "text": "UTU (Kaewphan et al., 2014)", |
| "ref_id": null |
| }, |
| { |
| "start": 2066, |
| "end": 2085, |
| "text": "(Leal et al., 2014)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 2096, |
| "end": 2118, |
| "text": "(Attardi et al., 2014)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 2210, |
| "end": 2230, |
| "text": "(Zhang et al., 2014)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 2851, |
| "end": 2871, |
| "text": "(Zhang et al., 2014)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 3314, |
| "end": 3339, |
| "text": "Ghiasvand and Kate (2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 4690, |
| "end": 4716, |
| "text": "(Ramanan and Nathan, 2014)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 4719, |
| "end": 4739, |
| "text": "(Matos et al., 2014)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 5180, |
| "end": 5206, |
| "text": "(Ghiasvand and Kate, 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 5520, |
| "end": 5540, |
| "text": "(Zhang et al., 2014)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 5942, |
| "end": 5961, |
| "text": "(Leal et al., 2014)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 6324, |
| "end": 6349, |
| "text": "(Katona and Farkas, 2014)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 6664, |
| "end": 6684, |
| "text": "(Zhang et al., 2014)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 3302, |
| "end": 3310, |
| "text": "(Table 5", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 3372, |
| "end": 3381, |
| "text": "(Table 7)", |
| "ref_id": "TABREF11" |
| }, |
| { |
| "start": 4106, |
| "end": 4120, |
| "text": "Tables 5 and 7", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 4232, |
| "end": 4267, |
| "text": "Task A, referring to Tables 4 and 5", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 4402, |
| "end": 4437, |
| "text": "Task B, referring to Tables 6 and 7", |
| "ref_id": "TABREF10" |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Results", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We have created a reference standard with high inter-annotator agreement and evaluated systems on the task of identification and normalization of diseases and disorders appearing in clinical reports. The results have demonstrated that an NLP system can complete this task with reasonably high accuracy. We plan to annotate another evaluation using the same data as part of the SemEval-2015, Task 14 12 adding another task of template filling where the systems will identify and normalize ten attributes of the identified disease/disorder mentions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "http://share.healthnlp.org 2 https://sites.google.com/site/shareclefehealth/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.i2b2.org 5 https://uts.nlm.nih.gov/home.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The course was available free of charge on the Internet, for example,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Personal conversation with the participants as it was not very clear in the system description paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Personal communication with participant.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://alt.qcri.org/semeval2015/task14", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We greatly appreciate the hard work and feedback of our program committee members and annotators David Harris, Jennifer Green and Glenn Zaramba. Danielle Mowery, Sumithra Velupillai and Brett South for helping prepare the manuscript by summarizing the approaches used by various systems. This shared task was partially supported by Shared Annotated Resources (ShARe) project NIH 5R01GM090187 and Temporal Histories of Your Medical Events (THYME) project (NIH R01LM010090 and U54LM008748).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "UniPi: Recognition of mentions of disorders in clinical text", |
| "authors": [ |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Attardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Vitoria", |
| "middle": [], |
| "last": "Cozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Sartiano", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giuseppe Attardi, Vitoria Cozza, and Daniele Sartiano. 2014. UniPi: Recognition of mentions of disorders in clinical text. In Proceedings of the International Workshop on Semantic Evaluations, Dublin, Ireland, August.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Exploring semantic groups through visual approaches", |
| "authors": [ |
| { |
| "first": "Olivier", |
| "middle": [], |
| "last": "Bodenreider", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexa", |
| "middle": [], |
| "last": "Mccray", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Biomedical Informatics", |
| "volume": "36", |
| "issue": "", |
| "pages": "414--432", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Olivier Bodenreider and Alexa McCray. 2003. Ex- ploring semantic groups through visual approaches. Journal of Biomedical Informatics, 36:414-432.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The Unified Medical Language System: Towards a collaborative approach for solving terminologic problems", |
| "authors": [ |
| { |
| "first": "Keith", |
| "middle": [ |
| "E" |
| ], |
| "last": "Campbell", |
| "suffix": "" |
| }, |
| { |
| "first": "Diane", |
| "middle": [ |
| "E" |
| ], |
| "last": "Oliver", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [ |
| "H" |
| ], |
| "last": "Shortliffe", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "J Am Med Inform Assoc", |
| "volume": "5", |
| "issue": "1", |
| "pages": "12--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Keith E. Campbell, Diane E. Oliver, and Edward H. Shortliffe. 1998. The Unified Medical Language System: Towards a collaborative approach for solv- ing terminologic problems. J Am Med Inform Assoc, 5(1):12-16.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "UWM: Disorder mention extraction from clinical text using crfs and normalization using learned edit distance patterns", |
| "authors": [ |
| { |
| "first": "Omid", |
| "middle": [], |
| "last": "Ghiasvand", |
| "suffix": "" |
| }, |
| { |
| "first": "Rohit", |
| "middle": [ |
| "J" |
| ], |
| "last": "Kate", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omid Ghiasvand and Rohit J. Kate. 2014. UWM: Dis- order mention extraction from clinical text using crfs and normalization using learned edit distance pat- terns. In Proceedings of the International Workshop on Semantic Evaluations, Dublin, Ireland, August.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "UTU: Disease mention recognition and normalization with crfs and vector space representations", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Suwisa Kaewphan", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Hakaka1", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ginter", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Suwisa Kaewphan, Kai Hakaka1, and Filip Ginter. 2014. UTU: Disease mention recognition and nor- malization with crfs and vector space representa- tions. In Proceedings of the International Workshop on Semantic Evaluations, Dublin, Ireland, August.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "SZTE-NLP: Clinical text analysis with named entity recognition", |
| "authors": [ |
| { |
| "first": "Melinda", |
| "middle": [], |
| "last": "Katona", |
| "suffix": "" |
| }, |
| { |
| "first": "Rich\u00e1rd", |
| "middle": [], |
| "last": "Farkas", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Melinda Katona and Rich\u00e1rd Farkas. 2014. SZTE- NLP: Clinical text analysis with named entity recog- nition. In Proceedings of the International Work- shop on Semantic Evaluations, Dublin, Ireland, Au- gust.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "ULisboa: Identification and classification of medical concepts", |
| "authors": [ |
| { |
| "first": "Andr\u00e9", |
| "middle": [], |
| "last": "Leal", |
| "suffix": "" |
| }, |
| { |
| "first": "Diogo", |
| "middle": [], |
| "last": "Gon\u00e7alves", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruno", |
| "middle": [], |
| "last": "Martins", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [ |
| "M" |
| ], |
| "last": "Couto", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andr\u00e9 Leal, Diogo Gon\u00e7alves, Bruno Martins, and Francisco M. Couto. 2014. ULisboa: Identifica- tion and classification of medical concepts. In Pro- ceedings of the International Workshop on Semantic Evaluations, Dublin, Ireland, August.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Banner: an executable survey of advances in biomedical named entity recognition", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Leaman", |
| "suffix": "" |
| }, |
| { |
| "first": "Graciela", |
| "middle": [], |
| "last": "Gonzalez", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Pacific Symposium on Biocomputing", |
| "volume": "13", |
| "issue": "", |
| "pages": "652--663", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Leaman and Graciela Gonzalez. 2008. Ban- ner: an executable survey of advances in biomedical named entity recognition. In Pacific Symposium on Biocomputing, volume 13, pages 652-663.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BioinformaticsUA: Concept recognition in clinical narratives using a modular and highly efficient text processing framework", |
| "authors": [ |
| { |
| "first": "S\u00e9rgio", |
| "middle": [], |
| "last": "Matos", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiago", |
| "middle": [], |
| "last": "Nunes", |
| "suffix": "" |
| }, |
| { |
| "first": "Jos\u00e9 Lu\u00eds", |
| "middle": [], |
| "last": "Oliveira", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S\u00e9rgio Matos, Tiago Nunes, and Jos\u00e9 Lu\u00eds Oliveira. 2014. BioinformaticsUA: Concept recognition in clinical narratives using a modular and highly ef- ficient text processing framework. In Proceedings of the International Workshop on Semantic Evalua- tions, Dublin, Ireland, August.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Automated disambiguation of acronyms and abbreviations in clinical texts: Window and training size considerations", |
| "authors": [ |
| { |
| "first": "Sungrim", |
| "middle": [], |
| "last": "Moon", |
| "suffix": "" |
| }, |
| { |
| "first": "Serguei", |
| "middle": [], |
| "last": "Pakhomov", |
| "suffix": "" |
| }, |
| { |
| "first": "Genevieve", |
| "middle": [ |
| "B" |
| ], |
| "last": "Melton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "AMIA Annu Symp Proc", |
| "volume": "", |
| "issue": "", |
| "pages": "1310--1319", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sungrim Moon, Serguei Pakhomov, and Genevieve B Melton. 2012. Automated disambiguation of acronyms and abbreviations in clinical texts: Win- dow and training size considerations. In AMIA Annu Symp Proc, pages 1310-1319.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A survey of named entity recognition and classification", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Nadeau", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Sekine", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Lingvisticae Investigationes", |
| "volume": "30", |
| "issue": "", |
| "pages": "3--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Nadeau and Satoshi Sekine. 2007. A sur- vey of named entity recognition and classification. Lingvisticae Investigationes, 30(1):3-26.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Word sense disambiguation", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ACM Computing Surveys", |
| "volume": "41", |
| "issue": "2", |
| "pages": "1--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli. 2009. Word sense disambiguation. ACM Computing Surveys, 41(2):1-69, February.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "ThinkMiners: SemEval-2014 task 7: Analysis of clinical text", |
| "authors": [ |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Pvs", |
| "middle": [], |
| "last": "Avinesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Joy", |
| "middle": [], |
| "last": "Mustafi", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankur Parikh, Avinesh PVS, Joy Mustafi, Lalit Agar- walla, and Ashish Mungi. 2014. ThinkMiners: SemEval-2014 task 7: Analysis of clinical text. In Proceedings of the International Workshop on Se- mantic Evaluations, Dublin, Ireland, August.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Task 1: ShARe/CLEF eHealth Evaluation Lab", |
| "authors": [ |
| { |
| "first": "No\u00e9mie", |
| "middle": [], |
| "last": "Sameer Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Brett", |
| "middle": [], |
| "last": "Elhadad", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "South", |
| "suffix": "" |
| }, |
| { |
| "first": "Lee", |
| "middle": [], |
| "last": "Martinez", |
| "suffix": "" |
| }, |
| { |
| "first": "Amy", |
| "middle": [], |
| "last": "Christensen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanna", |
| "middle": [], |
| "last": "Vogel", |
| "suffix": "" |
| }, |
| { |
| "first": "Wendy", |
| "middle": [ |
| "W" |
| ], |
| "last": "Suominen", |
| "suffix": "" |
| }, |
| { |
| "first": "Guergana", |
| "middle": [], |
| "last": "Chapman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Savova", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Working Notes of CLEF eHealth Evaluation Labs", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameer Pradhan, No\u00e9mie Elhadad, Brett South, David Martinez, Lee Christensen, Amy Vogel, Hanna Suominen, Wendy W. Chapman, and Guergana Savova. 2013. Task 1: ShARe/CLEF eHealth Evaluation Lab 2013. In Working Notes of CLEF eHealth Evaluation Labs.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Evaluating the state of the art in disorder recognition and normalization of the clinical narrative", |
| "authors": [ |
| { |
| "first": "No\u00e9mie", |
| "middle": [], |
| "last": "Sameer Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Brett", |
| "middle": [], |
| "last": "Elhadad", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "South", |
| "suffix": "" |
| }, |
| { |
| "first": "Lee", |
| "middle": [], |
| "last": "Martinez", |
| "suffix": "" |
| }, |
| { |
| "first": "Amy", |
| "middle": [], |
| "last": "Christensen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanna", |
| "middle": [], |
| "last": "Vogel", |
| "suffix": "" |
| }, |
| { |
| "first": "Wendy", |
| "middle": [ |
| "W" |
| ], |
| "last": "Suominen", |
| "suffix": "" |
| }, |
| { |
| "first": "Guergana", |
| "middle": [], |
| "last": "Chapman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Savova", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "In Journal of the American Medical Informatics Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameer Pradhan, No\u00e9mie Elhadad, Brett South, David Martinez, Lee Christensen, Amy Vogel, Hanna Suominen, Wendy W. Chapman, and Guergana Savova. 2014. Evaluating the state of the art in disorder recognition and normalization of the clin- ical narrative. In Journal of the American Medical Informatics Association (to appear).", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "RelAgent: Entity detection and normalization for diseases in clinical records: a linguistically driven approach", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "V" |
| ], |
| "last": "Ramanan", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Nathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. V. Ramanan and P. Senthil Nathan. 2014. RelA- gent: Entity detection and normalization for diseases in clinical records: a linguistically driven approach. In Proceedings of the International Workshop on Se- mantic Evaluations, Dublin, Ireland, August.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Building a semantically annotated corpus of clinical texts", |
| "authors": [ |
| { |
| "first": "Angus", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Hepple", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Demetriou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yikun", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Setzer", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "J Biomed Inform", |
| "volume": "42", |
| "issue": "5", |
| "pages": "950--66", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angus Roberts, Robert Gaizauskas, Mark Hepple, George Demetriou, Yikun Guo, Ian Roberts, and Andrea Setzer. 2009. Building a semantically an- notated corpus of clinical texts. J Biomed Inform, 42(5):950-66.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "MIMIC II: a massive temporal ICU patient database to support research in intelligent patient monitoring", |
| "authors": [ |
| { |
| "first": "Mohammed", |
| "middle": [], |
| "last": "Saeed", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Lieu", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Raber", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "G" |
| ], |
| "last": "Mark", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Comput Cardiol", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammed Saeed, C. Lieu, G. Raber, and R.G. Mark. 2002. MIMIC II: a massive temporal ICU patient database to support research in intelligent patient monitoring. Comput Cardiol, 29.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Word sense disambiguation across two domains: Biomedical literature and clinical notes", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Guergana", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "R" |
| ], |
| "last": "Savova", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [ |
| "L" |
| ], |
| "last": "Coden", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Sominsky", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "V" |
| ], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "C" |
| ], |
| "last": "Ogren", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "G" |
| ], |
| "last": "De Groen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chute", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "J Biomed Inform", |
| "volume": "41", |
| "issue": "6", |
| "pages": "1088--1100", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guergana K. Savova, A. R. Coden, I. L. Sominsky, R. Johnson, P. V. Ogren, P. C. de Groen, and C. G. Chute. 2008. Word sense disambiguation across two domains: Biomedical literature and clinical notes. J Biomed Inform, 41(6):1088-1100, Decem- ber.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Evaluating temporal relations in clinical text: 2012 i2b2 Challenge", |
| "authors": [ |
| { |
| "first": "Weiyi", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| }, |
| { |
| "first": "Uzuner", |
| "middle": [], |
| "last": "And\u00f6zlem", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Journal of the American Medical Informatics Association", |
| "volume": "20", |
| "issue": "5", |
| "pages": "806--819", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Weiyi Sun, Anna Rumshisky, and\u00d6zlem Uzuner. 2013. Evaluating temporal relations in clinical text: 2012 i2b2 Challenge. Journal of the American Med- ical Informatics Association, 20(5):806-13.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Overview of the ShARe/CLEF eHealth evaluation lab 2013", |
| "authors": [ |
| { |
| "first": "Hanna", |
| "middle": [], |
| "last": "Suominen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanna", |
| "middle": [], |
| "last": "Salanter\u00e4", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumithra", |
| "middle": [], |
| "last": "Velupillai", |
| "suffix": "" |
| }, |
| { |
| "first": "Wendy", |
| "middle": [ |
| "W" |
| ], |
| "last": "Chapman", |
| "suffix": "" |
| }, |
| { |
| "first": "Guergana", |
| "middle": [], |
| "last": "Savova", |
| "suffix": "" |
| }, |
| { |
| "first": "Noemie", |
| "middle": [], |
| "last": "Elhadad", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Brett", |
| "middle": [ |
| "R" |
| ], |
| "last": "South", |
| "suffix": "" |
| }, |
| { |
| "first": "Danielle", |
| "middle": [ |
| "L" |
| ], |
| "last": "Mowery", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "F" |
| ], |
| "last": "Gareth", |
| "suffix": "" |
| }, |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Liadh", |
| "middle": [], |
| "last": "Leveling", |
| "suffix": "" |
| }, |
| { |
| "first": "Lorraine", |
| "middle": [], |
| "last": "Kelly", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Goeuriot", |
| "suffix": "" |
| }, |
| { |
| "first": "Guido", |
| "middle": [], |
| "last": "Martinez", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zuccon", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Working Notes of CLEF eHealth Evaluation Labs", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hanna Suominen, Sanna Salanter\u00e4, Sumithra Velupil- lai, Wendy W. Chapman, Guergana Savova, Noemie Elhadad, Sameer Pradhan, Brett R. South, Danielle L. Mowery, Gareth J. F. Jones, Johannes Leveling, Liadh Kelly, Lorraine Goeuriot, David Martinez, and Guido Zuccon. 2013. Overview of the ShARe/CLEF eHealth evaluation lab 2013. In Working Notes of CLEF eHealth Evaluation Labs.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "i2b2/VA challenge on concepts, assertions, and relations in clinical text", |
| "authors": [ |
| { |
| "first": "Ozlem", |
| "middle": [], |
| "last": "Uzuner", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Brett", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuying", |
| "middle": [], |
| "last": "South", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott L", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Duvall", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Journal of the American Medical Informatics Association", |
| "volume": "18", |
| "issue": "5", |
| "pages": "552--556", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ozlem Uzuner, Brett R South, Shuying Shen, and Scott L DuVall. 2011. 2010 i2b2/VA challenge on concepts, assertions, and relations in clinical text. Journal of the American Medical Informatics Asso- ciation, 18(5):552-556.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Evaluating the state of the art in coreference resolution for electronic medical records", |
| "authors": [ |
| { |
| "first": "Ozlem", |
| "middle": [], |
| "last": "Uzuner", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreea", |
| "middle": [], |
| "last": "Bodnari", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuying", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Tyler", |
| "middle": [], |
| "last": "Forbush", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Pestian", |
| "suffix": "" |
| }, |
| { |
| "first": "Brett R", |
| "middle": [], |
| "last": "South", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "American Medical Informatics Association", |
| "volume": "19", |
| "issue": "5", |
| "pages": "786--791", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ozlem Uzuner, Andreea Bodnari, Shuying Shen, Tyler Forbush, John Pestian, and Brett R South. 2012. Evaluating the state of the art in coreference res- olution for electronic medical records. Jour- nal of American Medical Informatics Association, 19(5):786-791, September.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "UTH CCB: A report for SemEval 2014 task 7 analysis of clinical text", |
| "authors": [ |
| { |
| "first": "Yaoyun", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingqi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Buzhou", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yukun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yaoyun Zhang, Jingqi Wang, Buzhou Tang, Yonghui Wu, Min Jiang, Yukun Chen, and Hua Xu. 2014. UTH CCB: A report for SemEval 2014 task 7 anal- ysis of clinical text. In Proceedings of the Interna- tional Workshop on Semantic Evaluations, Dublin, Ireland, August.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "text": "Distribution of data in terms of notes and disorder mentions across the training, development and test sets. The disorders are further split according to two criteria -whether they map to a CUI or whether they are contiguous.", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "text": "Inter-annotator (A1 and A2) and gold standard (GS) agreement as F 1 -score for the Disorder mentions and their normalization to the UMLS CUI.", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>Institution</td><td>User ID</td><td>Team ID</td></tr><tr><td>University of Pisa, Italy</td><td>attardi</td><td>UniPI</td></tr><tr><td>University of Lisbon, Portugal</td><td>francisco</td><td>ULisboa</td></tr><tr><td>University of Wisconsin, Milwaukee, USA</td><td>ghiasvand</td><td>UWM</td></tr><tr><td>University of Colorado, Boulder, USA</td><td>gung</td><td>CLEAR</td></tr><tr><td>University of Guadalajara, Mexico</td><td>herrera</td><td>UG</td></tr><tr><td>Taipei Medical University, Taiwan</td><td>hjdai</td><td>TMU</td></tr><tr><td>University of Turku, Finland</td><td>kaewphan</td><td>UTU</td></tr><tr><td>University of Szeged, Hungary</td><td>katona</td><td>SZTE-NLP</td></tr><tr><td>Queensland University of Queensland, Australia</td><td>kholghi</td><td>QUT AEHRC</td></tr><tr><td>KU Leuven, Belgium</td><td>kolomiyets</td><td>KUL</td></tr><tr><td>Universidade de Aveiro, Portugal</td><td>nunes</td><td>BioinformaticsUA</td></tr><tr><td>University of the Basque Country, Spain</td><td>oronoz</td><td>IxaMed</td></tr><tr><td>IBM, India</td><td>parikh</td><td>ThinkMiners</td></tr><tr><td>easy data intelligence, India</td><td>pathak</td><td>ezDI</td></tr><tr><td>RelAgent Tech Pvt. Ltd., India</td><td>ramanan</td><td>RelAgent</td></tr><tr><td>Universidad Nacional de Colombia, Colombia</td><td>riveros</td><td>MindLab-UNAL</td></tr><tr><td>IIT Patna, India</td><td>sikdar</td><td>IITP</td></tr><tr><td>University of North Texas, USA</td><td>solomon</td><td>UNT</td></tr><tr><td>University of Illinois at Urbana Champaign, USA</td><td>upadhya</td><td>CogComp</td></tr><tr><td>The University of Texas Health Science Center at Houston, USA</td><td>wu</td><td>UTH CCB</td></tr><tr><td>East China Normal University, China</td><td>yi</td><td>ECNU</td></tr></table>" |
| }, |
| "TABREF5": { |
| "text": "Participant organization and the respective User IDs and Team IDs.", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF7": { |
| "text": "Performance on test data for participating systems on Task A -Identification of disorder mentions.", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td/><td/><td/><td/><td>Task A</td><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td>Strict</td><td/><td/><td>Relaxed</td><td/><td/></tr><tr><td>Team ID</td><td>User ID</td><td>Run</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>Data</td></tr><tr><td/><td/><td/><td>(%)</td><td>(%)</td><td>(%)</td><td>(%)</td><td>(%)</td><td>(%)</td><td/></tr><tr><td>hjdai</td><td>TMU</td><td>1</td><td>0.687</td><td>0.922</td><td>0.787</td><td>0.952</td><td>1.000</td><td>0.975</td><td>T</td></tr><tr><td>wu</td><td>UTH CCB</td><td>0</td><td>0.877</td><td>0.710</td><td>0.785</td><td>0.962</td><td>0.789</td><td>0.867</td><td>T</td></tr><tr><td>wu</td><td>UTH CCB</td><td>1</td><td>0.828</td><td>0.747</td><td>0.785</td><td>0.941</td><td>0.853</td><td>0.895</td><td>T</td></tr><tr><td colspan=\"3\">Best ShARe/CLEF-2013 performance</td><td>0.800</td><td>0.706</td><td>0.750</td><td>0.925</td><td>0.827</td><td>0.873</td><td>T</td></tr><tr><td>ghiasvand</td><td>UWM</td><td>0</td><td>0.827</td><td>0.675</td><td>0.743</td><td>0.958</td><td>0.799</td><td>0.871</td><td>T</td></tr><tr><td>pathak</td><td>ezDI</td><td>0</td><td>0.813</td><td>0.670</td><td>0.734</td><td>0.954</td><td>0.800</td><td>0.870</td><td>T</td></tr><tr><td>pathak</td><td>ezDI</td><td>1</td><td>0.809</td><td>0.667</td><td>0.732</td><td>0.954</td><td>0.801</td><td>0.871</td><td>T</td></tr><tr><td>wu</td><td>UTH 
CCB</td><td>2</td><td>0.657</td><td>0.790</td><td>0.717</td><td>0.806</td><td>0.893</td><td>0.847</td><td>T</td></tr><tr><td>francisco</td><td>ULisboa</td><td>1</td><td>0.803</td><td>0.646</td><td>0.716</td><td>0.954</td><td>0.781</td><td>0.858</td><td>T</td></tr><tr><td>francisco</td><td>ULisboa</td><td>2</td><td>0.803</td><td>0.646</td><td>0.716</td><td>0.954</td><td>0.781</td><td>0.858</td><td>T</td></tr><tr><td>francisco</td><td>ULisboa</td><td>0</td><td>0.796</td><td>0.642</td><td>0.711</td><td>0.959</td><td>0.793</td><td>0.868</td><td>T</td></tr><tr><td>oronoz</td><td>IxaMed</td><td>0</td><td>0.766</td><td>0.650</td><td>0.703</td><td>0.936</td><td>0.752</td><td>0.834</td><td>T</td></tr><tr><td>oronoz</td><td>IxaMed</td><td>1</td><td>0.660</td><td>0.721</td><td>0.689</td><td>0.899</td><td>0.842</td><td>0.870</td><td>T</td></tr><tr><td>hjdai</td><td>TMU</td><td>0</td><td>0.667</td><td>0.414</td><td>0.511</td><td>0.912</td><td>0.591</td><td>0.717</td><td>T</td></tr><tr><td>sikdar</td><td>IITP</td><td>0</td><td>0.525</td><td>0.430</td><td>0.473</td><td>0.862</td><td>0.726</td><td>0.788</td><td>T</td></tr><tr><td>sikdar</td><td>IITP</td><td>2</td><td>0.467</td><td>0.440</td><td>0.453</td><td>0.812</td><td>0.775</td><td>0.793</td><td>T</td></tr><tr><td>sikdar</td><td>IITP</td><td>1</td><td>0.493</td><td>0.410</td><td>0.448</td><td>0.828</td><td>0.706</td><td>0.762</td><td>T</td></tr></table>" |
| }, |
| "TABREF8": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF10": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td colspan=\"6\">: Performance on test data for participat-</td></tr><tr><td colspan=\"6\">ing systems on Task B -Normalization of disorder</td></tr><tr><td colspan=\"6\">mentions to UMLS (SNOMED-CT subset) CUIs.</td></tr><tr><td/><td/><td>Task B</td><td/><td/><td/></tr><tr><td/><td/><td/><td>Strict</td><td>Relaxed</td><td/></tr><tr><td>Team ID</td><td>User ID</td><td>Run</td><td>Acc.</td><td>Acc.</td><td>Data</td></tr><tr><td/><td/><td/><td>(%)</td><td>(%)</td><td/></tr><tr><td>TMU</td><td>hjdai</td><td>0</td><td>0.716</td><td>0.777</td><td>T</td></tr><tr><td>TMU</td><td>hjdai</td><td>1</td><td>0.716</td><td>0.777</td><td>T</td></tr><tr><td>UTH CCB</td><td>wu</td><td>2</td><td>0.713</td><td>0.903</td><td>T</td></tr><tr><td>UTH CCB</td><td>wu</td><td>1</td><td>0.680</td><td>0.910</td><td>T</td></tr><tr><td>UTH CCB</td><td>wu</td><td>0</td><td>0.647</td><td>0.910</td><td>T</td></tr><tr><td>UWM</td><td>ghiasvand</td><td>0</td><td>0.623</td><td>0.923</td><td>T</td></tr><tr><td>ezDI</td><td>pathak</td><td>0</td><td>0.603</td><td>0.900</td><td>T</td></tr><tr><td>ezDI</td><td>pathak</td><td>1</td><td>0.600</td><td>0.899</td><td>T</td></tr><tr><td colspan=\"3\">Best ShARe/CLEF-2013 performance</td><td>0.589</td><td>0.895</td><td>T</td></tr><tr><td>IxaMed</td><td>oronoz</td><td>0</td><td>0.556</td><td>0.855</td><td>T</td></tr><tr><td>IxaMed</td><td>oronoz</td><td>1</td><td>0.421</td><td>0.584</td><td>T</td></tr><tr><td>ULisboa</td><td>francisco</td><td>2</td><td>0.388</td><td>0.601</td><td>T</td></tr><tr><td>ULisboa</td><td>francisco</td><td>1</td><td>0.385</td><td>0.596</td><td>T</td></tr><tr><td>ULisboa</td><td>francisco</td><td>0</td><td>0.377</td><td>0.588</td><td>T</td></tr><tr><td>IITP</td><td>sikdar</td><td>2</td><td>0.318</td><td>0.724</td><td>T</td></tr><tr><td>IITP</td><td>sikdar</td><td>0</td><td>0.312</td><td>0.725</td><td>T</td></tr><tr><td>IITP</td><td>sikdar</td><td>1</td><td>0.299</td><td>0.730</td><td>T</td></tr></table>" |
| }, |
| "TABREF11": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>: Performance on development data</td></tr><tr><td>for some participating systems on Task B -</td></tr><tr><td>Normalization of disorder mentions to UMLS</td></tr><tr><td>(SNOMED-CT subset) CUIs.</td></tr></table>" |
| } |
| } |
| } |
| } |