| { |
| "paper_id": "N06-1009", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:45:46.004706Z" |
| }, |
| "title": "Role of Local Context in Automatic Deidentification of Ungrammatical, Fragmented Text", |
| "authors": [ |
| { |
| "first": "Tawanda", |
| "middle": [], |
| "last": "Sibanda", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "CSAIL Massachusetts Institute of Technology Cambridge", |
| "location": { |
| "postCode": "02139", |
| "region": "MA" |
| } |
| }, |
| "email": "tawanda@mit.edu" |
| }, |
| { |
| "first": "Ozlem", |
| "middle": [], |
| "last": "Uzuner", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "ouzuner@albany.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Deidentification of clinical records is a crucial step before these records can be distributed to non-hospital researchers. Most approaches to deidentification rely heavily on dictionaries and heuristic rules; these approaches fail to remove most personal health information (PHI) that cannot be found in dictionaries. They also can fail to remove PHI that is ambiguous between PHI and non-PHI. Named entity recognition (NER) technologies can be used for deidentification. Some of these technologies exploit both local and global context of a word to identify its entity type. When documents are grammatically written, global context can improve NER. In this paper, we show that we can deidentify medical discharge summaries using support vector machines that rely on a statistical representation of local context. We compare our approach with three different systems. Comparison with a rulebased approach shows that a statistical representation of local context contributes more to deidentification than dictionaries and hand-tailored heuristics. Comparison with two well-known systems, SNoW and IdentiFinder, shows that when the language of documents is fragmented, local context contributes more to deidentification than global context.", |
| "pdf_parse": { |
| "paper_id": "N06-1009", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Deidentification of clinical records is a crucial step before these records can be distributed to non-hospital researchers. Most approaches to deidentification rely heavily on dictionaries and heuristic rules; these approaches fail to remove most personal health information (PHI) that cannot be found in dictionaries. They also can fail to remove PHI that is ambiguous between PHI and non-PHI. Named entity recognition (NER) technologies can be used for deidentification. Some of these technologies exploit both local and global context of a word to identify its entity type. When documents are grammatically written, global context can improve NER. In this paper, we show that we can deidentify medical discharge summaries using support vector machines that rely on a statistical representation of local context. We compare our approach with three different systems. Comparison with a rulebased approach shows that a statistical representation of local context contributes more to deidentification than dictionaries and hand-tailored heuristics. Comparison with two well-known systems, SNoW and IdentiFinder, shows that when the language of documents is fragmented, local context contributes more to deidentification than global context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Medical discharge summaries contain information that is useful to clinical researchers who study the interactions between, for example, different medications and diseases. However, these summaries include explicit personal health information (PHI) whose release would jeopardize privacy. In the United States, the Health Information Portability and Accountability Act (HIPAA) provides guidelines for protecting the confidentiality of health care information. HIPAA lists seventeen pieces of textual PHI of which the following appear in medical discharge summaries: first and last names of patients, their health proxies, and family members; doctors' first and last names; identification numbers; telephone, fax, and pager numbers; hospital names; geographic locations; and dates. Removing PHI from medical documents is the goal of deidentification. This paper presents a method based on a statistical representation of local context for automatically removing explicit PHI from medical discharge summaries, despite the often ungrammatical, fragmented, and ad hoc language of these documents, even when some words in the documents are ambiguous between PHI and non-PHI (e.g., \"Huntington\" as the name of a person and as the name of a disease), and even when some of the PHI cannot be found in dictionaries (e.g., misspelled and/or foreign names). This method differs from traditional approaches to deidentification in its independence from dictionaries and hand-tailored heuristics. It applies statistical named entity recognition (NER) methods to the more challenging task of deidenti-fication but differs from traditional NER approaches in its heavy reliance on a statistical representation of local context. Finally, this approach targets all PHI that appear in medical discharge summaries. 
Experiments reported in this paper show that context plays a more important role in deidentification than dictionaries, and that a statistical representation of local context contributes more to deidentification than global context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the literature, named entities such as people, places, and organizations mentioned in news articles have been successfully identified by various approaches (Bikel et al., 1999; McCallum et al., 2000; Riloff and Jones, 1996; Collins and Singer, 1999; Hobbs et al., 1996) . Most of these approaches are tailored to a particular domain, e.g., understanding disaster news; they exploit both the characteristics of the entities they focus on and the contextual clues related to these entities.", |
| "cite_spans": [ |
| { |
| "start": 159, |
| "end": 179, |
| "text": "(Bikel et al., 1999;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 180, |
| "end": 202, |
| "text": "McCallum et al., 2000;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 203, |
| "end": 226, |
| "text": "Riloff and Jones, 1996;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 227, |
| "end": 252, |
| "text": "Collins and Singer, 1999;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 253, |
| "end": 272, |
| "text": "Hobbs et al., 1996)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the biomedical domain, NER has focused on identification of biological entities such as genes and proteins (Collier et al., 2000; Yu et al., 2002) . Various statistical approaches, e.g., a maximum entropy model (Finkel et al., 2004) , HMMs and SVMs (GuoDong et al., 2005) , have been used with various feature sets including surface and syntactic features, word formation patterns, morphological patterns, part-of-speech tags, head noun triggers, and coreferences.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 132, |
| "text": "(Collier et al., 2000;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 133, |
| "end": 149, |
| "text": "Yu et al., 2002)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 214, |
| "end": 235, |
| "text": "(Finkel et al., 2004)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 238, |
| "end": 274, |
| "text": "HMMs and SVMs (GuoDong et al., 2005)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Deidentification refers to the removal of identifying information from records. Some approaches to deidentification have focused on particular categories of PHI, e.g., Taira et al. focused on only patient names (2002), Thomas et al. focused on proper names including doctors' names (2002) . For full deidentification, i.e., removal of all PHI, Gupta et al. used \"a complex set of rules, dictionaries, pattern-matching algorithms, and Unified Medical Language System\" (2004). Sweeney's Scrub system employed competing algorithms that used patterns and lexicons to find PHI. Each of the algorithms included in her system specialized in one kind of PHI, each calculated the probability that a given word belonged to the class of PHI that it specialized in, and the algorithm with the highest prece-dence and the highest probability labelled the given word. This system identified 99-100% of all PHI in the test corpus of patient records and letters to physicians (1996) .", |
| "cite_spans": [ |
| { |
| "start": 282, |
| "end": 288, |
| "text": "(2002)", |
| "ref_id": null |
| }, |
| { |
| "start": 960, |
| "end": 966, |
| "text": "(1996)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We use a variety of features to train a support vector machine (SVM) that can automatically extract local context cues and can recognize PHI (even when some PHI are ambiguous between PHI and non-PHI, and even when PHI do not appear in dictionaries). We compare this approach with three others: a heuristic rule-based approach (Douglass, 2005) , the SNoW (Sparse Network of Winnows) system's NER component (Roth and Yih, 2002) , and IdentiFinder (Bikel et al., 1999) . The heuristic rulebased system relies heavily on dictionaries. SNoW and IdentiFinder consider some representation of the local context of words; they also rely on information about global context. Local context helps them recognize stereotypical names and name structures. Global context helps these systems update the probability of observing a particular entity type based on the other entity types contained in the sentence. We hypothesize that, given the mostly fragmented and ungrammatical nature of discharge summaries, local context will be more important for deidentification than global context. We further hypothesize that local context will be a more reliable indication of PHI than dictionaries (which can be incomplete). The results presented in this paper show that SVMs trained with a statistical representation of local context outperform all baselines. In other words, a classifier that relies heavily on local context (very little on dictionaries, and not at all on global context) outperforms classifiers that rely either on global context or dictionaries (but make much less use of local context). Global context cannot contribute much to deidentification when the language of documents is fragmented; dictionaries cannot contribute to deidentification when PHI are either missing from dictionaries or are ambiguous between PHI and non-PHI. Local context remains a reliable indication of PHI under these circumstances.", |
| "cite_spans": [ |
| { |
| "start": 326, |
| "end": 342, |
| "text": "(Douglass, 2005)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 405, |
| "end": 425, |
| "text": "(Roth and Yih, 2002)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 445, |
| "end": 465, |
| "text": "(Bikel et al., 1999)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The features used for our SVM-based system can be enriched in order to automatically acquire more and varied local context information. The features discussed in this paper have been chosen because of their simplicity and effectiveness on both grammatical and ungrammatical free text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Discharge summaries are the reports generated by medical personnel at the end of a patient's hospital stay and contain important information about the patient's health. Linguistic processing of these documents is challenging, mainly because these reports are full of medical jargon, acronyms, shorthand notations, misspellings, ad hoc language, and fragments of sentences. Our goal is to identify the PHI used in discharge summaries even when text is fragmented and ad hoc, even when many words in the summaries are ambiguous between PHI and non-PHI, and even when many PHI contain misspelled or foreign words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpora", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this study, we worked with various corpora consisting of discharge summaries. One of these corpora was obtained already deidentified 1 ; i.e., (many) PHI (and some non-PHI) found in this corpus had been replaced with the generic placeholder [REMOVED] . An excerpt from this corpus is below:", |
| "cite_spans": [ |
| { |
| "start": 244, |
| "end": 253, |
| "text": "[REMOVED]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpora", |
| "sec_num": "3" |
| }, |
| { |
| "text": "HISTORY OF PRESENT ILLNESS: The patient is a 77-year-old-woman with long standing hypertension who presented as a Walk-in to me at the ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpora", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We hand-annotated this corpus and experimented with it in several ways: we used it to generate a corpus of discharge summaries in which the [REMOVED] tokens were replaced with appropriate, fake PHI obtained from dictionaries 2 (Douglass,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpora", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1 Authentic clinical data is very difficult to obtain for privacy reasons; therefore, the initial implementation of our system was tested on previously deidentified data that we reidentified.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpora", |
| "sec_num": "3" |
| }, |
| { |
| "text": "2 e.g., John Smith initiated radiation therapy ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpora", |
| "sec_num": "3" |
| }, |
| { |
| "text": "; we used it to generate a second corpus in which most of the [REMOVED] tokens and some of the remaining text were appropriately replaced with lexical items that were ambiguous between PHI and non-PHI 3 ; we used it to generate another corpus in which all of the [REMOVED] tokens corresponding to names were replaced with appropriately formatted entries that could not be found in dictionaries 4 . For all of these corpora, we generated realistic substitutes for the [REMOVED] tokens using dictionaries (e.g., a dictionary of names from US Census Bureau) and patterns (e.g., names of people could be of the formats, \"Mr. F. Lastname\", \"Firstname Lastname\", \"Lastname\", \"F. M. Lastname\", etc.; dates could appear as \"dd/mm/yy\", \"dd Mon-thName, yyyy\", \"ddth of MonthName, yyyy\", etc.). In addition to these reidentified corpora (i.e., corpora generated from previously deidentified data), we also experimented with authentic discharge summaries 5 . The approximate distributions of PHI in the reidentified corpora and in the authentic corpus are shown in Table 1 ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1053, |
| "end": 1060, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "2005)", |
| "sec_num": null |
| }, |
| { |
| "text": "Traditional deidentification approaches rely heavily on dictionaries and hand-tailored heuristics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Baseline: Heuristic+Dictionary", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We obtained one such system (Douglass, 2005 ) that used three kinds of dictionaries:", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 43, |
| "text": "(Douglass, 2005", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Baseline: Heuristic+Dictionary", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 PHI lookup tables for female and male first names, last names, last name prefixes, hospital names, locations, and states.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Baseline: Heuristic+Dictionary", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 A dictionary of \"common words\" that should never be classified as PHI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Baseline: Heuristic+Dictionary", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Lookup tables for context clues such as titles, e.g., Mr.; name indicators, e.g., proxy, daughter; location indicators, e.g., lives in.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Baseline: Heuristic+Dictionary", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Given these dictionaries, this system identifies keywords that appear in the PHI lookup tables but do not occur in the common words list, finds approximate matches for possibly misspelled words, and uses patterns and indicators to find PHI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Baseline: Heuristic+Dictionary", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "SNoW is a statistical classifier that includes a NER component for recognizing entities and their relations. To create a hypothesis about the entity type of a word, SNoW first takes advantage of \"words, tags, conjunctions of words and tags, bigram and trigram of words and tags\", number of words in the entity, bigrams of words in the entity, and some attributes such as the prefix and suffix, as well as information about the presence of the word in a dictionary of people, organization, and location names (Roth and Yih, 2002) . After this initial step, it uses the possible relations of the entity with other entities in the sentence to strengthen or weaken its hypothesis about the entity's type. The constraints imposed on the entities and their relationships constitute the global context of inference. Intuitively, information about global context and constraints imposed on the relationships of entities should improve recognition of both entities and relations. Roth and Yih (2002) present results that support this hypothesis. SNoW can recognize entities that correspond to people, locations, and organizations. For deidentification purposes, all of these entities correspond to PHI; however, they do not constitute a comprehensive set. We evaluated SNoW only on the PHI it is built to recognize. We trained and tested its NER component using ten-fold cross-validation on each of our corpora.", |
| "cite_spans": [ |
| { |
| "start": 508, |
| "end": 528, |
| "text": "(Roth and Yih, 2002)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 971, |
| "end": 990, |
| "text": "Roth and Yih (2002)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SNoW", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "IdentiFinder uses Hidden Markov Models to learn the characteristics of names of entities, including people, locations, geographic jurisdictions, organizations, dates, and contact information (Bikel et al., 1999) . For each named entity class, this system learns a bigram language model which indicates the likelihood that a sequence of words belongs to that class. This model takes into consideration features of words, such as whether the word is capitalized, all upper case, or all lower case, whether it is the first word of the sentence, or whether it contains digits and punctuation. Thus, it captures the local context of the target word (i.e., the word to be classified; also referred to as TW). To find the names of all entities, the system finds the most likely sequence of entity types in a sentence given a sequence of words; thus, it captures the global context of the entities in a sentence.", |
| "cite_spans": [ |
| { |
| "start": 191, |
| "end": 211, |
| "text": "(Bikel et al., 1999)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "IdentiFinder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We obtained this system pre-trained on a news corpus and applied it to our corpora. We mapped its entity tags to our PHI and non-PHI labels. Admittedly, testing IdentiFinder on the discharge summaries puts this system at a disadvantage compared to the other statistical approaches. However, despite this shortcoming, IdentiFinder helps us evaluate the contribution of global context to deidentification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "IdentiFinder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We hypothesize that systems that rely on dictionaries and hand-tailored heuristics face a major challenge when particular PHI can be used in many different contexts, when PHI are ambiguous, or when the PHI cannot be found in dictionaries. We further hypothesize that given the ungrammatical and ad hoc nature of our data, despite being very powerful systems, IdentiFinder and SNoW may not provide perfect deidentification. In addition to being very fragmented, discharge summaries do not present information in the form of relations between entities, and many sentences contain only one entity. Therefore, the global context utilized by IdentiFinder and SNoW cannot contribute reliably to deidentification. When run on discharge summaries, the strength of these systems comes from their ability to recognize the structure of the names of different entity types and the local contexts of these entities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SVMs with Local Context", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Discharge summaries contain patterns that can serve as local context. Therefore, we built an SVMbased system that, given a target word (TW), would accurately predict whether the TW was part of PHI. We used a development corpus to find features that captured as much of the immediate context of the TW as possible, paying particular attention to cues human annotators found useful for deidentification. We added to this some surface characteristics for the TW itself and obtained the following features: the TW itself, the word before, and the word after (all lemmatized); the bigram before and the bigram after TW (lemmatized); the part of speech of TW, of the word before, and of the word after; capitalization of TW; length of TW; MeSH ID of the noun phrase containing TW (MeSH is a dictionary of Medical Subject Headings and is a subset of the Unified Medical Language System (UMLS) of the National Library of Medicine); presence of TW, of the word before, and of the word after TW in the name, location, hospital, and month dictionaries; the heading of the section in which TW appears, e.g., \"History of Present Illness\"; and, whether TW contains \"-\" or \"/\" characters. Note that some of these features, e.g., capitalization and punctuation within TW, were also used in IdentiFinder.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SVMs with Local Context", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We used the SVM implementation provided by LIBSVM (Chang and Lin, 2001 ) with a linear kernel to classify each word in the summaries as either PHI or non-PHI based on the above-listed features. We evaluated this system using ten-fold crossvalidation.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 70, |
| "text": "(Chang and Lin, 2001", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SVMs with Local Context", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Local context contributes differently to each of the four deidentification systems. Our SVM-based approach uses only local context. The heuristic, rulebased system relies heavily on dictionaries. Identi-Finder uses a simplified representation of local context and adds to this information about the global context as represented by transition probabilities between entities in the sentence. SNoW uses local context as well, but it also makes an effort to benefit from relations between entities. Given the difference in the strengths of these systems, we compared their performance on both the reidentified and authentic corpora (see Section 3). We hypothesized that given the nature of medical discharge summaries, Iden-tiFinder would not be able to find enough global context and SNoW would not be able to make use of relations (because many sentences in this corpus contain only one entity). We further hypothesized that when the data contain words ambiguous between PHI and non-PHI, or when the PHI cannot be found in dictionaries, the heuristic, rule-based approach would perform poorly. In all of these cases, SVMs trained with local context information would be sufficient for proper deidentification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6" |
| }, |
| { |
| "text": "To compare the SVM approach with Identi-Finder, we evaluated both on PHI consisting of names of people (i.e., patient and doctor names), locations (i.e., geographic locations), and organizations (i.e., hospitals), as well as PHI consisting of dates, and contact information (i.e., phone numbers, pagers). We omitted PHI representing ID numbers from this experiment in order to be fair to Identi-Finder which was not trained on this category. To compare the SVM approach with SNoW, we trained both systems with only PHI consisting of names of people, locations, and organizations, i.e., the entities that SNoW was designed to recognize.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We first deidentified:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deidentifying Reidentified and Authentic Discharge Summaries", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "\u2022 Previously deidentified discharge summaries into which we inserted invented but realistic surrogates for PHI without deliberately introducing ambiguous words or words not found in dictionaries, and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deidentifying Reidentified and Authentic Discharge Summaries", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "\u2022 Authentic discharge summaries with real PHI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deidentifying Reidentified and Authentic Discharge Summaries", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Our experiments showed that SVMs with local context outperformed all other approaches. On the reidentified corpus, SVMs gave an F-measure of 97.2% for PHI. In comparison, IdentiFinder, having been trained on the news corpus, gave an Fmeasure of 67.4% and was outperformed by the heuristic+dictionary approach (see Table 2 ). 6", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 314, |
| "end": 321, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deidentifying Reidentified and Authentic Discharge Summaries", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "We evaluated SNoW only on the three kinds of entities it is designed to recognize. We crossvalidated it on our corpora and found that its performance in recognizing people, locations, and organizations was 96.2% in terms of F-measure (see Table 3 7 ). In comparison, our SVM-based system, when retrained to only consider people, locations, and organizations so as to be directly comparable to SNoW, had an F-measure of 98%. 8 Similarly, on the authentic discharge summaries, the SVM approach outperformed all other approaches in recognizing PHI (see Tables 4 and 5) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 239, |
| "end": 246, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 550, |
| "end": 565, |
| "text": "Tables 4 and 5)", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deidentifying Reidentified and Authentic Discharge Summaries", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "In discharge summaries, the same words can appear both as PHI and as non-PHI. For example, in the same corpus, the word \"Swan\" can appear both as the name of a medical device (i.e., \"Swan Catheter\") and as the name of a person, etc. Ideally, we would like to deidentify data even when many words in the corpus are ambiguous between PHI and non-PHI. We hypothesize that given ambiguities in the data, context will play an important role in determining whether the particular instance of the word is PHI and that given the many fragmented sentences in our corpus, local context will be particularly useful. To test these hypotheses, we generated a corpus by reidentifying the previously deidentified corpus with words that were ambiguous between PHI and non-PHI, making sure to use each ambiguous word both as PHI and non-PHI, and also making sure to cover all acceptable formats of all PHI (see Section 3). The resulting distribution of PHI is shown in Our results showed that, on this corpus, the SVMbased system accurately recognized 91.9% of all PHI; its performance, measured in terms of Fmeasure was also significantly better than all other approaches both on the complete corpus containing ambiguous entries (see Table 7 and Table 8 ) and only on the ambiguous words in this corpus (see Table 9 ). ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1218, |
| "end": 1237, |
| "text": "Table 7 and Table 8", |
| "ref_id": "TABREF11" |
| }, |
| { |
| "start": 1292, |
| "end": 1299, |
| "text": "Table 9", |
| "ref_id": "TABREF13" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deidentifying Data with Ambiguous PHI", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Some medical documents contain foreign or misspelled names that need to be effectively removed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deidentifying PHI Not Found in Dictionaries", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "To evaluate the different deidentification approaches under such circumstances, we generated a corpus in which the names of people, locations, and hospitals were all random permutations of letters. The resulting words were not found in any dictionaries but followed the general format of the entity name category to which they belonged. The distribution of PHI in this third corpus is in On this data set, dictionaries cannot contribute to deidentification because none of the PHI appear in dictionaries. Under these conditions, proper deidentification relies completely on context. Our results showed that SVM approach outperformed all other approaches on this corpus also (Tables 11 and 12) Of only the PHI not found in dictionaries, 95.5% was accurately identified by the SVM approach. In comparison, the heuristic+dictionary approach accurately identified those PHI that could not be found in dictionaries 11.1% of the time, IdentiFinder recognized these entities 76.7% of the time and SNoW gave an accuracy of 79% (see Table 13 ). ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 674, |
| "end": 692, |
| "text": "(Tables 11 and 12)", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1024, |
| "end": 1032, |
| "text": "Table 13", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deidentifying PHI Not Found in Dictionaries", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "As hypothesized, in all experiments, the SVMbased approach outperformed all other approaches. SVM's feature set included a total of 26 features, 12 of which were dictionary-related features (excluding MeSH). Information gain showed that the most informative features for deidentification were the TW, the bigram before TW, the bigram after TW, the word before TW, and the word after TW.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Importance", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "Note that the TW itself is important for classification; many of the non-PHI correspond to common words that appear in the corpus frequently and the SVM learns the fact that some words, e.g., the, admit, etc., are never PHI. In addition, the context of TW (captured in the form of unigrams and bigrams of words and part-of-speech tags surrounding TW) contributes significantly to deidentification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Importance", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "There are many ways of automatically capturing context. In our data, unigrams and bigrams of words and their part-of-speech tags seem to be sufficient for a statistical representation of local context. The global context, as represented within IdentiFinder and SNoW, could not contribute much to deidentification on this corpus because of the fragmented nature of the language of these documents, because most sentences in this corpus contain only one entity, and because many sentences do not include explicit relations between entities. However, there is enough structure in this data that can be captured by local context; lack of relations between entities and the inability to capture global context do not hold us back from almost perfect deidentification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Importance", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "We presented a set of experimental results that show that local context contributes more to deidentification than dictionaries and global context when working with medical discharge summaries. These documents are characterized by incomplete, fragmented sentences, and ad hoc language. They use a lot of jargon, many times omit subjects of sentences, use entity names that can be misspelled or foreign words, can include entity names that are ambiguous between PHI and non-PHI, etc. Similar documents in many domains exist; our experiments here show that even on such challenging corpora, local context can be exploited to identify entities. Even a rudimentary statistical representation of local context, as captured by unigrams and bigrams of lemmatized keywords and part-of-speech tags, gives good results and outperforms more sophisticated approaches that rely on global context. The simplicity of the representation of local context and the results obtained using this simple representation are particularly promising for many tasks that require processing ungrammatical and fragmented text where global context cannot be counted on.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "e.g., D. Sessions initiated radiation therapy... 4 e.g., O. Ymfgkstjj initiated radiation therapy ...5 We obtained authentic discharge summaries with real PHI in the final stages of this project.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that in deidentification, recall is much more important than precision. Low recall indicates that many PHI remain in the documents and that there is high risk to patient privacy. Low precision means that words that do not correspond to PHI have also been removed. This hurts the integrity of the data but does not present a risk to privacy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The best performances are marked in bold in all of the tables in this paper. 8 For all of the corpora presented in this paper, a performance difference of 1% or more is statistically significant at \u03b1 = 0.05.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This publication was made possible by grant number R01-EB001659 from the National Institute of Biomedical Imaging and Bioengineering; by grant number N01-LM-3-3513 on National Multi-Protocol Ensemble for Self-Scaling Systems for Health from National Library of Medicine; and, by grant number U54-LM008748 on Informatics for Integrating Biology to the Bedside from National Library of Medicine.We are grateful to Professor Peter Szolovits and Dr. Boris Katz for their insights, and to Professor Carol Doll, Sue Felshin, Gregory Marton, and Tian He for their feedback on this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Concept-Match Medical Data Scrubbing: How Pathology Text Can Be Used in Research", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "J" |
| ], |
| "last": "Berman", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Archives of Pathology and Laboratory Medicine", |
| "volume": "", |
| "issue": "6", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. J. Berman. 2002. Concept-Match Medical Data Scrubbing: How Pathology Text Can Be Used in Research. Archives of Pathology and Laboratory Medicine, 127(6).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "An Algorithm That Learns What's in a Name", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bikel", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "M" |
| ], |
| "last": "Weischedel", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Machine Learning Journal Special Issue on Natural Language Learning", |
| "volume": "34", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. M. Bikel, R. Schwartz, and R. M. Weischedel. 1999. An Algorithm That Learns What's in a Name. Ma- chine Learning Journal Special Issue on Natural Lan- guage Learning, 34(1/3).", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "LIBSVM: a Library for Support Vector Machines", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C. Chang and C. Lin. 2001. LIBSVM: a Library for Sup- port Vector Machines.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Extracting the Names of Genes and Gene Products with a Hidden Markov Model", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Collier", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Nobata", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "N. Collier, C. Nobata, and J. Tsujii. 2000. Extracting the Names of Genes and Gene Products with a Hidden Markov Model. Proceedings of COLING.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Unsupervised Models for Named Entity Classification", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Collins and Y. Singer. 1999. Unsupervised Mod- els for Named Entity Classification. Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Exploiting Context for Biomedical Entity Recognition: From Syntax to the Web", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Dingare", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Nissim", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Sinclair", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of Joint Workshop on Natural Language Processing in Biomedicine and its Applications at COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Finkel, S. Dingare, H. Nguyen, M. Nissim, C. Man- ning, and G. Sinclair. 2004. Exploiting Context for Biomedical Entity Recognition: From Syntax to the Web. Proceedings of Joint Workshop on Natural Lan- guage Processing in Biomedicine and its Applications at COLING.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Protein Structures and Information Extraction from Biological Texts: The PASTA System", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Demetriou", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Artymiuk", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Willett", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Bioinformatics", |
| "volume": "19", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Gaizauskas, G. Demetriou, P. Artymiuk, and P. Willett. 2003. Protein Structures and Information Extraction from Biological Texts: The PASTA System. Bioinfor- matics, 19(1).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Recognizing Names in Biomedical Texts: a Machine Learning Approach",
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Guodong", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Jie", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Jian", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Dan", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Chewlim", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Bioinformatics",
| "volume": "20",
| "issue": "7",
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Z. GuoDong, Z. Jie, S. Jian, S. Dan, T. ChewLim. 2005. Recognizing Names in Biomedical Texts: a Machine Learning Approach. Bioinformatics, 20(7).", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Evaluation of a Deidentification (De-Id) Software Engine to Share Pathology Reports and Clinical Documents for Research", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saul", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gilbertson", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "American Journal of Clinical Pathology", |
| "volume": "", |
| "issue": "6", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Gupta, M. Saul, J. Gilbertson. 2004. Evalua- tion of a Deidentification (De-Id) Software Engine to Share Pathology Reports and Clinical Documents for Research. American Journal of Clinical Pathology, 121(6).", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "FAS-TUS: A Cascaded Finite-State Transducer for Extracting Information from Natural-Language Text", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Hobbs", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "E" |
| ], |
| "last": "Appelt", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Bear", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Israel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kameyama", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stickel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Tyson", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Finite State Devices for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. R. Hobbs, D. E. Appelt, J. Bear, D. Israel, M. Kameyama, M. Stickel, and M. Tyson. 1996. FAS- TUS: A Cascaded Finite-State Transducer for Extract- ing Information from Natural-Language Text. In Fi- nite State Devices for Natural Language Processing. MIT Press, Cambridge, MA.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Computer-Assisted De-Identification of Free Text in the MIMIC II Database", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Douglass", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "D" |
| ], |
| "last": "Clifford", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Reisner", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "B" |
| ], |
| "last": "Moody", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "G" |
| ], |
| "last": "Mark", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Computers in Cardiology", |
| "volume": "32", |
| "issue": "", |
| "pages": "331--334", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Douglass, G. D. Clifford, A. Reisner, G. B. Moody, R. G. Mark. 2005. Computer-Assisted De- Identification of Free Text in the MIMIC II Database. Computers in Cardiology. 32:331-334.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Maximum Entropy Markov Models for Information Extraction and Segmentation", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Freitag", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. McCallum, D. Freitag, and F. Pereira. 2000. Maxi- mum Entropy Markov Models for Information Extrac- tion and Segmentation. Proceedings of ICML.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Automatically Generating Extraction Patterns from Untagged Text", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Riloff", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of AAAI-96", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Riloff and R. Jones. 1996. Automatically Generating Extraction Patterns from Untagged Text. Proceedings of AAAI-96.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Probabilistic Reasoning for Entity and Relation Recognition", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Roth and W. Yih. 2002. Probabilistic Reasoning for Entity and Relation Recognition. Proceedings of COLING.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Medical Document Anonymization with a Semantic Lexicon", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Ruch", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "H" |
| ], |
| "last": "Baud", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Rassinoux", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Bouillon", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Robert", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of AMIA", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. Ruch, R. H. Baud, A. Rassinoux, P. Bouillon, G. Robert. 2000. Medical Document Anonymization with a Semantic Lexicon. Proceedings of AMIA.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Using Predicate-Argument Structures for Information Extraction", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Harabagiu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Aarseth", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Surdeanu, S. M. Harabagiu, J. Williams, and P. Aarseth. 2003. Using Predicate-Argument Structures for Information Extraction. Proceedings of ACL 2003.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Replacing personally-identifying information in medical records, the scrub system", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Sweeney", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Journal of the American Medical Informatics Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L. Sweeney. 1996. Replacing personally-identifying in- formation in medical records, the scrub system. Jour- nal of the American Medical Informatics Association.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Identification of patient name references within medical documents using semantic selectional restrictions",
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "K" |
| ], |
| "last": "Taira", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "A T" |
| ], |
| "last": "Bui", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kangarloo", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of AMIA",
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. K. Taira, A. A. T. Bui, H. Kangarloo. 2002. Identifi- cation of patient name references within medical doc- uments using semantic selectional restrictions. Pro- ceedings of AMIA.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A Successful Technique for Removing Names in Pathology Reports Using an Augmented Search and Replace Method", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Mamlin", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Schadow", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of AMIA", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. M. Thomas, B. Mamlin, G. Schadow, C. McDonald. 2002. A Successful Technique for Removing Names in Pathology Reports Using an Augmented Search and Replace Method. Proceedings of AMIA.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Automatic Extraction of Gene and Protein Synonyms from MEDLINE and Journal Articles",
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Hatzivassiloglou", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Friedman", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "J" |
| ], |
| "last": "Wilbur", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of AMIA",
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Yu, V. Hatzivassiloglou, C. Friedman, W. J. Wilbur. 2002. Automatic Extraction of Gene and Protein Syn- onyms from MEDLINE and Journal Articles. Pro- ceedings of AMIA.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "[REMOVED] Health Center on [REMOVED]. Recently had been started q.o.d. on Clonidine since [REMOVED] to taper off of the drug. Was told to start Zestril 20 mg. q.d. again. The patient was sent to the [REMOVED] Unit for direct admission for cardioversion and anticoagulation, with the Cardiologist, Dr. [REMOVED] to follow. SOCIAL HISTORY: Lives alone, has one daughter living in [REMOVED]. Is a non-smoker, and does not drink alcohol. HOSPITAL COURSE AND TREATMENT: During admission, the patient was seen by Cardiology, Dr. [REMOVED], was started on IV Heparin, Sotalol 40 mg PO b.i.d. increased to 80 mg b.i.d., and had an echocardiogram. By [REMOVED] the patient had better rate control and blood pressure control but remained in atrial fibrillation. On [REMOVED], the patient was felt to be medically stable.",
| "num": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Distribution of different PHI (in terms of number of words) in the corpora." |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Method</td><td>Class</td><td>P</td><td>R</td><td>F</td></tr><tr><td>SVM</td><td>PHI</td><td colspan=\"3\">97.7% 98.2% 98.0%</td></tr><tr><td>SNoW</td><td>PHI</td><td colspan=\"3\">96.1% 96.2% 96.2%</td></tr><tr><td>SVM</td><td colspan=\"4\">Non-PHI 99.8% 99.8% 99.8%</td></tr><tr><td colspan=\"5\">SNoW Non-PHI 99.6% 99.6% 99.6%</td></tr></table>", |
| "num": null, |
| "text": "Precision, Recall, and F-measure on reidentified discharge summaries. IFinder refers to IdentiFinder, H+D refers to heuristic+dictionary approach." |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Evaluation of SNoW and SVM on recognizing people, locations, and organizations found in reidentified discharge summaries." |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Method</td><td>Class</td><td>P</td><td>R</td><td>F</td></tr><tr><td>SVM</td><td>PHI</td><td colspan=\"3\">97.4% 93.8% 95.6%</td></tr><tr><td>SNoW</td><td>PHI</td><td colspan=\"3\">93.7% 93.4% 93.6%</td></tr><tr><td>SVM</td><td colspan=\"4\">Non-PHI 99.9% 100% 100%</td></tr><tr><td colspan=\"5\">SNoW Non-PHI 99.9% 99.9% 99.9%</td></tr></table>", |
| "num": null, |
| "text": "Evaluation on authentic discharge summaries." |
| }, |
| "TABREF7": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Evaluation of SNoW and SVM on authentic discharge summaries." |
| }, |
| "TABREF8": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Class</td><td colspan=\"2\">Total # Words # Ambiguous Words</td></tr><tr><td>Non-PHI</td><td>19296</td><td>3781</td></tr><tr><td>Patient</td><td>1047</td><td>514</td></tr><tr><td>Doctor</td><td>311</td><td>247</td></tr><tr><td>Location</td><td>24</td><td>24</td></tr><tr><td>Hospital</td><td>592</td><td>82</td></tr><tr><td>Date</td><td>736</td><td>201</td></tr><tr><td>ID</td><td>36</td><td>0</td></tr><tr><td>Phone</td><td>39</td><td>0</td></tr></table>", |
| "num": null, |
| "text": "" |
| }, |
| "TABREF9": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Distribution of PHI when some words are ambiguous between PHI and non-PHI." |
| }, |
| "TABREF11": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Method</td><td>Class</td><td>P</td><td>R</td><td>F</td></tr><tr><td>SVM</td><td>PHI</td><td colspan=\"3\">92.1% 92.8% 92.5%</td></tr><tr><td>SNoW</td><td>PHI</td><td colspan=\"2\">91.6% 77%</td><td>83.7%</td></tr><tr><td>SVM</td><td colspan=\"4\">Non-PHI 99.3% 99.2% 99.3%</td></tr><tr><td colspan=\"5\">SNoW Non-PHI 97.6% 99.3% 98.4%</td></tr></table>", |
| "num": null, |
| "text": "Evaluation on" |
| }, |
| "TABREF12": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Method</td><td>Class</td><td>P</td><td>R</td><td>F</td></tr><tr><td>SVM</td><td>PHI</td><td colspan=\"3\">90.2% 87.5% 88.8%</td></tr><tr><td>IFinder</td><td>PHI</td><td colspan=\"3\">55.8% 64.0% 59.6%</td></tr><tr><td>H+D</td><td>PHI</td><td colspan=\"3\">59.8% 24.3% 34.6%</td></tr><tr><td>SNoW</td><td>PHI</td><td colspan=\"3\">91.6% 82.9% 87.1%</td></tr><tr><td>SVM</td><td colspan=\"4\">Non-PHI 90.5% 92.7% 91.6%</td></tr><tr><td colspan=\"5\">IFinder Non-PHI 69.0% 61.3% 64.9%</td></tr><tr><td>H+D</td><td colspan=\"4\">Non-PHI 59.9% 87.4% 71.1%</td></tr><tr><td colspan=\"5\">SNoW Non-PHI 90.4% 95.5% 92.9%</td></tr></table>", |
| "num": null, |
| "text": "Evaluation of SNoW and SVM on ambiguous data." |
| }, |
| "TABREF13": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Evaluation only on ambiguous people, locations, and organizations found in ambiguous data." |
| }, |
| "TABREF14": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td/><td>.</td><td/></tr><tr><td>Class</td><td colspan=\"2\">Total PHI PHI Not in Dict.</td></tr><tr><td>Non-PHI</td><td>17872</td><td>0</td></tr><tr><td>Patient</td><td>1045</td><td>1045</td></tr><tr><td>Doctor</td><td>302</td><td>302</td></tr><tr><td>Location</td><td>24</td><td>24</td></tr><tr><td>Hospital</td><td>376</td><td>376</td></tr><tr><td>Date</td><td>735</td><td>0</td></tr><tr><td>ID</td><td>36</td><td>0</td></tr><tr><td>Phone</td><td>39</td><td>0</td></tr></table>", |
| "num": null, |
| "text": "" |
| }, |
| "TABREF15": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Distribution of PHI in the corpus where all PHI associated with names are randomly generated so as not to be found in dictionaries." |
| }, |
| "TABREF17": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Evaluation on" |
| }, |
| "TABREF18": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Method</td><td>Class</td><td>P</td><td>R</td><td>F</td></tr><tr><td>SVM</td><td>PHI</td><td colspan=\"3\">93.9% 96.0% 95.0%</td></tr><tr><td>SNoW</td><td>PHI</td><td colspan=\"3\">93.7% 79.0% 85.7%</td></tr><tr><td colspan=\"5\">SVM Non-Method SVM IFinder SNoW H+D</td></tr><tr><td colspan=\"5\">Precision 95.5% 76.7% 79.0% 11.1%</td></tr></table>", |
| "num": null, |
| "text": "PHI 99.6% 99.4% 99.5% SNoW Non-PHI 98.0% 99.5% 98.7% Evaluation of SNoW and SVM on the people, locations, and organizations found in the corpus containing PHI not found in dictionaries." |
| }, |
| "TABREF19": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Precision on only the PHI not found in dictionaries." |
| } |
| } |
| } |
| } |