# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import random
from pathlib import Path
from itertools import product
from dataclasses import dataclass
from typing import List, Optional
import datasets
import numpy as np
_CITATION = """\
@article{10.1093/jamia/ocv037,
author = {Kors, Jan A and Clematide, Simon and Akhondi,
Saber A and van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
journal = {Journal of the American Medical Informatics Association},
volume = {22},
number = {5},
pages = {948-956},
year = {2015},
month = {05},
abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition.Materials
and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
cross-language consistency checks were carried out to arrive at the final annotations.Results The number of final
annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
similar to those between individual annotators and the gold standard. The automatically generated harmonized
annotation set for each language performed equally well as the best annotator for that language.Discussion The use
of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
of automatic annotation techniques.Conclusion To our knowledge, this is the first gold-standard corpus for
biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
issn = {1067-5027},
doi = {10.1093/jamia/ocv037},
url = {https://doi.org/10.1093/jamia/ocv037},
eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
}
"""
_DESCRIPTION = """\
We selected text units from different parallel corpora (Medline abstract titles, drug labels, biomedical patent claims)
in English, French, German, Spanish, and Dutch. Three annotators per language independently annotated the biomedical
concepts, based on a subset of the Unified Medical Language System and covering a wide range of semantic groups.
"""
_HOMEPAGE = "https://biosemantics.erasmusmc.nl/index.php/resources/mantra-gsc"
_LICENSE = "CC_BY_4p0"
_URL = "http://biosemantics.org/MantraGSC/Mantra-GSC.zip"
_LANGUAGES_2 = {
"es": "Spanish",
"fr": "French",
"de": "German",
"nl": "Dutch",
"en": "English",
}
_DATASET_TYPES = {
"emea": "EMEA",
"medline": "Medline",
"patents": "Patents",
}
@dataclass
class DrBenchmarkConfig(datasets.BuilderConfig):
    """BuilderConfig for one Mantra GSC language/genre subset."""
    name: Optional[str] = None
    version: Optional[datasets.Version] = None
    description: Optional[str] = None
    schema: Optional[str] = None
    subset_id: Optional[str] = None
class MANTRAGSC(datasets.GeneratorBasedBuilder):
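    """Mantra GSC: a multilingual gold-standard corpus for biomedical concept recognition."""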
SOURCE_VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = []
for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
if dataset_type == "patents" and language in ["nl", "es"]:
continue
BUILDER_CONFIGS.append(
DrBenchmarkConfig(
name=f"{language}_{dataset_type}",
version=SOURCE_VERSION,
description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
schema="source",
subset_id=f"{language}_{_DATASET_TYPES[dataset_type]}",
)
)
DEFAULT_CONFIG_NAME = "fr_medline"
    def _info(self):
        # The NER label inventory differs per text genre.
        if "emea" in self.config.name:
            names = ['O', 'DISO', 'CHEM|PHEN', 'DEVI', 'PHEN', 'PROC', 'OBJC', 'ANAT', 'LIVB', 'CHEM', 'PHYS']
        elif "medline" in self.config.name:
            names = ['O', 'DISO', 'GEOG', 'DEVI', 'Manufactured Object', 'PHEN', 'PROC', 'Research Device', 'OBJC', 'Mental or Behavioral Dysfunction', 'Research Activity', 'ANAT', 'LIVB', 'CHEM', 'PHYS']
        elif "patents" in self.config.name:
            names = ['O', 'PROC', 'DISO', 'LIVB', 'PHYS', 'PHEN', 'ANAT', 'OBJC', 'Amino Acid, Peptide, or Protein|Enzyme|Receptor', 'DEVI', 'CHEM']
        else:
            raise ValueError(f"Unknown configuration name: {self.config.name}")
features = datasets.Features(
{
"id": datasets.Value("string"),
"document_id": datasets.Value("string"),
"tokens": [datasets.Value("string")],
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names = names,
)
),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=str(_LICENSE),
citation=_CITATION,
)
def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(_URL)
data_dir = Path(data_dir) / "Mantra-GSC"
language, dataset_type = self.config.name.split("_")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_dir": data_dir,
"language": language,
"dataset_type": dataset_type,
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_dir": data_dir,
"language": language,
"dataset_type": dataset_type,
"split": "validation",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_dir": data_dir,
"language": language,
"dataset_type": dataset_type,
"split": "test",
},
),
]
def convert_to_prodigy(self, json_object):
def prepare_split(text):
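            # Pad punctuation with whitespace so that a plain `split()` yields
            # punctuation marks as separate tokens, then (below) re-attach
            # decimal separators that were split apart inside numbers.
            # Illustrative example: "Dose of 3.5 mg/kg." -> "Dose of 3.5 mg / kg ."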
rep_before = ['?', '!', ';', '*']
rep_after = ['’', "'"]
rep_both = ['-', '/', '[', ']', ':', ')', '(', ',', '.']
for i in rep_before:
text = text.replace(i, ' '+i)
for i in rep_after:
text = text.replace(i, i+' ')
for i in rep_both:
text = text.replace(i, ' '+i+' ')
text_split = text.split()
punctuations = [',', '.']
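            # Re-join numbers whose ',' or '.' separator was padded above,
            # e.g. the tokens "3", ".", "5" become "3.5" again.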
for j in range(0, len(text_split)-1):
if j-1 >= 0 and j+1 <= len(text_split)-1 and text_split[j-1][-1].isdigit() and text_split[j+1][0].isdigit():
if text_split[j] in punctuations:
text_split[j-1:j+2] = [''.join(text_split[j-1:j+2])]
text = ' '.join(text_split)
return text
new_json = []
for ex in [json_object]:
text = prepare_split(ex['text'])
tokenized_text = text.split()
list_spans = []
cpt = 0
for a in ex['entities']:
for o in range(len(a['offsets'])):
text_annot = prepare_split(a['text'][o])
offset_start = a['offsets'][o][0]
offset_end = a['offsets'][o][1]
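                    # Map character offsets to token indices: the number of tokens
                    # before the annotation start gives `token_start`; the
                    # annotation's own token count gives `token_end`.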
nb_tokens_annot = len(text_annot.split())
txt_offsetstart = prepare_split(ex['text'][:offset_start])
nb_tokens_before_annot = len(txt_offsetstart.split())
token_start = nb_tokens_before_annot
token_end = token_start + nb_tokens_annot - 1
list_spans.append({
'start': offset_start,
'end': offset_end,
'token_start': token_start,
'token_end': token_end,
'label': a['type'],
'id': ex['document_id'] + "_" + str(cpt),
'text': a['text'][o],
})
cpt += 1
res = {
'id': ex['document_id'],
'document_id': ex['document_id'],
'text': ex['text'],
'tokens': tokenized_text,
'spans': list_spans
}
new_json.append(res)
return new_json
def convert_to_hf_format(self, json_object):
"""
Le format prends en compte le multilabel en faisant une concaténation avec "_" entre chaque label
"""
dict_out = []
for i in json_object:
nb_tokens = len(i['tokens'])
ner_tags = ['O']*nb_tokens
if 'spans' in i:
for j in i['spans']:
                for x in range(j['token_start'], j['token_end'] + 1):
                    # If the token at index x does not occur in the span text,
                    # assume the token/offset alignment has drifted by one and
                    # tag the previous token instead ("Matériovigilance" is a
                    # known corpus-specific exception to this heuristic).
                    if i['tokens'][x] not in j['text'] and i['tokens'][x] != "Matériovigilance":
                        if ner_tags[x-1] == 'O':
                            ner_tags[x-1] = j['label']
                    else:
                        if ner_tags[x] == 'O':
                            ner_tags[x] = j['label']
                        else:
                            # Only the first label per token is kept; replace this
                            # `pass` with label concatenation (e.g. joined by "_")
                            # to support multi-label tokens.
                            pass
dict_out.append({
'id': i['id'],
'document_id': i['document_id'],
"ner_tags": ner_tags,
"tokens": i['tokens'],
})
return dict_out
def remove_prefix(self, a: str, prefix: str) -> str:
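        # Equivalent to str.removeprefix, which is only available on Python >= 3.9.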
if a.startswith(prefix):
a = a[len(prefix) :]
return a
def parse_brat_file(self, txt_file: Path, annotation_file_suffixes: List[str] = None, parse_notes: bool = False):
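        """Parse a brat-annotated document (.txt plus .a1/.a2/.ann files).

        A minimal sketch of the tab-separated brat line formats handled below
        (values are illustrative):
            T1\tDISO 0 4\tpain                      (text-bound annotation)
            N1\tReference T1 UMLS:C0030193\tPain    (normalization)
            #1\tAnnotatorNotes T1\t(...)            (note, with parse_notes=True)
        """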
example = {}
example["document_id"] = txt_file.with_suffix("").name
with txt_file.open() as f:
example["text"] = f.read()
if annotation_file_suffixes is None:
annotation_file_suffixes = [".a1", ".a2", ".ann"]
if len(annotation_file_suffixes) == 0:
raise AssertionError("At least one suffix for the to-be-read annotation files should be given!")
ann_lines = []
for suffix in annotation_file_suffixes:
annotation_file = txt_file.with_suffix(suffix)
if annotation_file.exists():
with annotation_file.open() as f:
ann_lines.extend(f.readlines())
example["text_bound_annotations"] = []
example["events"] = []
example["relations"] = []
example["equivalences"] = []
example["attributes"] = []
example["normalizations"] = []
if parse_notes:
example["notes"] = []
for line in ann_lines:
line = line.strip()
if not line:
continue
if line.startswith("T"): # Text bound
ann = {}
fields = line.split("\t")
ann["id"] = fields[0]
ann["type"] = fields[1].split()[0]
ann["offsets"] = []
span_str = self.remove_prefix(fields[1], (ann["type"] + " "))
text = fields[2]
for span in span_str.split(";"):
start, end = span.split()
ann["offsets"].append([int(start), int(end)])
# Heuristically split text of discontiguous entities into chunks
ann["text"] = []
if len(ann["offsets"]) > 1:
i = 0
for start, end in ann["offsets"]:
chunk_len = end - start
ann["text"].append(text[i : chunk_len + i])
i += chunk_len
while i < len(text) and text[i] == " ":
i += 1
else:
ann["text"] = [text]
example["text_bound_annotations"].append(ann)
elif line.startswith("E"):
ann = {}
fields = line.split("\t")
ann["id"] = fields[0]
ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
ann["arguments"] = []
for role_ref_id in fields[1].split()[1:]:
argument = {
"role": (role_ref_id.split(":"))[0],
"ref_id": (role_ref_id.split(":"))[1],
}
ann["arguments"].append(argument)
example["events"].append(ann)
elif line.startswith("R"):
ann = {}
fields = line.split("\t")
ann["id"] = fields[0]
ann["type"] = fields[1].split()[0]
ann["head"] = {
"role": fields[1].split()[1].split(":")[0],
"ref_id": fields[1].split()[1].split(":")[1],
}
ann["tail"] = {
"role": fields[1].split()[2].split(":")[0],
"ref_id": fields[1].split()[2].split(":")[1],
}
example["relations"].append(ann)
elif line.startswith("*"):
ann = {}
fields = line.split("\t")
ann["id"] = fields[0]
ann["ref_ids"] = fields[1].split()[1:]
example["equivalences"].append(ann)
elif line.startswith("A") or line.startswith("M"):
ann = {}
fields = line.split("\t")
ann["id"] = fields[0]
info = fields[1].split()
ann["type"] = info[0]
ann["ref_id"] = info[1]
if len(info) > 2:
ann["value"] = info[2]
else:
ann["value"] = ""
example["attributes"].append(ann)
elif line.startswith("N"):
ann = {}
fields = line.split("\t")
ann["id"] = fields[0]
ann["text"] = fields[2]
info = fields[1].split()
ann["type"] = info[0]
ann["ref_id"] = info[1]
ann["resource_name"] = info[2].split(":")[0]
ann["cuid"] = info[2].split(":")[1]
example["normalizations"].append(ann)
elif parse_notes and line.startswith("#"):
ann = {}
fields = line.split("\t")
ann["id"] = fields[0]
ann["text"] = fields[2] if len(fields) == 3 else "<BB_NULL_STR>"
info = fields[1].split()
ann["type"] = info[0]
ann["ref_id"] = info[1]
example["notes"].append(ann)
return example
def _generate_examples(self, data_dir, language, dataset_type, split):
"""Yields examples as (key, example) tuples."""
data_dir = data_dir / f"{_LANGUAGES_2[language]}"
if dataset_type in ["patents", "emea"]:
data_dir = data_dir / f"{_DATASET_TYPES[dataset_type]}_ec22-cui-best_man"
else:
# Medline
if language != "en":
data_dir = (
data_dir
/ f"{_DATASET_TYPES[dataset_type]}_EN_{language.upper()}_ec22-cui-best_man"
)
else:
data_dir = [
data_dir
/ f"{_DATASET_TYPES[dataset_type]}_EN_{_lang.upper()}_ec22-cui-best_man"
for _lang in _LANGUAGES_2
if _lang != "en"
]
if not isinstance(data_dir, list):
data_dir: List[Path] = [data_dir]
raw_files = [raw_file for _dir in data_dir for raw_file in _dir.glob("*.txt")]
all_res = []
for i, raw_file in enumerate(raw_files):
brat_example = self.parse_brat_file(raw_file, parse_notes=True)
source_example = self._to_source_example(brat_example)
prod_format = self.convert_to_prodigy(source_example)
hf_format = self.convert_to_hf_format(prod_format)[0]
all_res.append(hf_format)
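        # Deterministic 70/10/20 train/validation/test split over document ids.
        # The fixed seed (and the repeated shuffling) is kept as-is so that split
        # membership stays reproducible across runs.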
ids = [r["id"] for r in all_res]
random.seed(4)
random.shuffle(ids)
random.shuffle(ids)
random.shuffle(ids)
train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
if split == "train":
allowed_ids = list(train)
elif split == "validation":
allowed_ids = list(validation)
elif split == "test":
allowed_ids = list(test)
for r in all_res:
identifier = r["id"]
if identifier in allowed_ids:
yield identifier, r
def _to_source_example(self, brat_example):
source_example = {
"document_id": brat_example["document_id"],
"text": brat_example["text"],
}
source_example["entities"] = []
for entity_annotation, ann_notes in zip(brat_example["text_bound_annotations"], brat_example["notes"]):
entity_ann = entity_annotation.copy()
entity_ann["entity_id"] = entity_ann["id"]
entity_ann.pop("id")
# Get values from annotator notes
assert entity_ann["entity_id"] == ann_notes["ref_id"]
notes_values = ast.literal_eval(ann_notes["text"])
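            # Notes hold either (cui, preferred_term, semantic_type, semantic_group)
            # or only the last three fields, in which case the brat "type" field
            # already carries the CUI.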
if len(notes_values) == 4:
cui, preferred_term, semantic_type, semantic_group = notes_values
else:
preferred_term, semantic_type, semantic_group = notes_values
cui = entity_ann["type"]
entity_ann["cui"] = cui
entity_ann["preferred_term"] = preferred_term
entity_ann["semantic_type"] = semantic_type
entity_ann["type"] = semantic_group
entity_ann["normalized"] = [{"db_name": "UMLS", "db_id": cui}]
source_example["entities"].append(entity_ann)
return source_example
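# Minimal usage sketch (the script path below is illustrative; point it at this
# file or at the Hub repository actually hosting it):
#
#     from datasets import load_dataset
#     dataset = load_dataset("path/to/MANTRAGSC.py", name="fr_medline")
#     print(dataset["train"][0])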