Update MANTRAGSC.py
MANTRAGSC.py  +63 -97  CHANGED
@@ -16,14 +16,16 @@
 import re
 import ast
 import json
+import random
 from pathlib import Path
 from itertools import product
 from dataclasses import dataclass
 from typing import Dict, List, Tuple
 
-
 import datasets
 
+import numpy as np
+
 _CITATION = """\
 @article{10.1093/jamia/ocv037,
     author = {Kors, Jan A and Clematide, Simon and Akhondi,
@@ -116,30 +118,14 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
 
     def _info(self) -> datasets.DatasetInfo:
 
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "document_id": datasets.Value("string"),
-                "text": datasets.Value("string"),
-                "entities": [
-                    {
-                        "entity_id": datasets.Value("string"),
-                        "type": datasets.Value("string"),
-                        "offsets": datasets.Sequence([datasets.Value("int32")]),
-                        "text": datasets.Sequence(datasets.Value("string")),
-                        "cui": datasets.Value("string"),
-                        "preferred_term": datasets.Value("string"),
-                        "semantic_type": datasets.Value("string"),
-                        "normalized": [
-                            {
-                                "db_name": datasets.Value("string"),
-                                "db_id": datasets.Value("string"),
-                            }
-                        ],
-                    }
-                ],
-            }
-        )
+        features = datasets.Features(
+            {
+                "id": datasets.Value("string"),
+                "document_id": datasets.Value("string"),
+                "tokens": [datasets.Value("string")],
+                "ner_tags": [datasets.Value("string")],
+            }
+        )
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
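The rewritten _info drops the nested bigbio-style entity schema in favor of a flat token-classification layout. As a rough sketch of what a generated row now looks like (all values below are invented for illustration, not taken from the corpus):

    example = {
        "id": "doc_1",
        "document_id": "doc_1",
        "tokens": ["Patient", "with", "severe", "asthma"],
        "ner_tags": ["O", "O", "O", "DISO"],
    }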
@@ -151,17 +137,12 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
 
-        print("1 - " + "*"*50)
-        print(_URL)
         data_dir = dl_manager.download_and_extract(_URL)
 
-        print("2 - " + "*"*50)
         data_dir = Path(data_dir) / "Mantra-GSC"
 
-        print("3 - " + "*"*50)
         language, dataset_type = self.config.name.split("_")
 
-        print("4 - " + "*"*50)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -169,6 +150,25 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
                     "data_dir": data_dir,
                     "language": language,
                     "dataset_type": dataset_type,
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "data_dir": data_dir,
+                    "language": language,
+                    "dataset_type": dataset_type,
+                    "split": "validation",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "data_dir": data_dir,
+                    "language": language,
+                    "dataset_type": dataset_type,
+                    "split": "test",
                 },
             ),
         ]
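With train, validation, and test generators all registered, every split can be requested in one call. A minimal usage sketch, assuming the script is run as a local loading script and that fr_medline is a valid "<language>_<dataset_type>" config name (both are assumptions, not shown in this diff):

    import datasets

    # Config names follow the "<language>_<dataset_type>" pattern that
    # self.config.name.split("_") expects; "fr_medline" is assumed here.
    ds = datasets.load_dataset("./MANTRAGSC.py", name="fr_medline")
    print(ds["train"].num_rows, ds["validation"].num_rows, ds["test"].num_rows)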
@@ -202,24 +202,19 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
 
             return text
 
-        # print(json.dumps(json_object, sort_keys=True, indent=4))
         new_json = []
 
         for ex in [json_object]:
 
-            # print(json.dumps(ex, sort_keys=True, indent=4))
-
             text = prepare_split(ex['text'])
 
             tokenized_text = text.split()
-            print(tokenized_text)
 
             list_spans = []
 
             cpt = 0
 
             for a in ex['entities']:
-                # for a in ex['text_bound_annotations']:
 
                 for o in range(len(a['offsets'])):
 
@@ -262,7 +257,6 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
         return new_json
 
     def convert_to_hf_format(self, json_object):
-    # def convert_to_hf_format(self, json_object, list_label):
         """
         The format supports multi-label tokens by concatenating the labels with "_"
         """
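The docstring's convention can be made concrete with a toy sketch (an illustrative helper, not the loader's own code): when a token that already carries a label receives a second one, the two are joined with an underscore.

    def merge_label(current: str, new: str) -> str:
        # An unlabeled token ("O") is overwritten; otherwise concatenate with "_".
        return new if current == "O" else current + "_" + new

    print(merge_label("O", "DISO"))     # DISO
    print(merge_label("DISO", "CHEM"))  # DISO_CHEM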
@@ -271,8 +265,6 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
 
         for i in json_object:
 
-            print("#"*50)
-
             nb_tokens = len(i['tokens'])
 
             ner_tags = ['O']*nb_tokens
@@ -281,32 +273,10 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
 
             for j in i['spans']:
 
-                print(j)
-
-                # if j['text'] != ' '.join(i['tokens'][j['token_start']:j['token_end']+1]):
-                # print(j)
-                # print(j['id'])
-                # print(j['text'])
-                # print(' '.join(i['tokens'][j['token_start']:j['token_end']+1]))
-                # print()
-
-                # for x in range(j['token_start'], j['token_end'], 1):
                 for x in range(j['token_start'], j['token_end']+1, 1):
 
-                    # if j['label'] in list_label:
-                    # if j['text'] != ' '.join(i['tokens'][j['token_start']:j['token_end']+1]):
-                    print("x: ", x)
-                    print("t: ", i['tokens'][x])
-                    print("n: ", j['label'])
-                    print()
-
-                    # x -= 1
                     if i['tokens'][x] not in j['text'] and i['tokens'][x] != "Matériovigilance":
-
-                        print("x: ", x-1)
-                        print("t: ", i['tokens'][x-1])
-                        print("n: ", j['label'])
-                        print()
+
                         if ner_tags[x-1] == 'O':
                             ner_tags[x-1] = j['label']
                         else:
@@ -317,8 +287,7 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
                         else:
                             # Comment out this line and use pass if you only want one label per token
                             pass
-
-
+
             dict_out.append({
                 'id': i['id'],
                 'document_id': i['document_id'],
@@ -340,15 +309,11 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
         with txt_file.open() as f:
             example["text"] = f.read()
 
-        # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
-        # for event extraction
         if annotation_file_suffixes is None:
             annotation_file_suffixes = [".a1", ".a2", ".ann"]
 
         if len(annotation_file_suffixes) == 0:
-            raise AssertionError(
-                "At least one suffix for the to-be-read annotation files should be given!"
-            )
+            raise AssertionError("At least one suffix for the to-be-read annotation files should be given!")
 
         ann_lines = []
         for suffix in annotation_file_suffixes:
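The suffix list controls which brat companion files are read next to each .txt document. A self-contained sketch of that gathering step (the corpus/doc_1.txt path is hypothetical):

    from pathlib import Path

    txt_file = Path("corpus/doc_1.txt")  # hypothetical document path
    ann_lines = []
    for suffix in [".a1", ".a2", ".ann"]:
        ann_file = txt_file.with_suffix(suffix)
        if ann_file.exists():
            ann_lines.extend(ann_file.read_text().splitlines())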
@@ -436,10 +401,6 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
 
             example["relations"].append(ann)
 
-        # '*' seems to be the legacy way to mark equivalences,
-        # but I couldn't find any info on the current way
-        # this might have to be adapted dependent on the brat version
-        # of the annotation
         elif line.startswith("*"):
             ann = {}
             fields = line.split("\t")
@@ -493,19 +454,18 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
             ann["type"] = info[0]
             ann["ref_id"] = info[1]
             example["notes"].append(ann)
-        return example
 
+        return example
 
-    def _generate_examples(
-        self, data_dir: Path, language: str, dataset_type: str
-    ) -> Tuple[int, Dict]:
+    def _generate_examples(self, data_dir: Path, language: str, dataset_type: str, split: str):
         """Yields examples as (key, example) tuples."""
+
         data_dir = data_dir / f"{_LANGUAGES_2[language]}"
 
         if dataset_type in ["patents", "emea"]:
             data_dir = data_dir / f"{_DATASET_TYPES[dataset_type]}_ec22-cui-best_man"
         else:
-            #
+            # Medline
             if language != "en":
                 data_dir = (
                     data_dir
@@ -531,24 +491,31 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
             source_example = self._to_source_example(brat_example)
 
             prod_format = self.convert_to_prodigy(source_example)
-            # print(prod_format)
-            # print()
 
             hf_format = self.convert_to_hf_format(prod_format)[0]
-
-
-
-
-
-
-
-
-
-
-            # _LABELS_BASE,
-            # )
-            yield i, source_example
+            all_res.append(hf_format)
+
+        ids = [r["id"] for r in all_res]
+
+        random.seed(4)
+        random.shuffle(ids)
+        random.shuffle(ids)
+        random.shuffle(ids)
+
+        train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
 
+        if split == "train":
+            allowed_ids = list(train)
+        elif split == "validation":
+            allowed_ids = list(validation)
+        elif split == "test":
+            allowed_ids = list(test)
+
+        for r in all_res:
+            identifier = r["id"]
+            if identifier in allowed_ids:
+                yield identifier, r
+
 
     def _to_source_example(self, brat_example: Dict) -> Dict:
 
         source_example = {
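The two cut points handed to np.split sit at 70% and 80% of the shuffled id list, which yields a 70/10/20 train/validation/test partition. A quick self-contained check of that arithmetic (toy ids, not corpus ids):

    import random
    import numpy as np

    ids = [f"doc_{n}" for n in range(100)]
    random.seed(4)
    random.shuffle(ids)

    train, validation, test = np.split(ids, [int(len(ids) * 0.70), int(len(ids) * 0.80)])
    print(len(train), len(validation), len(test))  # 70 10 20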
@@ -558,30 +525,29 @@ class MantraGSC(datasets.GeneratorBasedBuilder):
 
         source_example["entities"] = []
 
-        for entity_annotation, ann_notes in zip(
-            brat_example["text_bound_annotations"], brat_example["notes"]
-        ):
+        for entity_annotation, ann_notes in zip(brat_example["text_bound_annotations"], brat_example["notes"]):
+
             entity_ann = entity_annotation.copy()
 
-            # Change id property name
             entity_ann["entity_id"] = entity_ann["id"]
             entity_ann.pop("id")
 
             # Get values from annotator notes
             assert entity_ann["entity_id"] == ann_notes["ref_id"]
             notes_values = ast.literal_eval(ann_notes["text"])
+
             if len(notes_values) == 4:
                 cui, preferred_term, semantic_type, semantic_group = notes_values
             else:
                 preferred_term, semantic_type, semantic_group = notes_values
                 cui = entity_ann["type"]
+
             entity_ann["cui"] = cui
             entity_ann["preferred_term"] = preferred_term
             entity_ann["semantic_type"] = semantic_type
             entity_ann["type"] = semantic_group
             entity_ann["normalized"] = [{"db_name": "UMLS", "db_id": cui}]
 
-            # Add entity annotation to sample
             source_example["entities"].append(entity_ann)
 
         return source_example
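ast.literal_eval works here because each annotator note stores a Python-literal tuple; the branch on len(notes_values) covers notes that do and do not carry an explicit CUI. A sketch with an invented note string (not taken from the corpus):

    import ast

    note_text = "('C0004096', 'Asthma', 'T047', 'DISO')"  # hypothetical note payload
    notes_values = ast.literal_eval(note_text)
    if len(notes_values) == 4:
        cui, preferred_term, semantic_type, semantic_group = notes_values
        print(cui, semantic_group)  # prints: C0004096 DISO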