qanastek commited on
Commit
5364118
·
1 Parent(s): 335fde2

Update MantraGSC.py

Browse files
Files changed (1) hide show
  1. MantraGSC.py +536 -360
MantraGSC.py CHANGED
@@ -13,44 +13,47 @@
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
 
 
16
  import ast
 
17
  from pathlib import Path
18
  from itertools import product
19
  from dataclasses import dataclass
20
  from typing import Dict, List, Tuple
21
 
 
22
  import datasets
23
 
24
  _CITATION = """\
25
  @article{10.1093/jamia/ocv037,
26
- author = {Kors, Jan A and Clematide, Simon and Akhondi,
27
- Saber A and van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
28
- title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
29
- journal = {Journal of the American Medical Informatics Association},
30
- volume = {22},
31
- number = {5},
32
- pages = {948-956},
33
- year = {2015},
34
- month = {05},
35
- abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition.Materials
36
- and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
37
- biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
38
- independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
39
- covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
40
- preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
41
- cross-language consistency checks were carried out to arrive at the final annotations.Results The number of final
42
- annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
43
- similar to those between individual annotators and the gold standard. The automatically generated harmonized
44
- annotation set for each language performed equally well as the best annotator for that language.Discussion The use
45
- of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
46
- efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
47
- of automatic annotation techniques.Conclusion To our knowledge, this is the first gold-standard corpus for
48
- biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
49
- of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
50
- issn = {1067-5027},
51
- doi = {10.1093/jamia/ocv037},
52
- url = {https://doi.org/10.1093/jamia/ocv037},
53
- eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
54
  }
55
  """
56
 
@@ -67,345 +70,518 @@ _LICENSE = "CC_BY_4p0"
67
  _URL = "http://biosemantics.org/MantraGSC/Mantra-GSC.zip"
68
 
69
  _LANGUAGES_2 = {
70
- "es": "Spanish",
71
- "fr": "French",
72
- "de": "German",
73
- "nl": "Dutch",
74
- "en": "English",
75
  }
76
 
77
  _DATASET_TYPES = {
78
- "emea": "EMEA",
79
- "medline": "Medline",
80
- "patents": "Patents",
81
  }
82
 
83
  @dataclass
84
  class DrBenchmarkConfig(datasets.BuilderConfig):
85
- name: str = None
86
- version: datasets.Version = None
87
- description: str = None
88
- schema: str = None
89
- subset_id: str = None
90
 
91
  class MantraGSC(datasets.GeneratorBasedBuilder):
92
 
93
- SOURCE_VERSION = datasets.Version("1.0.0")
94
-
95
- BUILDER_CONFIGS = []
96
-
97
- for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
98
-
99
- if dataset_type == "patents" and language in ["nl", "es"]:
100
- continue
101
-
102
- BUILDER_CONFIGS.append(
103
- DrBenchmarkConfig(
104
- name=f"{language}_{dataset_type}",
105
- version=SOURCE_VERSION,
106
- description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
107
- schema="source",
108
- subset_id=f"{language}_{_DATASET_TYPES[dataset_type]}",
109
- )
110
- )
111
-
112
- DEFAULT_CONFIG_NAME = "fr_medline"
113
-
114
- def _info(self) -> datasets.DatasetInfo:
115
-
116
- if self.config.schema == "source":
117
- features = datasets.Features(
118
- {
119
- "document_id": datasets.Value("string"),
120
- "text": datasets.Value("string"),
121
- "entities": [
122
- {
123
- "entity_id": datasets.Value("string"),
124
- "type": datasets.Value("string"),
125
- "offsets": datasets.Sequence([datasets.Value("int32")]),
126
- "text": datasets.Sequence(datasets.Value("string")),
127
- "cui": datasets.Value("string"),
128
- "preferred_term": datasets.Value("string"),
129
- "semantic_type": datasets.Value("string"),
130
- "normalized": [
131
- {
132
- "db_name": datasets.Value("string"),
133
- "db_id": datasets.Value("string"),
134
- }
135
- ],
136
- }
137
- ],
138
- }
139
- )
140
-
141
- return datasets.DatasetInfo(
142
- description=_DESCRIPTION,
143
- features=features,
144
- homepage=_HOMEPAGE,
145
- license=str(_LICENSE),
146
- citation=_CITATION,
147
- )
148
-
149
- def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
150
-
151
- print("1 - " + "*"*50)
152
- print(_URL)
153
- data_dir = dl_manager.download_and_extract(_URL)
154
-
155
- print("2 - " + "*"*50)
156
- data_dir = Path(data_dir) / "Mantra-GSC"
157
-
158
- print("3 - " + "*"*50)
159
- language, dataset_type = self.config.name.split("_")
160
-
161
- print("4 - " + "*"*50)
162
- return [
163
- datasets.SplitGenerator(
164
- name=datasets.Split.TRAIN,
165
- gen_kwargs={
166
- "data_dir": data_dir,
167
- "language": language,
168
- "dataset_type": dataset_type,
169
- },
170
- ),
171
- ]
172
-
173
- def remove_prefix(self, a: str, prefix: str) -> str:
174
- if a.startswith(prefix):
175
- a = a[len(prefix) :]
176
- return a
177
-
178
- def parse_brat_file(self, txt_file: Path, annotation_file_suffixes: List[str] = None, parse_notes: bool = False) -> Dict:
179
-
180
- example = {}
181
- example["document_id"] = txt_file.with_suffix("").name
182
- with txt_file.open() as f:
183
- example["text"] = f.read()
184
-
185
- # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
186
- # for event extraction
187
- if annotation_file_suffixes is None:
188
- annotation_file_suffixes = [".a1", ".a2", ".ann"]
189
-
190
- if len(annotation_file_suffixes) == 0:
191
- raise AssertionError(
192
- "At least one suffix for the to-be-read annotation files should be given!"
193
- )
194
-
195
- ann_lines = []
196
- for suffix in annotation_file_suffixes:
197
- annotation_file = txt_file.with_suffix(suffix)
198
- if annotation_file.exists():
199
- with annotation_file.open() as f:
200
- ann_lines.extend(f.readlines())
201
-
202
- example["text_bound_annotations"] = []
203
- example["events"] = []
204
- example["relations"] = []
205
- example["equivalences"] = []
206
- example["attributes"] = []
207
- example["normalizations"] = []
208
-
209
- if parse_notes:
210
- example["notes"] = []
211
-
212
- for line in ann_lines:
213
- line = line.strip()
214
- if not line:
215
- continue
216
-
217
- if line.startswith("T"): # Text bound
218
- ann = {}
219
- fields = line.split("\t")
220
-
221
- ann["id"] = fields[0]
222
- ann["type"] = fields[1].split()[0]
223
- ann["offsets"] = []
224
- span_str = self.remove_prefix(fields[1], (ann["type"] + " "))
225
- text = fields[2]
226
- for span in span_str.split(";"):
227
- start, end = span.split()
228
- ann["offsets"].append([int(start), int(end)])
229
-
230
- # Heuristically split text of discontiguous entities into chunks
231
- ann["text"] = []
232
- if len(ann["offsets"]) > 1:
233
- i = 0
234
- for start, end in ann["offsets"]:
235
- chunk_len = end - start
236
- ann["text"].append(text[i : chunk_len + i])
237
- i += chunk_len
238
- while i < len(text) and text[i] == " ":
239
- i += 1
240
- else:
241
- ann["text"] = [text]
242
-
243
- example["text_bound_annotations"].append(ann)
244
-
245
- elif line.startswith("E"):
246
- ann = {}
247
- fields = line.split("\t")
248
-
249
- ann["id"] = fields[0]
250
-
251
- ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
252
-
253
- ann["arguments"] = []
254
- for role_ref_id in fields[1].split()[1:]:
255
- argument = {
256
- "role": (role_ref_id.split(":"))[0],
257
- "ref_id": (role_ref_id.split(":"))[1],
258
- }
259
- ann["arguments"].append(argument)
260
-
261
- example["events"].append(ann)
262
-
263
- elif line.startswith("R"):
264
- ann = {}
265
- fields = line.split("\t")
266
-
267
- ann["id"] = fields[0]
268
- ann["type"] = fields[1].split()[0]
269
-
270
- ann["head"] = {
271
- "role": fields[1].split()[1].split(":")[0],
272
- "ref_id": fields[1].split()[1].split(":")[1],
273
- }
274
- ann["tail"] = {
275
- "role": fields[1].split()[2].split(":")[0],
276
- "ref_id": fields[1].split()[2].split(":")[1],
277
- }
278
-
279
- example["relations"].append(ann)
280
-
281
- # '*' seems to be the legacy way to mark equivalences,
282
- # but I couldn't find any info on the current way
283
- # this might have to be adapted dependent on the brat version
284
- # of the annotation
285
- elif line.startswith("*"):
286
- ann = {}
287
- fields = line.split("\t")
288
-
289
- ann["id"] = fields[0]
290
- ann["ref_ids"] = fields[1].split()[1:]
291
-
292
- example["equivalences"].append(ann)
293
-
294
- elif line.startswith("A") or line.startswith("M"):
295
- ann = {}
296
- fields = line.split("\t")
297
-
298
- ann["id"] = fields[0]
299
-
300
- info = fields[1].split()
301
- ann["type"] = info[0]
302
- ann["ref_id"] = info[1]
303
-
304
- if len(info) > 2:
305
- ann["value"] = info[2]
306
- else:
307
- ann["value"] = ""
308
-
309
- example["attributes"].append(ann)
310
-
311
- elif line.startswith("N"):
312
- ann = {}
313
- fields = line.split("\t")
314
-
315
- ann["id"] = fields[0]
316
- ann["text"] = fields[2]
317
-
318
- info = fields[1].split()
319
-
320
- ann["type"] = info[0]
321
- ann["ref_id"] = info[1]
322
- ann["resource_name"] = info[2].split(":")[0]
323
- ann["cuid"] = info[2].split(":")[1]
324
- example["normalizations"].append(ann)
325
-
326
- elif parse_notes and line.startswith("#"):
327
- ann = {}
328
- fields = line.split("\t")
329
-
330
- ann["id"] = fields[0]
331
- ann["text"] = fields[2] if len(fields) == 3 else "<BB_NULL_STR>"
332
-
333
- info = fields[1].split()
334
-
335
- ann["type"] = info[0]
336
- ann["ref_id"] = info[1]
337
- example["notes"].append(ann)
338
-
339
- return example
340
-
341
-
342
- def _generate_examples(
343
- self, data_dir: Path, language: str, dataset_type: str
344
- ) -> Tuple[int, Dict]:
345
- """Yields examples as (key, example) tuples."""
346
- data_dir = data_dir / f"{_LANGUAGES_2[language]}"
347
-
348
- if dataset_type in ["patents", "emea"]:
349
- data_dir = data_dir / f"{_DATASET_TYPES[dataset_type]}_ec22-cui-best_man"
350
- else:
351
- # It is Medline now
352
- if language != "en":
353
- data_dir = (
354
- data_dir
355
- / f"{_DATASET_TYPES[dataset_type]}_EN_{language.upper()}_ec22-cui-best_man"
356
- )
357
- else:
358
- data_dir = [
359
- data_dir
360
- / f"{_DATASET_TYPES[dataset_type]}_EN_{_lang.upper()}_ec22-cui-best_man"
361
- for _lang in _LANGUAGES_2
362
- if _lang != "en"
363
- ]
364
-
365
- if not isinstance(data_dir, list):
366
- data_dir: List[Path] = [data_dir]
367
-
368
- raw_files = [raw_file for _dir in data_dir for raw_file in _dir.glob("*.txt")]
369
-
370
- if self.config.schema == "source":
371
- for i, raw_file in enumerate(raw_files):
372
- brat_example = self.parse_brat_file(raw_file, parse_notes=True)
373
- source_example = self._to_source_example(brat_example)
374
- yield i, source_example
375
-
376
- def _to_source_example(self, brat_example: Dict) -> Dict:
377
-
378
- source_example = {
379
- "document_id": brat_example["document_id"],
380
- "text": brat_example["text"],
381
- }
382
-
383
- source_example["entities"] = []
384
-
385
- for entity_annotation, ann_notes in zip(
386
- brat_example["text_bound_annotations"], brat_example["notes"]
387
- ):
388
- entity_ann = entity_annotation.copy()
389
-
390
- # Change id property name
391
- entity_ann["entity_id"] = entity_ann["id"]
392
- entity_ann.pop("id")
393
-
394
- # Get values from annotator notes
395
- assert entity_ann["entity_id"] == ann_notes["ref_id"]
396
- notes_values = ast.literal_eval(ann_notes["text"])
397
- if len(notes_values) == 4:
398
- cui, preferred_term, semantic_type, semantic_group = notes_values
399
- else:
400
- preferred_term, semantic_type, semantic_group = notes_values
401
- cui = entity_ann["type"]
402
- entity_ann["cui"] = cui
403
- entity_ann["preferred_term"] = preferred_term
404
- entity_ann["semantic_type"] = semantic_type
405
- entity_ann["type"] = semantic_group
406
- entity_ann["normalized"] = [{"db_name": "UMLS", "db_id": cui}]
407
-
408
- # Add entity annotation to sample
409
- source_example["entities"].append(entity_ann)
410
-
411
- return source_example
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
 
16
import ast
import json
import re
from dataclasses import dataclass
from itertools import product
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import datasets
26
 
27
  _CITATION = """\
28
  @article{10.1093/jamia/ocv037,
29
+ author = {Kors, Jan A and Clematide, Simon and Akhondi,
30
+ Saber A and van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
31
+ title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
32
+ journal = {Journal of the American Medical Informatics Association},
33
+ volume = {22},
34
+ number = {5},
35
+ pages = {948-956},
36
+ year = {2015},
37
+ month = {05},
38
+ abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition.Materials
39
+ and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
40
+ biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
41
+ independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
42
+ covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
43
+ preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
44
+ cross-language consistency checks were carried out to arrive at the final annotations.Results The number of final
45
+ annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
46
+ similar to those between individual annotators and the gold standard. The automatically generated harmonized
47
+ annotation set for each language performed equally well as the best annotator for that language.Discussion The use
48
+ of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
49
+ efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
50
+ of automatic annotation techniques.Conclusion To our knowledge, this is the first gold-standard corpus for
51
+ biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
52
+ of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
53
+ issn = {1067-5027},
54
+ doi = {10.1093/jamia/ocv037},
55
+ url = {https://doi.org/10.1093/jamia/ocv037},
56
+ eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
57
  }
58
  """
59
 
 
70
  _URL = "http://biosemantics.org/MantraGSC/Mantra-GSC.zip"
71
 
72
  _LANGUAGES_2 = {
73
+ "es": "Spanish",
74
+ "fr": "French",
75
+ "de": "German",
76
+ "nl": "Dutch",
77
+ "en": "English",
78
  }
79
 
80
  _DATASET_TYPES = {
81
+ "emea": "EMEA",
82
+ "medline": "Medline",
83
+ "patents": "Patents",
84
  }
85
 
86
@dataclass
class DrBenchmarkConfig(datasets.BuilderConfig):
    """BuilderConfig for one Mantra GSC subset (language x sub-corpus).

    Every field defaults to ``None`` so the annotations are ``Optional``
    (the originals claimed plain ``str``/``Version`` while defaulting to
    ``None``, which was incorrect typing).
    """

    name: Optional[str] = None
    version: Optional[datasets.Version] = None
    description: Optional[str] = None
    # "source" is the only schema the loader currently implements.
    schema: Optional[str] = None
    # e.g. "fr_Medline"; note the capitalized corpus part, unlike `name`.
    subset_id: Optional[str] = None
93
 
94
class MantraGSC(datasets.GeneratorBasedBuilder):
    """Loader for the Mantra GSC corpus: one config per (language, sub-corpus)."""

    SOURCE_VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = []
    for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
        # The archive ships no Dutch or Spanish patent texts; skip those pairs.
        if dataset_type == "patents" and language in ["nl", "es"]:
            continue

        BUILDER_CONFIGS.append(
            DrBenchmarkConfig(
                name=f"{language}_{dataset_type}",
                version=SOURCE_VERSION,
                description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
                schema="source",
                subset_id=f"{language}_{_DATASET_TYPES[dataset_type]}",
            )
        )

    DEFAULT_CONFIG_NAME = "fr_medline"
116
+
117
+ def _info(self) -> datasets.DatasetInfo:
118
+
119
+ if self.config.schema == "source":
120
+ features = datasets.Features(
121
+ {
122
+ "document_id": datasets.Value("string"),
123
+ "text": datasets.Value("string"),
124
+ "entities": [
125
+ {
126
+ "entity_id": datasets.Value("string"),
127
+ "type": datasets.Value("string"),
128
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
129
+ "text": datasets.Sequence(datasets.Value("string")),
130
+ "cui": datasets.Value("string"),
131
+ "preferred_term": datasets.Value("string"),
132
+ "semantic_type": datasets.Value("string"),
133
+ "normalized": [
134
+ {
135
+ "db_name": datasets.Value("string"),
136
+ "db_id": datasets.Value("string"),
137
+ }
138
+ ],
139
+ }
140
+ ],
141
+ }
142
+ )
143
+
144
+ return datasets.DatasetInfo(
145
+ description=_DESCRIPTION,
146
+ features=features,
147
+ homepage=_HOMEPAGE,
148
+ license=str(_LICENSE),
149
+ citation=_CITATION,
150
+ )
151
+
152
+ def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
153
+
154
+ print("1 - " + "*"*50)
155
+ print(_URL)
156
+ data_dir = dl_manager.download_and_extract(_URL)
157
+
158
+ print("2 - " + "*"*50)
159
+ data_dir = Path(data_dir) / "Mantra-GSC"
160
+
161
+ print("3 - " + "*"*50)
162
+ language, dataset_type = self.config.name.split("_")
163
+
164
+ print("4 - " + "*"*50)
165
+ return [
166
+ datasets.SplitGenerator(
167
+ name=datasets.Split.TRAIN,
168
+ gen_kwargs={
169
+ "data_dir": data_dir,
170
+ "language": language,
171
+ "dataset_type": dataset_type,
172
+ },
173
+ ),
174
+ ]
175
+
176
+ def convert_to_prodigy(self, json_object):
177
+
178
+ def prepare_split(text):
179
+
180
+ rep_before = ['?', '!', ';', '*']
181
+ rep_after = ['’', "'"]
182
+ rep_both = ['-', '/', '[', ']', ':', ')', '(', ',', '.']
183
+
184
+ for i in rep_before:
185
+ text = text.replace(i, ' '+i)
186
+
187
+ for i in rep_after:
188
+ text = text.replace(i, i+' ')
189
+
190
+ for i in rep_both:
191
+ text = text.replace(i, ' '+i+' ')
192
+
193
+ text_split = text.split()
194
+
195
+ punctuations = [',', '.']
196
+ for j in range(0, len(text_split)-1):
197
+ if j-1 >= 0 and j+1 <= len(text_split)-1 and text_split[j-1][-1].isdigit() and text_split[j+1][0].isdigit():
198
+ if text_split[j] in punctuations:
199
+ text_split[j-1:j+2] = [''.join(text_split[j-1:j+2])]
200
+
201
+ text = ' '.join(text_split)
202
+
203
+ return text
204
+
205
+ # print(json.dumps(json_object, sort_keys=True, indent=4))
206
+ new_json = []
207
+
208
+ for ex in [json_object]:
209
+
210
+ # print(json.dumps(ex, sort_keys=True, indent=4))
211
+
212
+ text = prepare_split(ex['text'])
213
+
214
+ tokenized_text = text.split()
215
+ print(tokenized_text)
216
+
217
+ list_spans = []
218
+
219
+ cpt = 0
220
+
221
+ for a in ex['entities']:
222
+ # for a in ex['text_bound_annotations']:
223
+
224
+ for o in range(len(a['offsets'])):
225
+
226
+ text_annot = prepare_split(a['text'][o])
227
+
228
+ offset_start = a['offsets'][o][0]
229
+ offset_end = a['offsets'][o][1]
230
+
231
+ nb_tokens_annot = len(text_annot.split())
232
+
233
+ txt_offsetstart = prepare_split(ex['text'][:offset_start])
234
+
235
+ nb_tokens_before_annot = len(txt_offsetstart.split())
236
+
237
+ token_start = nb_tokens_before_annot
238
+ token_end = token_start + nb_tokens_annot - 1
239
+
240
+ list_spans.append({
241
+ 'start': offset_start,
242
+ 'end': offset_end,
243
+ 'token_start': token_start,
244
+ 'token_end': token_end,
245
+ 'label': a['type'],
246
+ 'id': ex['document_id'] + "_" + str(cpt),
247
+ 'text': a['text'][o],
248
+ })
249
+
250
+ cpt += 1
251
+
252
+ res = {
253
+ 'id': ex['document_id'],
254
+ 'document_id': ex['document_id'],
255
+ 'text': ex['text'],
256
+ 'tokens': tokenized_text,
257
+ 'spans': list_spans
258
+ }
259
+
260
+ new_json.append(res)
261
+
262
+ return new_json
263
+
264
+ def convert_to_hf_format(self, json_object):
265
+ # def convert_to_hf_format(self, json_object, list_label):
266
+ """
267
+ Le format prends en compte le multilabel en faisant une concaténation avec "_" entre chaque label
268
+ """
269
+
270
+ dict_out = []
271
+
272
+ for i in json_object:
273
+
274
+ print("#"*50)
275
+
276
+ nb_tokens = len(i['tokens'])
277
+
278
+ ner_tags = ['O']*nb_tokens
279
+
280
+ if 'spans' in i:
281
+
282
+ for j in i['spans']:
283
+
284
+ print(j)
285
+
286
+ # if j['text'] != ' '.join(i['tokens'][j['token_start']:j['token_end']+1]):
287
+ # print(j)
288
+ # print(j['id'])
289
+ # print(j['text'])
290
+ # print(' '.join(i['tokens'][j['token_start']:j['token_end']+1]))
291
+ # print()
292
+
293
+ # for x in range(j['token_start'], j['token_end'], 1):
294
+ for x in range(j['token_start'], j['token_end']+1, 1):
295
+
296
+ # if j['label'] in list_label:
297
+ # if j['text'] != ' '.join(i['tokens'][j['token_start']:j['token_end']+1]):
298
+ print("x: ", x)
299
+ print("t: ", i['tokens'][x])
300
+ print("n: ", j['label'])
301
+ print()
302
+
303
+ # x -= 1
304
+ if i['tokens'][x] not in j['text'] and i['tokens'][x] != "Matériovigilance":
305
+ print("Mots entiers")
306
+ print("x: ", x-1)
307
+ print("t: ", i['tokens'][x-1])
308
+ print("n: ", j['label'])
309
+ print()
310
+ if ner_tags[x-1] == 'O':
311
+ ner_tags[x-1] = j['label']
312
+ else:
313
+ pass
314
+ else:
315
+ if ner_tags[x] == 'O':
316
+ ner_tags[x] = j['label']
317
+ else:
318
+ # Commenter la ligne et mettre pass si on veut prendre qu'un label par token
319
+ pass
320
+ # ner_tags[x] = '_'.join(sorted(list(set(ner_tags[x].split('_')+[j['label']]))))
321
+
322
+ dict_out.append({
323
+ 'id': i['id'],
324
+ 'document_id': i['document_id'],
325
+ "ner_tags": ner_tags,
326
+ "tokens": i['tokens'],
327
+ })
328
+
329
+ return dict_out
330
+
331
+ def remove_prefix(self, a: str, prefix: str) -> str:
332
+ if a.startswith(prefix):
333
+ a = a[len(prefix) :]
334
+ return a
335
+
336
+ def parse_brat_file(self, txt_file: Path, annotation_file_suffixes: List[str] = None, parse_notes: bool = False) -> Dict:
337
+
338
+ example = {}
339
+ example["document_id"] = txt_file.with_suffix("").name
340
+ with txt_file.open() as f:
341
+ example["text"] = f.read()
342
+
343
+ # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
344
+ # for event extraction
345
+ if annotation_file_suffixes is None:
346
+ annotation_file_suffixes = [".a1", ".a2", ".ann"]
347
+
348
+ if len(annotation_file_suffixes) == 0:
349
+ raise AssertionError(
350
+ "At least one suffix for the to-be-read annotation files should be given!"
351
+ )
352
+
353
+ ann_lines = []
354
+ for suffix in annotation_file_suffixes:
355
+ annotation_file = txt_file.with_suffix(suffix)
356
+ if annotation_file.exists():
357
+ with annotation_file.open() as f:
358
+ ann_lines.extend(f.readlines())
359
+
360
+ example["text_bound_annotations"] = []
361
+ example["events"] = []
362
+ example["relations"] = []
363
+ example["equivalences"] = []
364
+ example["attributes"] = []
365
+ example["normalizations"] = []
366
+
367
+ if parse_notes:
368
+ example["notes"] = []
369
+
370
+ for line in ann_lines:
371
+ line = line.strip()
372
+ if not line:
373
+ continue
374
+
375
+ if line.startswith("T"): # Text bound
376
+ ann = {}
377
+ fields = line.split("\t")
378
+
379
+ ann["id"] = fields[0]
380
+ ann["type"] = fields[1].split()[0]
381
+ ann["offsets"] = []
382
+ span_str = self.remove_prefix(fields[1], (ann["type"] + " "))
383
+ text = fields[2]
384
+ for span in span_str.split(";"):
385
+ start, end = span.split()
386
+ ann["offsets"].append([int(start), int(end)])
387
+
388
+ # Heuristically split text of discontiguous entities into chunks
389
+ ann["text"] = []
390
+ if len(ann["offsets"]) > 1:
391
+ i = 0
392
+ for start, end in ann["offsets"]:
393
+ chunk_len = end - start
394
+ ann["text"].append(text[i : chunk_len + i])
395
+ i += chunk_len
396
+ while i < len(text) and text[i] == " ":
397
+ i += 1
398
+ else:
399
+ ann["text"] = [text]
400
+
401
+ example["text_bound_annotations"].append(ann)
402
+
403
+ elif line.startswith("E"):
404
+ ann = {}
405
+ fields = line.split("\t")
406
+
407
+ ann["id"] = fields[0]
408
+
409
+ ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
410
+
411
+ ann["arguments"] = []
412
+ for role_ref_id in fields[1].split()[1:]:
413
+ argument = {
414
+ "role": (role_ref_id.split(":"))[0],
415
+ "ref_id": (role_ref_id.split(":"))[1],
416
+ }
417
+ ann["arguments"].append(argument)
418
+
419
+ example["events"].append(ann)
420
+
421
+ elif line.startswith("R"):
422
+ ann = {}
423
+ fields = line.split("\t")
424
+
425
+ ann["id"] = fields[0]
426
+ ann["type"] = fields[1].split()[0]
427
+
428
+ ann["head"] = {
429
+ "role": fields[1].split()[1].split(":")[0],
430
+ "ref_id": fields[1].split()[1].split(":")[1],
431
+ }
432
+ ann["tail"] = {
433
+ "role": fields[1].split()[2].split(":")[0],
434
+ "ref_id": fields[1].split()[2].split(":")[1],
435
+ }
436
+
437
+ example["relations"].append(ann)
438
+
439
+ # '*' seems to be the legacy way to mark equivalences,
440
+ # but I couldn't find any info on the current way
441
+ # this might have to be adapted dependent on the brat version
442
+ # of the annotation
443
+ elif line.startswith("*"):
444
+ ann = {}
445
+ fields = line.split("\t")
446
+
447
+ ann["id"] = fields[0]
448
+ ann["ref_ids"] = fields[1].split()[1:]
449
+
450
+ example["equivalences"].append(ann)
451
+
452
+ elif line.startswith("A") or line.startswith("M"):
453
+ ann = {}
454
+ fields = line.split("\t")
455
+
456
+ ann["id"] = fields[0]
457
+
458
+ info = fields[1].split()
459
+ ann["type"] = info[0]
460
+ ann["ref_id"] = info[1]
461
+
462
+ if len(info) > 2:
463
+ ann["value"] = info[2]
464
+ else:
465
+ ann["value"] = ""
466
+
467
+ example["attributes"].append(ann)
468
+
469
+ elif line.startswith("N"):
470
+ ann = {}
471
+ fields = line.split("\t")
472
+
473
+ ann["id"] = fields[0]
474
+ ann["text"] = fields[2]
475
+
476
+ info = fields[1].split()
477
+
478
+ ann["type"] = info[0]
479
+ ann["ref_id"] = info[1]
480
+ ann["resource_name"] = info[2].split(":")[0]
481
+ ann["cuid"] = info[2].split(":")[1]
482
+ example["normalizations"].append(ann)
483
+
484
+ elif parse_notes and line.startswith("#"):
485
+ ann = {}
486
+ fields = line.split("\t")
487
+
488
+ ann["id"] = fields[0]
489
+ ann["text"] = fields[2] if len(fields) == 3 else "<BB_NULL_STR>"
490
+
491
+ info = fields[1].split()
492
+
493
+ ann["type"] = info[0]
494
+ ann["ref_id"] = info[1]
495
+ example["notes"].append(ann)
496
+ return example
497
+
498
+
499
+ def _generate_examples(
500
+ self, data_dir: Path, language: str, dataset_type: str
501
+ ) -> Tuple[int, Dict]:
502
+ """Yields examples as (key, example) tuples."""
503
+ data_dir = data_dir / f"{_LANGUAGES_2[language]}"
504
+
505
+ if dataset_type in ["patents", "emea"]:
506
+ data_dir = data_dir / f"{_DATASET_TYPES[dataset_type]}_ec22-cui-best_man"
507
+ else:
508
+ # It is Medline now
509
+ if language != "en":
510
+ data_dir = (
511
+ data_dir
512
+ / f"{_DATASET_TYPES[dataset_type]}_EN_{language.upper()}_ec22-cui-best_man"
513
+ )
514
+ else:
515
+ data_dir = [
516
+ data_dir
517
+ / f"{_DATASET_TYPES[dataset_type]}_EN_{_lang.upper()}_ec22-cui-best_man"
518
+ for _lang in _LANGUAGES_2
519
+ if _lang != "en"
520
+ ]
521
+
522
+ if not isinstance(data_dir, list):
523
+ data_dir: List[Path] = [data_dir]
524
+
525
+ raw_files = [raw_file for _dir in data_dir for raw_file in _dir.glob("*.txt")]
526
+
527
+ all_res = []
528
+
529
+ for i, raw_file in enumerate(raw_files):
530
+ brat_example = self.parse_brat_file(raw_file, parse_notes=True)
531
+ source_example = self._to_source_example(brat_example)
532
+
533
+ prod_format = self.convert_to_prodigy(source_example)
534
+ # print(prod_format)
535
+ # print()
536
+
537
+ hf_format = self.convert_to_hf_format(prod_format)[0]
538
+ print(">>> hf_format")
539
+ print(hf_format)
540
+ print("*"*50)
541
+ for a, b in zip(hf_format['tokens'], hf_format['ner_tags']):
542
+ print(a, " - ", b, end=" || ")
543
+ print()
544
+ print("*"*50)
545
+
546
+ # yield i, self.convert_to_hf_format(
547
+ # self.convert_to_prodigy(source_example),
548
+ # _LABELS_BASE,
549
+ # )
550
+ yield i, source_example
551
+
552
+ def _to_source_example(self, brat_example: Dict) -> Dict:
553
+
554
+ source_example = {
555
+ "document_id": brat_example["document_id"],
556
+ "text": brat_example["text"],
557
+ }
558
+
559
+ source_example["entities"] = []
560
+
561
+ for entity_annotation, ann_notes in zip(
562
+ brat_example["text_bound_annotations"], brat_example["notes"]
563
+ ):
564
+ entity_ann = entity_annotation.copy()
565
+
566
+ # Change id property name
567
+ entity_ann["entity_id"] = entity_ann["id"]
568
+ entity_ann.pop("id")
569
+
570
+ # Get values from annotator notes
571
+ assert entity_ann["entity_id"] == ann_notes["ref_id"]
572
+ notes_values = ast.literal_eval(ann_notes["text"])
573
+ if len(notes_values) == 4:
574
+ cui, preferred_term, semantic_type, semantic_group = notes_values
575
+ else:
576
+ preferred_term, semantic_type, semantic_group = notes_values
577
+ cui = entity_ann["type"]
578
+ entity_ann["cui"] = cui
579
+ entity_ann["preferred_term"] = preferred_term
580
+ entity_ann["semantic_type"] = semantic_type
581
+ entity_ann["type"] = semantic_group
582
+ entity_ann["normalized"] = [{"db_name": "UMLS", "db_id": cui}]
583
+
584
+ # Add entity annotation to sample
585
+ source_example["entities"].append(entity_ann)
586
+
587
+ return source_example