qanastek committed
Commit 9e8df72 · 1 Parent(s): a566430

Create DEFT2021.py

Files changed (1):
  1. DEFT2021.py +640 -0
DEFT2021.py ADDED
@@ -0,0 +1,640 @@
import os
import re
import ast
import json
import random
from pathlib import Path
from itertools import product
from dataclasses import dataclass
from typing import Dict, List, Tuple

import datasets
import numpy as np

_CITATION = """\
@inproceedings{grouin-etal-2021-classification,
    title = "Classification de cas cliniques et {\'e}valuation automatique de r{\'e}ponses d{'}{\'e}tudiants : pr{\'e}sentation de la campagne {DEFT} 2021 (Clinical cases classification and automatic evaluation of student answers : Presentation of the {DEFT} 2021 Challenge)",
    author = "Grouin, Cyril and
      Grabar, Natalia and
      Illouz, Gabriel",
    booktitle = "Actes de la 28e Conf{\'e}rence sur le Traitement Automatique des Langues Naturelles. Atelier D{\'E}fi Fouille de Textes (DEFT)",
    month = "6",
    year = "2021",
    address = "Lille, France",
    publisher = "ATALA",
    url = "https://aclanthology.org/2021.jeptalnrecital-deft.1",
    pages = "1--13",
    abstract = "Le d{\'e}fi fouille de textes (DEFT) est une campagne d{'}{\'e}valuation annuelle francophone. Nous pr{\'e}sentons les corpus et baselines {\'e}labor{\'e}es pour trois t{\^a}ches : (i) identifier le profil clinique de patients d{\'e}crits dans des cas cliniques, (ii) {\'e}valuer automatiquement les r{\'e}ponses d{'}{\'e}tudiants sur des questionnaires en ligne (Moodle) {\`a} partir de la correction de l{'}enseignant, et (iii) poursuivre une {\'e}valuation de r{\'e}ponses d{'}{\'e}tudiants {\`a} partir de r{\'e}ponses d{\'e}j{\`a} {\'e}valu{\'e}es par l{'}enseignant. Les r{\'e}sultats varient de 0,394 {\`a} 0,814 de F-mesure sur la premi{\`e}re t{\^a}che (7 {\'e}quipes), de 0,448 {\`a} 0,682 de pr{\'e}cision sur la deuxi{\`e}me (3 {\'e}quipes), et de 0,133 {\`a} 0,510 de pr{\'e}cision sur la derni{\`e}re (3 {\'e}quipes).",
    language = "French",
}
"""

_DESCRIPTION = """\
French clinical cases from the DEFT 2021 evaluation campaign, provided with two configurations:
a multi-label clinical speciality classification task ("cls") and a named-entity recognition task ("ner").
"""

_HOMEPAGE = "ddd"

_LICENSE = "unknown"

_SPECIALITIES = ['immunitaire', 'endocriniennes', 'blessures', 'chimiques', 'etatsosy', 'nutritionnelles', 'infections', 'virales', 'parasitaires', 'tumeur', 'osteomusculaires', 'stomatognathique', 'digestif', 'respiratoire', 'ORL', 'nerveux', 'oeil', 'homme', 'femme', 'cardiovasculaires', 'hemopathies', 'genetique', 'peau']

_LABELS_BASE = ['anatomie', 'date', 'dose', 'duree', 'examen', 'frequence', 'mode', 'moment', 'pathologie', 'sosy', 'substance', 'traitement', 'valeur']

class DEFT2021(datasets.GeneratorBasedBuilder):

    DEFAULT_CONFIG_NAME = "ner"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="cls", version="1.0.0", description="DEFT 2021 corpora - Classification task"),
        datasets.BuilderConfig(name="ner", version="1.0.0", description="DEFT 2021 corpora - Named-entity recognition task"),
    ]

    def _info(self):

        if self.config.name.find("cls") != -1:

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "specialities": datasets.Sequence(
                        datasets.features.ClassLabel(names=_SPECIALITIES),
                    ),
                    "specialities_one_hot": datasets.Sequence(
                        datasets.Value("float"),
                    ),
                }
            )

        elif self.config.name.find("ner") != -1:

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['O', 'B-anatomie', 'I-anatomie', 'B-date', 'I-date', 'B-dose', 'I-dose', 'B-duree', 'I-duree', 'B-examen', 'I-examen', 'B-frequence', 'I-frequence', 'B-mode', 'I-mode', 'B-moment', 'I-moment', 'B-pathologie', 'I-pathologie', 'B-sosy', 'I-sosy', 'B-substance', 'I-substance', 'B-traitement', 'I-traitement', 'B-valeur', 'I-valeur'],
                        )
                    ),
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):

        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")
        else:
            data_dir = self.config.data_dir

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "test",
                },
            ),
        ]

    def remove_prefix(self, a: str, prefix: str) -> str:
        if a.startswith(prefix):
            a = a[len(prefix):]
        return a

    def parse_brat_file(self, txt_file: Path, annotation_file_suffixes: List[str] = None, parse_notes: bool = False) -> Dict:
        """
        Parse a brat-annotated document: read the .txt file and any companion
        annotation files into a dictionary of text-bound annotations, events,
        relations, equivalences, attributes, normalizations and (optionally) notes.
        """

        example = {}
        example["document_id"] = txt_file.with_suffix("").name
        with txt_file.open() as f:
            example["text"] = f.read()

        # If no specific suffixes of the to-be-read annotation files are given,
        # take the standard suffixes used for event extraction
        if annotation_file_suffixes is None:
            annotation_file_suffixes = [".a1", ".a2", ".ann"]

        if len(annotation_file_suffixes) == 0:
            raise AssertionError(
                "At least one suffix for the to-be-read annotation files should be given!"
            )

        ann_lines = []
        for suffix in annotation_file_suffixes:
            annotation_file = txt_file.with_suffix(suffix)
            if annotation_file.exists():
                with annotation_file.open() as f:
                    ann_lines.extend(f.readlines())

        example["text_bound_annotations"] = []
        example["events"] = []
        example["relations"] = []
        example["equivalences"] = []
        example["attributes"] = []
        example["normalizations"] = []

        if parse_notes:
            example["notes"] = []

        for line in ann_lines:
            line = line.strip()
            if not line:
                continue

            if line.startswith("T"):  # Text bound
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]
                ann["type"] = fields[1].split()[0]
                ann["offsets"] = []
                span_str = self.remove_prefix(fields[1], (ann["type"] + " "))
                text = fields[2]
                for span in span_str.split(";"):
                    start, end = span.split()
                    ann["offsets"].append([int(start), int(end)])

                # Heuristically split text of discontiguous entities into chunks
                ann["text"] = []
                if len(ann["offsets"]) > 1:
                    i = 0
                    for start, end in ann["offsets"]:
                        chunk_len = end - start
                        ann["text"].append(text[i : chunk_len + i])
                        i += chunk_len
                        while i < len(text) and text[i] == " ":
                            i += 1
                else:
                    ann["text"] = [text]

                example["text_bound_annotations"].append(ann)

            elif line.startswith("E"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]

                ann["type"], ann["trigger"] = fields[1].split()[0].split(":")

                ann["arguments"] = []
                for role_ref_id in fields[1].split()[1:]:
                    argument = {
                        "role": (role_ref_id.split(":"))[0],
                        "ref_id": (role_ref_id.split(":"))[1],
                    }
                    ann["arguments"].append(argument)

                example["events"].append(ann)

            elif line.startswith("R"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]
                ann["type"] = fields[1].split()[0]

                ann["head"] = {
                    "role": fields[1].split()[1].split(":")[0],
                    "ref_id": fields[1].split()[1].split(":")[1],
                }
                ann["tail"] = {
                    "role": fields[1].split()[2].split(":")[0],
                    "ref_id": fields[1].split()[2].split(":")[1],
                }

                example["relations"].append(ann)

            # '*' seems to be the legacy way to mark equivalences,
            # but I couldn't find any info on the current way;
            # this might have to be adapted depending on the brat version
            # of the annotation
            elif line.startswith("*"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]
                ann["ref_ids"] = fields[1].split()[1:]

                example["equivalences"].append(ann)

            elif line.startswith("A") or line.startswith("M"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]

                info = fields[1].split()
                ann["type"] = info[0]
                ann["ref_id"] = info[1]

                if len(info) > 2:
                    ann["value"] = info[2]
                else:
                    ann["value"] = ""

                example["attributes"].append(ann)

            elif line.startswith("N"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]
                ann["text"] = fields[2]

                info = fields[1].split()

                ann["type"] = info[0]
                ann["ref_id"] = info[1]
                ann["resource_name"] = info[2].split(":")[0]
                ann["cuid"] = info[2].split(":")[1]
                example["normalizations"].append(ann)

            elif parse_notes and line.startswith("#"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]
                ann["text"] = fields[2] if len(fields) == 3 else "<BB_NULL_STR>"

                info = fields[1].split()

                ann["type"] = info[0]
                ann["ref_id"] = info[1]
                example["notes"].append(ann)

        return example

    def _to_source_example(self, brat_example: Dict) -> Dict:

        source_example = {
            "document_id": brat_example["document_id"],
            "text": brat_example["text"],
        }

        source_example["entities"] = []

        for entity_annotation in brat_example["text_bound_annotations"]:
            entity_ann = entity_annotation.copy()

            # Change id property name
            entity_ann["entity_id"] = entity_ann["id"]
            entity_ann.pop("id")

            # Add entity annotation to sample
            source_example["entities"].append(entity_ann)

        return source_example

    def convert_to_prodigy(self, json_object, list_label):
        """
        Convert a source example into a Prodigy-like dictionary: whitespace-tokenized
        text plus one span (character offsets, token indices, label) per entity whose
        type is in list_label.
        """

        def prepare_split(text):

            rep_before = ['?', '!', ';', '*']
            rep_after = ['’', "'"]
            rep_both = ['-', '/', '[', ']', ':', ')', '(', ',', '.']

            for i in rep_before:
                text = text.replace(i, ' '+i)

            for i in rep_after:
                text = text.replace(i, i+' ')

            for i in rep_both:
                text = text.replace(i, ' '+i+' ')

            text_split = text.split()

            punctuations = [',', '.']
            for j in range(0, len(text_split)-1):
                if j-1 >= 0 and j+1 <= len(text_split)-1 and text_split[j-1][-1].isdigit() and text_split[j+1][0].isdigit():
                    if text_split[j] in punctuations:
                        text_split[j-1:j+2] = [''.join(text_split[j-1:j+2])]

            text = ' '.join(text_split)

            return text

        new_json = []

        for ex in [json_object]:

            text = prepare_split(ex['text'])

            tokenized_text = text.split()

            list_spans = []

            for a in ex['entities']:

                for o in range(len(a['offsets'])):

                    text_annot = prepare_split(a['text'][o])

                    offset_start = a['offsets'][o][0]
                    offset_end = a['offsets'][o][1]

                    nb_tokens_annot = len(text_annot.split())

                    txt_offsetstart = prepare_split(ex['text'][:offset_start])

                    nb_tokens_before_annot = len(txt_offsetstart.split())

                    token_start = nb_tokens_before_annot
                    token_end = token_start + nb_tokens_annot - 1

                    if a['type'] in list_label:
                        list_spans.append({
                            'start': offset_start,
                            'end': offset_end,
                            'token_start': token_start,
                            'token_end': token_end,
                            'label': a['type'],
                            'id': a['entity_id'],
                            'text': a['text'][o],
                        })

            res = {
                'id': ex['document_id'],
                'document_id': ex['document_id'],
                'text': ex['text'],
                'tokens': tokenized_text,
                'spans': list_spans
            }

            new_json.append(res)

        return new_json

    def convert_to_hf_format(self, json_object):
        """
        Turn Prodigy-like spans into token-level IOB2 ner_tags aligned with the tokens,
        keeping only the longest span when annotations are nested.
        """

        dict_out = []

        for i in json_object:

            # Filter annotations to keep the longest annotated spans when there are nested annotations
            selected_annotations = []

            if 'spans' in i:

                for idx_j, j in enumerate(i['spans']):

                    len_j = int(j['end'])-int(j['start'])
                    range_j = [l for l in range(int(j['start']), int(j['end']), 1)]

                    keep = True

                    for idx_k, k in enumerate(i['spans'][idx_j+1:]):

                        len_k = int(k['end'])-int(k['start'])
                        range_k = [l for l in range(int(k['start']), int(k['end']), 1)]

                        inter = list(set(range_k).intersection(set(range_j)))
                        if len(inter) > 0 and len_j < len_k:
                            keep = False

                    if keep:
                        selected_annotations.append(j)

            # Create the list of labels + entity ids to separate the different annotations and prepare the IOB2 format
            nb_tokens = len(i['tokens'])
            ner_tags = ['O']*nb_tokens

            for slct in selected_annotations:

                for x in range(slct['token_start'], slct['token_end']+1, 1):

                    if i['tokens'][x] not in slct['text']:
                        if ner_tags[x-1] == 'O':
                            ner_tags[x-1] = slct['label']+'-'+slct['id']
                    else:
                        if ner_tags[x] == 'O':
                            ner_tags[x] = slct['label']+'-'+slct['id']

            # Convert to the IOB2 format
            ner_tags_IOB2 = []
            for idx_l, label in enumerate(ner_tags):

                if label == 'O':
                    ner_tags_IOB2.append('O')
                else:
                    current_label = label.split('-')[0]
                    current_id = label.split('-')[1]
                    if idx_l == 0:
                        ner_tags_IOB2.append('B-'+current_label)
                    elif current_label in ner_tags[idx_l-1]:
                        if current_id == ner_tags[idx_l-1].split('-')[1]:
                            ner_tags_IOB2.append('I-'+current_label)
                        else:
                            ner_tags_IOB2.append('B-'+current_label)
                    else:
                        ner_tags_IOB2.append('B-'+current_label)

            dict_out.append({
                'id': i['id'],
                'document_id': i['document_id'],
                "ner_tags": ner_tags_IOB2,
                "tokens": i['tokens'],
            })

        return dict_out

    def split_sentences(self, json_o):
        """
        Split each document into sentences (on '.' tokens) so that examples fit
        within BERT's 512-token limit.
        """

        final_json = []

        for i in json_o:

            ind_punc = [index for index, value in enumerate(i['tokens']) if value == '.'] + [len(i['tokens'])]

            for index, value in enumerate(ind_punc):

                if index == 0:
                    final_json.append({'id': i['id']+'_'+str(index),
                                       'document_id': i['document_id'],
                                       'ner_tags': i['ner_tags'][:value+1],
                                       'tokens': i['tokens'][:value+1]
                                       })
                else:
                    prev_value = ind_punc[index-1]
                    final_json.append({'id': i['id']+'_'+str(index),
                                       'document_id': i['document_id'],
                                       'ner_tags': i['ner_tags'][prev_value+1:value+1],
                                       'tokens': i['tokens'][prev_value+1:value+1]
                                       })

        return final_json

    def _generate_examples(self, data_dir, split):

        if self.config.name.find("cls") != -1:

            all_res = {}

            key = 0

            if split == 'train' or split == 'validation':
                split_eval = 'train'
            else:
                split_eval = 'test'

            path_labels = Path(data_dir) / 'evaluations' / f"ref-{split_eval}-deft2021.txt"

            with open(os.path.join(data_dir, 'distribution-corpus.txt')) as f_dist:

                doc_specialities_ = {}

                with open(path_labels) as f_spec:

                    doc_specialities = [line.strip() for line in f_spec.readlines()]

                    for raw in doc_specialities:

                        raw_split = raw.split('\t')

                        if len(raw_split) == 3 and raw_split[0] in doc_specialities_:
                            doc_specialities_[raw_split[0]].append(raw_split[1])

                        elif len(raw_split) == 3 and raw_split[0] not in doc_specialities_:
                            doc_specialities_[raw_split[0]] = [raw_split[1]]

                ann_path = Path(data_dir) / "DEFT-cas-cliniques"

                for guid, txt_file in enumerate(sorted(ann_path.glob("*.txt"))):

                    ann_file = txt_file.with_suffix("").name.split('.')[0]+'.ann'

                    if ann_file in doc_specialities_:

                        res = {}
                        res['document_id'] = txt_file.with_suffix("").name
                        with txt_file.open() as f:
                            res["text"] = f.read()

                        specialities = doc_specialities_[ann_file]

                        # Empty one-hot vector
                        one_hot = [0.0 for i in _SPECIALITIES]

                        # Fill up the one-hot vector
                        for s in specialities:
                            one_hot[_SPECIALITIES.index(s)] = 1.0

                        all_res[res['document_id']] = {
                            "id": str(key),
                            "document_id": res['document_id'],
                            "text": res["text"],
                            "specialities": specialities,
                            "specialities_one_hot": one_hot,
                        }

                        key += 1

                distribution = [line.strip() for line in f_dist.readlines()]

                random.seed(4)
                train = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'train 2021']
                random.shuffle(train)
                random.shuffle(train)
                random.shuffle(train)
                train, validation = np.split(train, [int(len(train)*0.7096)])

                test = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'test 2021']

                if split == "train":
                    allowed_ids = list(train)
                elif split == "test":
                    allowed_ids = list(test)
                elif split == "validation":
                    allowed_ids = list(validation)

                for r in all_res.values():
                    if r["document_id"]+'.txt' in allowed_ids:
                        yield r["id"], r

        elif self.config.name.find("ner") != -1:

            all_res = []

            key = 0

            with open(os.path.join(data_dir, 'distribution-corpus.txt')) as f_dist:

                distribution = [line.strip() for line in f_dist.readlines()]

                random.seed(4)
                train = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'train 2021']
                random.shuffle(train)
                random.shuffle(train)
                random.shuffle(train)
                train, validation = np.split(train, [int(len(train)*0.73)])
                test = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'test 2021']

                ann_path = Path(data_dir) / "DEFT-cas-cliniques"

                for guid, txt_file in enumerate(sorted(ann_path.glob("*.txt"))):

                    brat_example = self.parse_brat_file(txt_file, parse_notes=True)

                    source_example = self._to_source_example(brat_example)

                    prod_format = self.convert_to_prodigy(source_example, _LABELS_BASE)

                    hf_format = self.convert_to_hf_format(prod_format)

                    hf_split = self.split_sentences(hf_format)

                    for h in hf_split:

                        if len(h['tokens']) > 0 and len(h['ner_tags']) > 0:

                            all_res.append({
                                "id": str(key),
                                "document_id": h['document_id'],
                                "tokens": h['tokens'],
                                "ner_tags": h['ner_tags'],
                            })

                            key += 1

                if split == "train":
                    allowed_ids = list(train)
                elif split == "validation":
                    allowed_ids = list(validation)
                elif split == "test":
                    allowed_ids = list(test)

                for r in all_res:
                    if r["document_id"]+'.txt' in allowed_ids:
                        yield r["id"], r
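
For context, here is a minimal usage sketch (not part of the committed file). It assumes the DEFT 2021 data has been obtained separately and unpacked under a local folder whose layout matches the paths hard-coded in _generate_examples: distribution-corpus.txt, evaluations/ref-train-deft2021.txt and ref-test-deft2021.txt, and a DEFT-cas-cliniques/ directory of brat .txt/.ann pairs. The /path/to/deft2021 location is a placeholder.

# Usage sketch only: a local copy of DEFT2021.py and of the DEFT 2021 corpus is assumed;
# /path/to/deft2021 is a placeholder for wherever the corpus was unpacked.
from datasets import load_dataset

# Named-entity recognition configuration: sentence-level tokens with IOB2 tags.
ner = load_dataset("DEFT2021.py", name="ner", data_dir="/path/to/deft2021")
print(ner["train"][0]["tokens"][:10])
print(ner["train"][0]["ner_tags"][:10])

# Classification configuration: full documents with their multi-label specialities.
cls = load_dataset("DEFT2021.py", name="cls", data_dir="/path/to/deft2021")
print(cls["train"][0]["specialities"])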