qanastek committed
Commit 694a490 · 1 Parent(s): 88c6ec7

Update DEFT2021

Files changed (1)
  1. DEFT2021 +535 -88
DEFT2021 CHANGED
@@ -36,7 +36,7 @@ _SPECIALITIES = ['immunitaire', 'endocriniennes', 'blessures', 'chimiques', 'eta
 
 class DEFT2021(datasets.GeneratorBasedBuilder):
 
-    DEFAULT_CONFIG_NAME = "source"
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="source", version="1.0.0", description="DEFT 2021 corpora"),
@@ -44,19 +44,36 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
 
     def _info(self):
 
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "document_id": datasets.Value("string"),
-                "text": datasets.Value("string"),
-                "specialities": datasets.Sequence(
-                    datasets.features.ClassLabel(names=_SPECIALITIES),
-                ),
-                "specialities_one_hot": datasets.Sequence(
-                    datasets.Value("float"),
-                ),
-            }
-        )
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -99,81 +116,511 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
             ),
         ]
 
-    def _generate_examples(self, data_dir, split):
-
-        all_res = {}
-
-        key = 0
-
-        if split == 'train' or split == 'validation':
-            split_eval = 'train'
-        else:
-            split_eval = 'test'
-
-        path_labels = Path(data_dir) / 'evaluations' / f"ref-{split_eval}-deft2021.txt"
-
-        with open(os.path.join(data_dir, 'distribution-corpus.txt')) as f_dist:
-
-            doc_specialities_ = {}
-            with open(path_labels) as f_spec:
-                doc_specialities = [line.strip() for line in f_spec.readlines()]
-                for raw in doc_specialities:
-                    raw_split = raw.split('\t')
-                    if len(raw_split) == 3 and raw_split[0] in doc_specialities_:
-                        doc_specialities_[raw_split[0]].append(raw_split[1])
-                    elif len(raw_split) == 3 and raw_split[0] not in doc_specialities_:
-                        doc_specialities_[raw_split[0]] = [raw_split[1]]
-
-            ann_path = Path(data_dir) / "DEFT-cas-cliniques"
-
-            for guid, txt_file in enumerate(sorted(ann_path.glob("*.txt"))):
-
-                ann_file = txt_file.with_suffix("").name.split('.')[0]+'.ann'
-
-                if ann_file in doc_specialities_:
-
-                    res = {}
-                    res['document_id'] = txt_file.with_suffix("").name
-                    with txt_file.open() as f:
-                        res["text"] = f.read()
-
-                    specialities = doc_specialities_[ann_file]
-
-                    # Empty one hot vector
-                    one_hot = [0.0 for i in _SPECIALITIES]
-
-                    # Fill up the one hot vector
-                    for s in specialities:
-                        one_hot[_SPECIALITIES.index(s)] = 1.0
-
-                    all_res[res['document_id']] = {
-                        "id": str(key),
-                        "document_id": res['document_id'],
-                        "text": res["text"],
-                        "specialities": specialities,
-                        "specialities_one_hot": one_hot,
-                    }
-
-                    key += 1
-
-            distribution = [line.strip() for line in f_dist.readlines()]
-
-            random.seed(4)
-            train = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'train 2021']
-            random.shuffle(train)
-            random.shuffle(train)
-            random.shuffle(train)
-            train, validation = np.split(train, [int(len(train)*0.7096)])
-
-            test = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'test 2021']
-
-            if split == "train":
-                allowed_ids = list(train)
-            elif split == "test":
-                allowed_ids = list(test)
-            elif split == "validation":
-                allowed_ids = list(validation)
-
-            for r in all_res.values():
-                if r["document_id"]+'.txt' in allowed_ids:
-                    yield r["id"], r
@@ -36,7 +36,7 @@ _SPECIALITIES = ['immunitaire', 'endocriniennes', 'blessures', 'chimiques', 'eta
 
 class DEFT2021(datasets.GeneratorBasedBuilder):
 
+    DEFAULT_CONFIG_NAME = "ner"
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="source", version="1.0.0", description="DEFT 2021 corpora"),
@@ -44,19 +44,36 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
 
     def _info(self):
 
+        if self.config.name.find("cls") != -1:
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "specialities": datasets.Sequence(
+                        datasets.features.ClassLabel(names=_SPECIALITIES),
+                    ),
+                    "specialities_one_hot": datasets.Sequence(
+                        datasets.Value("float"),
+                    ),
+                }
+            )
+
+        elif self.config.name.find("ner") != -1:
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "ner_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=['O', 'B-anatomie', 'I-anatomie', 'B-date', 'I-date', 'B-dose', 'I-dose', 'B-duree', 'I-duree', 'B-examen', 'I-examen', 'B-frequence', 'I-frequence', 'B-mode', 'I-mode', 'B-moment', 'I-moment', 'B-pathologie', 'I-pathologie', 'B-sosy', 'I-sosy', 'B-substance', 'I-substance', 'B-traitement', 'I-traitement', 'B-valeur', 'I-valeur'],
+                        )
+                    ),
+                }
+            )
 
         return datasets.DatasetInfo(
            description=_DESCRIPTION,
@@ -99,81 +116,511 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
             ),
         ]
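+
+    # Helper mirroring str.removeprefix(), which is only available from Python 3.9 onwards.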
+    def remove_prefix(self, a: str, prefix: str) -> str:
+        if a.startswith(prefix):
+            a = a[len(prefix):]
+        return a
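+
+    # brat standoff format, as consumed below (tab-separated, one annotation per line):
+    #   T*  text-bound entity:  "T1<TAB>type start end[;start end ...]<TAB>covered text"
+    #   E*  event:              "E1<TAB>type:trigger_id role:ref_id ..."
+    #   R*  relation:           "R1<TAB>type Arg1:ref_id Arg2:ref_id"
+    #   N*  normalization:      "N1<TAB>type ref_id resource:cuid<TAB>text"
+    #   A*/M* attributes, #* annotator notes, "*" equivalences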
+    def parse_brat_file(self, txt_file: Path, annotation_file_suffixes: List[str] = None, parse_notes: bool = False) -> Dict:
+
+        example = {}
+        example["document_id"] = txt_file.with_suffix("").name
+        with txt_file.open() as f:
+            example["text"] = f.read()
+
+        # If no specific suffixes for the to-be-read annotation files are given,
+        # fall back to the standard suffixes used for event extraction
+        if annotation_file_suffixes is None:
+            annotation_file_suffixes = [".a1", ".a2", ".ann"]
+
+        if len(annotation_file_suffixes) == 0:
+            raise AssertionError(
+                "At least one suffix for the to-be-read annotation files should be given!"
+            )
+
+        ann_lines = []
+        for suffix in annotation_file_suffixes:
+            annotation_file = txt_file.with_suffix(suffix)
+            if annotation_file.exists():
+                with annotation_file.open() as f:
+                    ann_lines.extend(f.readlines())
+
+        example["text_bound_annotations"] = []
+        example["events"] = []
+        example["relations"] = []
+        example["equivalences"] = []
+        example["attributes"] = []
+        example["normalizations"] = []
+
+        if parse_notes:
+            example["notes"] = []
+
+        for line in ann_lines:
+            line = line.strip()
+            if not line:
+                continue
+
+            if line.startswith("T"):  # Text bound
+                ann = {}
+                fields = line.split("\t")
+
+                ann["id"] = fields[0]
+                ann["type"] = fields[1].split()[0]
+                ann["offsets"] = []
+                span_str = self.remove_prefix(fields[1], (ann["type"] + " "))
+                text = fields[2]
+                for span in span_str.split(";"):
+                    start, end = span.split()
+                    ann["offsets"].append([int(start), int(end)])
+
+                # Heuristically split text of discontiguous entities into chunks
+                ann["text"] = []
+                if len(ann["offsets"]) > 1:
+                    i = 0
+                    for start, end in ann["offsets"]:
+                        chunk_len = end - start
+                        ann["text"].append(text[i : chunk_len + i])
+                        i += chunk_len
+                        while i < len(text) and text[i] == " ":
+                            i += 1
+                else:
+                    ann["text"] = [text]
+
+                example["text_bound_annotations"].append(ann)
+
+            elif line.startswith("E"):
+                ann = {}
+                fields = line.split("\t")
+
+                ann["id"] = fields[0]
+
+                ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
+
+                ann["arguments"] = []
+                for role_ref_id in fields[1].split()[1:]:
+                    argument = {
+                        "role": (role_ref_id.split(":"))[0],
+                        "ref_id": (role_ref_id.split(":"))[1],
+                    }
+                    ann["arguments"].append(argument)
+
+                example["events"].append(ann)
+
+            elif line.startswith("R"):
+                ann = {}
+                fields = line.split("\t")
+
+                ann["id"] = fields[0]
+                ann["type"] = fields[1].split()[0]
+
+                ann["head"] = {
+                    "role": fields[1].split()[1].split(":")[0],
+                    "ref_id": fields[1].split()[1].split(":")[1],
+                }
+                ann["tail"] = {
+                    "role": fields[1].split()[2].split(":")[0],
+                    "ref_id": fields[1].split()[2].split(":")[1],
+                }
+
+                example["relations"].append(ann)
+
+            # '*' seems to be the legacy way to mark equivalences,
+            # but I couldn't find any info on the current way;
+            # this might have to be adapted depending on the brat version
+            # used for the annotation
+            elif line.startswith("*"):
+                ann = {}
+                fields = line.split("\t")
+
+                ann["id"] = fields[0]
+                ann["ref_ids"] = fields[1].split()[1:]
+
+                example["equivalences"].append(ann)
+
+            elif line.startswith("A") or line.startswith("M"):
+                ann = {}
+                fields = line.split("\t")
+
+                ann["id"] = fields[0]
+
+                info = fields[1].split()
+                ann["type"] = info[0]
+                ann["ref_id"] = info[1]
+
+                if len(info) > 2:
+                    ann["value"] = info[2]
+                else:
+                    ann["value"] = ""
+
+                example["attributes"].append(ann)
+
+            elif line.startswith("N"):
+                ann = {}
+                fields = line.split("\t")
+
+                ann["id"] = fields[0]
+                ann["text"] = fields[2]
+
+                info = fields[1].split()
+
+                ann["type"] = info[0]
+                ann["ref_id"] = info[1]
+                ann["resource_name"] = info[2].split(":")[0]
+                ann["cuid"] = info[2].split(":")[1]
+                example["normalizations"].append(ann)
+
+            elif parse_notes and line.startswith("#"):
+                ann = {}
+                fields = line.split("\t")
+
+                ann["id"] = fields[0]
+                ann["text"] = fields[2] if len(fields) == 3 else "<BB_NULL_STR>"
+
+                info = fields[1].split()
+
+                ann["type"] = info[0]
+                ann["ref_id"] = info[1]
+                example["notes"].append(ann)
+
+        return example
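+
+    # Keeps only what the NER conversion needs from the brat example: the raw
+    # text plus its text-bound annotations, re-keyed as "entities".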
+    def _to_source_example(self, brat_example: Dict) -> Dict:
+
+        source_example = {
+            "document_id": brat_example["document_id"],
+            "text": brat_example["text"],
+        }
+
+        source_example["entities"] = []
+
+        for entity_annotation in brat_example["text_bound_annotations"]:
+            entity_ann = entity_annotation.copy()
+
+            # Change the id property name
+            entity_ann["entity_id"] = entity_ann["id"]
+            entity_ann.pop("id")
+
+            # Add the entity annotation to the sample
+            source_example["entities"].append(entity_ann)
+
+        return source_example
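+
+    # Token indices are recovered indirectly: the text is whitespace-tokenized
+    # after padding punctuation with spaces (numbers split around a decimal
+    # separator are re-joined), and a span's token position is the number of
+    # tokens preceding its character offset under the same tokenization.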
+    def convert_to_prodigy(self, json_object, list_label):
+
+        def prepare_split(text):
+
+            rep_before = ['?', '!', ';', '*']
+            rep_after = ['’', "'"]
+            rep_both = ['-', '/', '[', ']', ':', ')', '(', ',', '.']
+
+            for i in rep_before:
+                text = text.replace(i, ' '+i)
+
+            for i in rep_after:
+                text = text.replace(i, i+' ')
+
+            for i in rep_both:
+                text = text.replace(i, ' '+i+' ')
+
+            text_split = text.split()
+
+            punctuations = [',', '.']
+            for j in range(0, len(text_split)-1):
+                if j-1 >= 0 and j+1 <= len(text_split)-1 and text_split[j-1][-1].isdigit() and text_split[j+1][0].isdigit():
+                    if text_split[j] in punctuations:
+                        text_split[j-1:j+2] = [''.join(text_split[j-1:j+2])]
+
+            text = ' '.join(text_split)
+
+            return text
+
+        new_json = []
+
+        for ex in [json_object]:
+
+            text = prepare_split(ex['text'])
+
+            tokenized_text = text.split()
+
+            list_spans = []
+
+            for a in ex['entities']:
+
+                for o in range(len(a['offsets'])):
+
+                    text_annot = prepare_split(a['text'][o])
+
+                    offset_start = a['offsets'][o][0]
+                    offset_end = a['offsets'][o][1]
+
+                    nb_tokens_annot = len(text_annot.split())
+
+                    txt_offsetstart = prepare_split(ex['text'][:offset_start])
+
+                    nb_tokens_before_annot = len(txt_offsetstart.split())
+
+                    token_start = nb_tokens_before_annot
+                    token_end = token_start + nb_tokens_annot - 1
+
+                    if a['type'] in list_label:
+                        list_spans.append({
+                            'start': offset_start,
+                            'end': offset_end,
+                            'token_start': token_start,
+                            'token_end': token_end,
+                            'label': a['type'],
+                            'id': a['entity_id'],
+                            'text': a['text'][o],
+                        })
+
+            res = {
+                'id': ex['document_id'],
+                'document_id': ex['document_id'],
+                'text': ex['text'],
+                'tokens': tokenized_text,
+                'spans': list_spans
+            }
+
+            new_json.append(res)
+
+        return new_json
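+
+    # IOB2 recap: the first token of an entity is tagged B-<label>, the
+    # remaining tokens of that entity I-<label>, and all other tokens O.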
+    def convert_to_hf_format(self, json_object):
+
+        dict_out = []
+
+        for i in json_object:
+
+            # Filter the annotations to keep only the longest annotated span when annotations are nested
+            selected_annotations = []
+
+            if 'spans' in i:
+
+                for idx_j, j in enumerate(i['spans']):
+
+                    len_j = int(j['end'])-int(j['start'])
+                    range_j = [l for l in range(int(j['start']), int(j['end']), 1)]
+
+                    keep = True
+
+                    for idx_k, k in enumerate(i['spans'][idx_j+1:]):
+
+                        len_k = int(k['end'])-int(k['start'])
+                        range_k = [l for l in range(int(k['start']), int(k['end']), 1)]
+
+                        inter = list(set(range_k).intersection(set(range_j)))
+                        if len(inter) > 0 and len_j < len_k:
+                            keep = False
+
+                    if keep:
+                        selected_annotations.append(j)
+
+            # Create a list of labels + ids to separate the different annotations and prepare the IOB2 format
+            nb_tokens = len(i['tokens'])
+            ner_tags = ['O']*nb_tokens
+
+            for slct in selected_annotations:
+
+                for x in range(slct['token_start'], slct['token_end']+1, 1):
+
+                    if i['tokens'][x] not in slct['text']:
+                        if ner_tags[x-1] == 'O':
+                            ner_tags[x-1] = slct['label']+'-'+slct['id']
+                    else:
+                        if ner_tags[x] == 'O':
+                            ner_tags[x] = slct['label']+'-'+slct['id']
+
+            # Make the IOB2 format
+            ner_tags_IOB2 = []
+            for idx_l, label in enumerate(ner_tags):
+
+                if label == 'O':
+                    ner_tags_IOB2.append('O')
+                else:
+                    current_label = label.split('-')[0]
+                    current_id = label.split('-')[1]
+                    if idx_l == 0:
+                        ner_tags_IOB2.append('B-'+current_label)
+                    elif current_label in ner_tags[idx_l-1]:
+                        if current_id == ner_tags[idx_l-1].split('-')[1]:
+                            ner_tags_IOB2.append('I-'+current_label)
+                        else:
+                            ner_tags_IOB2.append('B-'+current_label)
+                    else:
+                        ner_tags_IOB2.append('B-'+current_label)
+
+            dict_out.append({
+                'id': i['id'],
+                'document_id': i['document_id'],
+                "ner_tags": ner_tags_IOB2,
+                "tokens": i['tokens'],
+            })
+
+        return dict_out
+    def split_sentences(self, json_o):
+        """
+        Split each document into sentences, so that each example fits within
+        BERT's 512-token maximum.
+        """
+
+        final_json = []
+
+        for i in json_o:
+
+            ind_punc = [index for index, value in enumerate(i['tokens']) if value == '.'] + [len(i['tokens'])]
+
+            for index, value in enumerate(ind_punc):
+
+                if index == 0:
+                    final_json.append({
+                        'id': i['id']+'_'+str(index),
+                        'document_id': i['document_id'],
+                        'ner_tags': i['ner_tags'][:value+1],
+                        'tokens': i['tokens'][:value+1],
+                    })
+                else:
+                    prev_value = ind_punc[index-1]
+                    final_json.append({
+                        'id': i['id']+'_'+str(index),
+                        'document_id': i['document_id'],
+                        'ner_tags': i['ner_tags'][prev_value+1:value+1],
+                        'tokens': i['tokens'][prev_value+1:value+1],
+                    })
+
+        return final_json
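+
+    # Split assignment below: files listed as "train 2021" in
+    # distribution-corpus.txt are shuffled with a fixed seed and cut into
+    # train/validation; the "test 2021" files form the test split.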
+    def _generate_examples(self, data_dir, split):
+
+        if self.config.name.find("cls") != -1:
+
+            all_res = {}
+
+            key = 0
+
+            if split == 'train' or split == 'validation':
+                split_eval = 'train'
+            else:
+                split_eval = 'test'
+
+            path_labels = Path(data_dir) / 'evaluations' / f"ref-{split_eval}-deft2021.txt"
+
+            with open(os.path.join(data_dir, 'distribution-corpus.txt')) as f_dist:
+
+                doc_specialities_ = {}
+                with open(path_labels) as f_spec:
+                    doc_specialities = [line.strip() for line in f_spec.readlines()]
+                    for raw in doc_specialities:
+                        raw_split = raw.split('\t')
+                        if len(raw_split) == 3 and raw_split[0] in doc_specialities_:
+                            doc_specialities_[raw_split[0]].append(raw_split[1])
+                        elif len(raw_split) == 3 and raw_split[0] not in doc_specialities_:
+                            doc_specialities_[raw_split[0]] = [raw_split[1]]
+
+                ann_path = Path(data_dir) / "DEFT-cas-cliniques"
+
+                for guid, txt_file in enumerate(sorted(ann_path.glob("*.txt"))):
+
+                    ann_file = txt_file.with_suffix("").name.split('.')[0]+'.ann'
+
+                    if ann_file in doc_specialities_:
+
+                        res = {}
+                        res['document_id'] = txt_file.with_suffix("").name
+                        with txt_file.open() as f:
+                            res["text"] = f.read()
+
+                        specialities = doc_specialities_[ann_file]
+
+                        # Empty one-hot vector
+                        one_hot = [0.0 for i in _SPECIALITIES]
+
+                        # Fill in the one-hot vector
+                        for s in specialities:
+                            one_hot[_SPECIALITIES.index(s)] = 1.0
+
+                        all_res[res['document_id']] = {
+                            "id": str(key),
+                            "document_id": res['document_id'],
+                            "text": res["text"],
+                            "specialities": specialities,
+                            "specialities_one_hot": one_hot,
+                        }
+
+                        key += 1
+
+                distribution = [line.strip() for line in f_dist.readlines()]
+
+                random.seed(4)
+                train = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'train 2021']
+                random.shuffle(train)
+                random.shuffle(train)
+                random.shuffle(train)
+                train, validation = np.split(train, [int(len(train)*0.7096)])
+
+                test = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'test 2021']
+
+                if split == "train":
+                    allowed_ids = list(train)
+                elif split == "test":
+                    allowed_ids = list(test)
+                elif split == "validation":
+                    allowed_ids = list(validation)
+
+                for r in all_res.values():
+                    if r["document_id"]+'.txt' in allowed_ids:
+                        yield r["id"], r
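+
+        # NER config: re-parse each brat file, project the entities onto
+        # tokens, convert the tags to IOB2, and emit one example per sentence.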
+        elif self.config.name.find("ner") != -1:
+
+            all_res = []
+
+            key = 0
+
+            with open(os.path.join(data_dir, 'distribution-corpus.txt')) as f_dist:
+
+                distribution = [line.strip() for line in f_dist.readlines()]
+
+                random.seed(4)
+                train = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'train 2021']
+                random.shuffle(train)
+                random.shuffle(train)
+                random.shuffle(train)
+                train, validation = np.split(train, [int(len(train)*0.73)])
+                test = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'test 2021']
+
+                ann_path = Path(data_dir) / "DEFT-cas-cliniques"
+
+                for guid, txt_file in enumerate(sorted(ann_path.glob("*.txt"))):
+
+                    brat_example = self.parse_brat_file(txt_file, parse_notes=True)
+
+                    source_example = self._to_source_example(brat_example)
+
+                    prod_format = self.convert_to_prodigy(source_example, _LABELS_BASE)
+
+                    hf_format = self.convert_to_hf_format(prod_format)
+
+                    hf_split = self.split_sentences(hf_format)
+
+                    for h in hf_split:
+
+                        all_res.append({
+                            "id": str(key),
+                            "document_id": h['document_id'],
+                            "tokens": h['tokens'],
+                            "ner_tags": h['ner_tags'],
+                        })
+
+                        key += 1
+
+                if split == "train":
+                    allowed_ids = list(train)
+                elif split == "validation":
+                    allowed_ids = list(validation)
+                elif split == "test":
+                    allowed_ids = list(test)
+
+                print("train", len(train))
+                print("validation", len(validation))
+                print("test", len(test))
+
+                for r in all_res:
+                    if r["document_id"]+'.txt' in allowed_ids:
+                        yield r["id"], r
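
A minimal smoke test for the two configurations (a sketch, with assumptions: the script filename and corpus path are placeholders, and the "cls"/"ner" config names are inferred from the branches above, since the full BUILDER_CONFIGS list is truncated in this diff):

    from datasets import load_dataset

    # Token-level examples: "tokens" aligned with IOB2 "ner_tags"
    ner = load_dataset("DEFT2021.py", name="ner", data_dir="/path/to/deft2021")
    # Document-level examples: "specialities" and "specialities_one_hot"
    cls = load_dataset("DEFT2021.py", name="cls", data_dir="/path/to/deft2021")
    print(ner["train"][0]["tokens"][:8], ner["train"][0]["ner_tags"][:8])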