ygorg committed on
Commit
79f2827
·
1 Parent(s): 19789a7

Keep original files for reproduction.

Browse files
Files changed (2) hide show
  1. _attic/ESSAI.py +369 -0
  2. _attic/data.zip +3 -0
_attic/ESSAI.py ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import random

import datasets
import numpy as np

# BibTeX entry credited for the corpus (Clément Dalloux's annotated datasets).
_CITATION = """\
@misc{
dalloux,
title={Datasets – Clément Dalloux},
url={http://clementdalloux.fr/?page_id=28},
journal={Clément Dalloux},
author={Dalloux, Clément}
}
"""

# Dataset card description shown on the Hub; describes both the ESSAI and CAS
# corpora, but this loader only exposes the annotated ESSAI corpus.
_DESCRIPTION = """\
We manually annotated two corpora from the biomedical field. The ESSAI corpus \
contains clinical trial protocols in French. They were mainly obtained from the \
National Cancer Institute The typical protocol consists of two parts: the \
summary of the trial, which indicates the purpose of the trial and the methods \
applied; and a detailed description of the trial with the inclusion and \
exclusion criteria. The CAS corpus contains clinical cases published in \
scientific literature and training material. They are published in different \
journals from French-speaking countries (France, Belgium, Switzerland, Canada, \
African countries, tropical countries) and are related to various medical \
specialties (cardiology, urology, oncology, obstetrics, pulmonology, \
gastro-enterology). The purpose of clinical cases is to describe clinical \
situations of patients. Hence, their content is close to the content of clinical \
narratives (description of diagnoses, treatments or procedures, evolution, \
family history, expected audience, etc.). In clinical cases, the negation is \
frequently used for describing the patient signs, symptoms, and diagnosis. \
Speculation is present as well but less frequently.

This version only contain the annotated ESSAI corpus
"""

_HOMEPAGE = "https://clementdalloux.fr/?page_id=28"

_LICENSE = 'Data User Agreement'

# Archive shipped next to this script; extracted by the download manager.
_URL = "data.zip"
class ESSAI(datasets.GeneratorBasedBuilder):
    """``datasets`` builder for the ESSAI corpus (French clinical trial protocols).

    Four configurations are exposed:

    * ``pos``      -- token-level part-of-speech tagging,
    * ``cls``      -- document-level negation / speculation classification,
    * ``ner_spec`` -- cue / scope tagging for speculation,
    * ``ner_neg``  -- cue / scope tagging for negation.
    """

    DEFAULT_CONFIG_NAME = "pos"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="pos", version="1.0.0",
                               description="The ESSAI corpora - POS Speculation task"),

        datasets.BuilderConfig(name="cls", version="1.0.0",
                               description="The ESSAI corpora - CLS Negation / Speculation task"),

        datasets.BuilderConfig(name="ner_spec", version="1.0.0",
                               description="The ESSAI corpora - NER Speculation task"),
        datasets.BuilderConfig(name="ner_neg", version="1.0.0",
                               description="The ESSAI corpora - NER Negation task"),
    ]

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` for the active configuration."""

        if "pos" in self.config.name:

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "lemmas": [datasets.Value("string")],
                    # TreeTagger-style tag set, prefixed with "B-" by the
                    # generator below.
                    "pos_tags": [datasets.features.ClassLabel(
                        names=[
                            'B-ABR', 'B-ADJ', 'B-ADV', 'B-DET:ART', 'B-DET:POS', 'B-INT',
                            'B-KON', 'B-NAM', 'B-NN', 'B-NOM', 'B-NUM', 'B-PREF', 'B-PRO',
                            'B-PRO:DEM', 'B-PRO:IND', 'B-PRO:PER', 'B-PRO:POS',
                            'B-PRO:REL', 'B-PRP', 'B-PRP:det', 'B-PUN', 'B-PUN:cit',
                            'B-SENT', 'B-SYM', 'B-VER:', 'B-VER:cond', 'B-VER:futu',
                            'B-VER:impf', 'B-VER:infi', 'B-VER:pper', 'B-VER:ppre',
                            'B-VER:pres', 'B-VER:simp', 'B-VER:subi', 'B-VER:subp'
                        ],
                    )],
                }
            )

        elif "cls" in self.config.name:

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "label": datasets.features.ClassLabel(
                        names=['negation_speculation', 'negation', 'neutral', 'speculation'],
                    ),
                }
            )

        elif "ner" in self.config.name:

            # The tag inventory depends on the annotated phenomenon.
            if "_spec" in self.config.name:
                names = ['O', 'B_cue_spec', 'B_scope_spec', 'I_scope_spec']
            elif "_neg" in self.config.name:
                names = ['O', 'B_cue_neg', 'B_scope_neg', 'I_scope_neg']

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "lemmas": [datasets.Value("string")],
                    "ner_tags": [datasets.features.ClassLabel(
                        names=names,
                    )],
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract ``data.zip`` and declare the train/validation/test splits.

        All three generators receive the same extracted directory; the actual
        partitioning is performed inside :meth:`_generate_examples`.
        """

        data_dir = dl_manager.download_and_extract(_URL).rstrip("/")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, datadir, split):
        """Yield ``(key, example)`` pairs for *split*.

        Every document of the corpus is first collected into ``all_res``;
        a deterministic 70/10/20 partition of the document ids is then drawn
        (seeded shuffle) and only the examples belonging to the requested
        split are yielded.
        """

        all_res = []

        key = 0

        # "ner_spec" -> "spec", "ner_neg" -> "neg"; selects the input file.
        subset = self.config.name.split("_")[-1]

        unique_id_doc = []

        if "ner" in self.config.name:
            docs = [f"ESSAI_{subset}.txt"]
        else:
            # POS and CLS read both annotation files (same documents, the
            # last column differs).
            docs = ["ESSAI_neg.txt", "ESSAI_spec.txt"]

        for file in docs:

            filename = os.path.join(datadir, file)

            if "pos" in self.config.name:

                id_docs = []
                id_words = []
                words = []
                lemmas = []
                POS_tags = []

                with open(filename) as f:

                    for line in f:  # stream instead of readlines()

                        splitted = line.split("\t")

                        # Skip separator / malformed lines.
                        if len(splitted) < 5:
                            continue

                        id_doc, id_word, word, lemma, tag = splitted[0:5]
                        # Wider rows carry the POS tag in column 7.
                        if len(splitted) >= 8:
                            tag = splitted[6]

                        # Manual repairs of known tokenisation artefacts.
                        if lemma == "000" and tag == "@card@":
                            tag = "NUM"
                            word = "100 000"
                            lemma = "100 000"
                        elif lemma == "45" and tag == "@card@":
                            tag = "NUM"

                        id_docs.append(id_doc)
                        id_words.append(id_word)
                        words.append(word)
                        lemmas.append(lemma)
                        POS_tags.append(f'B-{tag}')

                dic = {
                    "id_docs": np.array(list(map(int, id_docs))),
                    "id_words": id_words,
                    "words": words,
                    "lemmas": lemmas,
                    "POS_tags": POS_tags,
                }

                for doc_id in set(dic["id_docs"]):

                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
                    tokens = [dic["words"][i] for i in indexes]
                    text_lemmas = [dic["lemmas"][i] for i in indexes]
                    pos_tags = [dic["POS_tags"][i] for i in indexes]

                    # Both files contain the same documents with identical
                    # POS columns: keep the first occurrence only.
                    if doc_id not in unique_id_doc:

                        all_res.append({
                            "id": str(doc_id),
                            "document_id": doc_id,
                            "tokens": tokens,
                            "lemmas": text_lemmas,
                            "pos_tags": pos_tags,
                        })
                        unique_id_doc.append(doc_id)

            elif "ner" in self.config.name:

                id_docs = []
                id_words = []
                words = []
                lemmas = []
                # Renamed from "ner_tags" so the per-document variable below
                # no longer shadows the file-level accumulator.
                file_tags = []

                with open(filename) as f:

                    for line in f:

                        if len(line.split("\t")) < 5:
                            continue

                        id_doc, id_word, word, lemma, _ = line.split("\t")[0:5]
                        tag = line.replace("\n", "").split("\t")[-1]

                        # Normalise noisy annotations to the 4-tag scheme.
                        if tag == "***" or tag == "_":
                            tag = "O"
                        elif tag == "v":
                            tag = "I_scope_spec"
                        elif tag == "z":
                            tag = "O"
                        elif tag == "I_scope_spec_":
                            tag = "I_scope_spec"

                        id_docs.append(id_doc)
                        id_words.append(id_word)
                        words.append(word)
                        lemmas.append(lemma)
                        file_tags.append(tag)

                dic = {
                    "id_docs": np.array(list(map(int, id_docs))),
                    "id_words": id_words,
                    "words": words,
                    "lemmas": lemmas,
                    "ner_tags": file_tags,
                }

                for doc_id in set(dic["id_docs"]):

                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
                    tokens = [dic["words"][i] for i in indexes]
                    text_lemmas = [dic["lemmas"][i] for i in indexes]
                    ner_tags = [dic["ner_tags"][i] for i in indexes]

                    all_res.append({
                        "id": key,
                        "document_id": doc_id,
                        "tokens": tokens,
                        "lemmas": text_lemmas,
                        "ner_tags": ner_tags,
                    })

                    key += 1

            elif "cls" in self.config.name:

                # Documents are blank-line separated blocks of TSV rows.
                with open(filename, "r") as f_in:
                    conll = [
                        [b.split("\t") for b in a.split("\n")]
                        for a in f_in.read().split("\n\n")
                    ]

                # The file name tells us which phenomenon it annotates.
                classe = "negation" if filename.find("_neg") != -1 else "speculation"

                for document in conll:

                    if document == [""]:
                        continue

                    identifier = document[0][0]

                    unique = list(set([w[-1] for w in document]))
                    tokens = [sent[2] for sent in document if len(sent) > 1]

                    if "***" in unique:
                        label = "neutral"
                    else:
                        # BUG FIX: the original only tested `elif "_" in unique`
                        # and silently reused the previous document's label when
                        # neither "***" nor "_" occurred (e.g. a document fully
                        # covered by cue/scope tags).  Any non-"***" marker means
                        # the phenomenon is annotated in this document.
                        label = classe

                    if identifier in unique_id_doc and label == 'neutral':
                        continue

                    elif identifier in unique_id_doc and label != 'neutral':

                        # Document already collected from the other file; if it
                        # was annotated there too, it carries both phenomena.
                        index_l = unique_id_doc.index(identifier)

                        if all_res[index_l]["label"] != "neutral":
                            label = "negation_speculation"

                        all_res[index_l] = {
                            "id": str(identifier),
                            "document_id": identifier,
                            "tokens": tokens,
                            "label": label,
                        }

                    else:

                        all_res.append({
                            "id": str(identifier),
                            "document_id": identifier,
                            "tokens": tokens,
                            "label": label,
                        })

                        unique_id_doc.append(identifier)

        ids = [r["id"] for r in all_res]

        # Fixed seed + repeated shuffles: the split is deterministic across runs.
        random.seed(4)
        random.shuffle(ids)
        random.shuffle(ids)
        random.shuffle(ids)

        # 70% train / 10% validation / 20% test.
        train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])

        if split == "train":
            allowed_ids = list(train)
        elif split == "validation":
            allowed_ids = list(validation)
        elif split == "test":
            allowed_ids = list(test)

        for r in all_res:
            if r["id"] in allowed_ids:
                yield r["id"], r
_attic/data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afcf4b94dae2ad4cc5e01be3592d12f1ace422629cee6f8192600f37c28b43c0
3
+ size 1911010