ygorg committed on
Commit
0ce56e6
·
verified ·
1 Parent(s): add045c

Delete loading script

Browse files
Files changed (1) hide show
  1. MANTRAGSC.py +0 -308
MANTRAGSC.py DELETED
@@ -1,308 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # pip install xmltodict
17
-
18
- import random
19
- from pathlib import Path
20
- from itertools import product
21
- from dataclasses import dataclass
22
- from typing import Dict, List, Tuple
23
-
24
- import xmltodict
25
- import numpy as np
26
-
27
- import datasets
28
-
29
# BibTeX entry for the Mantra GSC reference paper (Kors et al., JAMIA 2015).
_CITATION = """\
@article{10.1093/jamia/ocv037,
author = {Kors, Jan A and Clematide, Simon and Akhondi,
Saber A and van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
journal = {Journal of the American Medical Informatics Association},
volume = {22},
number = {5},
pages = {948-956},
year = {2015},
month = {05},
abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition.Materials
and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
cross-language consistency checks were carried out to arrive at the final annotations.Results The number of final
annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
similar to those between individual annotators and the gold standard. The automatically generated harmonized
annotation set for each language performed equally well as the best annotator for that language.Discussion The use
of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
of automatic annotation techniques.Conclusion To our knowledge, this is the first gold-standard corpus for
biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
issn = {1067-5027},
doi = {10.1093/jamia/ocv037},
url = {https://doi.org/10.1093/jamia/ocv037},
eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
}
"""

# Short description shown on the dataset card.
_DESCRIPTION = """\
We selected text units from different parallel corpora (Medline abstract titles, drug labels, biomedical patent claims)
in English, French, German, Spanish, and Dutch. Three annotators per language independently annotated the biomedical
concepts, based on a subset of the Unified Medical Language System and covering a wide range of semantic groups.
"""

# Project page of the Mantra GSC resource.
_HOMEPAGE = "https://biosemantics.erasmusmc.nl/index.php/resources/mantra-gsc"

# License identifier string (Creative Commons Attribution 4.0).
_LICENSE = "CC_BY_4p0"

# Archive containing every subset's XML file, hosted on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/DrBenchmark/MANTRAGSC/resolve/main/GSC-v1.1.zip"

# ISO 639-1 code -> language name, for the five Mantra GSC languages.
_LANGUAGES_2 = {
    "es": "Spanish",
    "fr": "French",
    "de": "German",
    "nl": "Dutch",
    "en": "English",
}

# Config-name suffix -> file-name prefix of each corpus type in the archive.
_DATASET_TYPES = {
    "emea": "EMEA",
    "medline": "Medline",
    "patents": "Patent",
}
87
-
88
-
89
- @dataclass
90
- class DrBenchmarkConfig(datasets.BuilderConfig):
91
- name: str = None
92
- version: datasets.Version = None
93
- description: str = None
94
- schema: str = None
95
- subset_id: str = None
96
-
97
-
98
- class MANTRAGSC(datasets.GeneratorBasedBuilder):
99
-
100
- SOURCE_VERSION = datasets.Version("1.0.0")
101
-
102
- BUILDER_CONFIGS = []
103
-
104
- for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
105
-
106
- name = f"{language}_{dataset_type}"
107
- if name in ['nl_patents', 'es_patents', 'en_medline']:
108
- continue
109
-
110
- BUILDER_CONFIGS.append(
111
- DrBenchmarkConfig(
112
- name=name,
113
- version=SOURCE_VERSION,
114
- description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
115
- schema="source",
116
- subset_id=f"{language}_{_DATASET_TYPES[dataset_type]}",
117
- )
118
- )
119
-
120
- DEFAULT_CONFIG_NAME = "fr_medline"
121
-
122
- def _info(self):
123
- # Label definition for each task
124
- # Goals:
125
- # - Tasks must not have extra labels (not present in their corpus)
126
- # - Labels should have (roughly) the same index
127
-
128
- # Labels common to every task (ordered by name and B-I)
129
- common_names = ['O', 'B-ANAT', 'B-CHEM', 'I-CHEM', 'B-DEVI', 'B-DISO', 'I-DISO', 'B-LIVB', 'I-LIVB', 'B-OBJC', 'B-PHEN', 'B-PHYS', 'I-PHYS', 'B-PROC', 'I-PROC']
130
- # Adding labels not common to every task (in an order that maximises labels having the same index accross tasks)
131
- names = common_names + ["I-ANAT", "I-DEVI", "B-GEOG", "I-PHEN", "I-OBJC"]
132
- unused_name_map = {
133
- 'de_emea': {'B-GEOG', 'I-OBJC'},
134
- 'en_emea': {'B-GEOG', 'I-OBJC'},
135
- 'es_emea': {'B-GEOG', 'I-OBJC'},
136
- 'fr_emea': {'B-GEOG', 'I-OBJC'},
137
- 'nl_emea': {'B-GEOG', 'I-OBJC'},
138
-
139
- 'de_medline': {'I-DEVI', 'I-PHEN'},
140
- 'es_medline': {'I-DEVI', 'I-OBJC'},
141
- 'fr_medline': {'I-OBJC', 'I-PHEN'},
142
- 'nl_medline': {'I-DEVI'},
143
-
144
- 'fr_patents': {'B-GEOG', 'I-OBJC', 'I-PHEN'},
145
- 'de_patents': {'B-GEOG', 'I-OBJC', 'I-PHEN', 'I-ANAT', 'I-DEVI'},
146
- 'en_patents': {'B-GEOG', 'I-OBJC', 'I-PHEN'}
147
- }
148
- names = [n for n in names if n not in unused_name_map.get(self.config.name, {})]
149
-
150
- print(self.config.name)
151
-
152
- features = datasets.Features(
153
- {
154
- "id": datasets.Value("string"),
155
- "tokens": [datasets.Value("string")],
156
- "ner_tags": datasets.Sequence(
157
- datasets.features.ClassLabel(
158
- names=names,
159
- )
160
- ),
161
- }
162
- )
163
-
164
- return datasets.DatasetInfo(
165
- description=_DESCRIPTION,
166
- features=features,
167
- homepage=_HOMEPAGE,
168
- license=str(_LICENSE),
169
- citation=_CITATION,
170
- )
171
-
172
- def _split_generators(self, dl_manager):
173
-
174
- language, dataset_type = self.config.name.split("_")
175
-
176
- data_dir = dl_manager.download_and_extract(_URL)
177
- data_dir = Path(data_dir) / "GSC-v1.1" / f"{_DATASET_TYPES[dataset_type]}_GSC_{language}_man.xml"
178
-
179
- return [
180
- datasets.SplitGenerator(
181
- name=datasets.Split.TRAIN,
182
- gen_kwargs={
183
- "data_dir": data_dir,
184
- "split": "train",
185
- },
186
- ),
187
- datasets.SplitGenerator(
188
- name=datasets.Split.VALIDATION,
189
- gen_kwargs={
190
- "data_dir": data_dir,
191
- "split": "validation",
192
- },
193
- ),
194
- datasets.SplitGenerator(
195
- name=datasets.Split.TEST,
196
- gen_kwargs={
197
- "data_dir": data_dir,
198
- "split": "test",
199
- },
200
- ),
201
- ]
202
-
203
- def _generate_examples(self, data_dir, split):
204
-
205
- with open(data_dir) as fd:
206
- doc = xmltodict.parse(fd.read())
207
-
208
- all_res = []
209
-
210
- for d in doc["Corpus"]["document"]:
211
-
212
- if not isinstance(d["unit"], list):
213
- d["unit"] = [d["unit"]]
214
-
215
- for u in d["unit"]:
216
-
217
- text = u["text"]
218
-
219
- if "e" in u.keys():
220
-
221
- if not isinstance(u["e"], list):
222
- u["e"] = [u["e"]]
223
-
224
- tags = [{
225
- "label": current["@grp"].upper(),
226
- "offset_start": int(current["@offset"]),
227
- "offset_end": int(current["@offset"]) + int(current["@len"]),
228
- } for current in u["e"]]
229
-
230
- else:
231
- tags = []
232
-
233
- _tokens = text.split(" ")
234
- tokens = []
235
- for i, t in enumerate(_tokens):
236
-
237
- concat = " ".join(_tokens[0:i + 1])
238
-
239
- offset_start = len(concat) - len(t)
240
- offset_end = len(concat)
241
-
242
- tokens.append({
243
- "token": t,
244
- "offset_start": offset_start,
245
- "offset_end": offset_end,
246
- })
247
-
248
- ner_tags = [["O", 0] for o in tokens]
249
-
250
- for tag in tags:
251
-
252
- cpt = 0
253
-
254
- for idx, token in enumerate(tokens):
255
-
256
- rtok = range(token["offset_start"], token["offset_end"] + 1)
257
- rtag = range(tag["offset_start"], tag["offset_end"] + 1)
258
-
259
- # Check if the ranges are overlapping
260
- if bool(set(rtok) & set(rtag)):
261
-
262
- # if ner_tags[idx] != "O" and ner_tags[idx] != tag['label']:
263
- # print(f"{token} - currently: {ner_tags[idx]} - after: {tag['label']}")
264
-
265
- if ner_tags[idx][0] == "O":
266
- cpt += 1
267
- ner_tags[idx][0] = tag["label"]
268
- ner_tags[idx][1] = cpt
269
-
270
- for i in range(len(ner_tags)):
271
-
272
- tag = ner_tags[i][0]
273
-
274
- if tag == "O":
275
- continue
276
- elif tag != "O" and ner_tags[i][1] == 1:
277
- ner_tags[i][0] = "B-" + tag
278
- elif tag != "O" and ner_tags[i][1] != 1:
279
- ner_tags[i][0] = "I-" + tag
280
-
281
- obj = {
282
- "id": u["@id"],
283
- "tokens": [t["token"] for t in tokens],
284
- "ner_tags": [n[0] for n in ner_tags],
285
- }
286
-
287
- all_res.append(obj)
288
-
289
- ids = [r["id"] for r in all_res]
290
-
291
- random.seed(4)
292
- random.shuffle(ids)
293
- random.shuffle(ids)
294
- random.shuffle(ids)
295
-
296
- train, validation, test = np.split(ids, [int(len(ids) * 0.70), int(len(ids) * 0.80)])
297
-
298
- if split == "train":
299
- allowed_ids = list(train)
300
- elif split == "validation":
301
- allowed_ids = list(validation)
302
- elif split == "test":
303
- allowed_ids = list(test)
304
-
305
- for r in all_res:
306
- identifier = r["id"]
307
- if identifier in allowed_ids:
308
- yield identifier, r