Update MANTRAGSC.py
Fixed missing labels for other languages and updated labels order to prepare for parquet conversion.
- MANTRAGSC.py +41 -21
MANTRAGSC.py
CHANGED
|
@@ -25,7 +25,6 @@ import xmltodict
|
|
| 25 |
import numpy as np
|
| 26 |
|
| 27 |
import datasets
|
| 28 |
-
from filelock import FileLock
|
| 29 |
|
| 30 |
_CITATION = """\
|
| 31 |
@article{10.1093/jamia/ocv037,
|
|
@@ -86,6 +85,7 @@ _DATASET_TYPES = {
|
|
| 86 |
"patents": "Patent",
|
| 87 |
}
|
| 88 |
|
|
|
|
| 89 |
@dataclass
|
| 90 |
class DrBenchmarkConfig(datasets.BuilderConfig):
|
| 91 |
name: str = None
|
|
@@ -94,6 +94,7 @@ class DrBenchmarkConfig(datasets.BuilderConfig):
|
|
| 94 |
schema: str = None
|
| 95 |
subset_id: str = None
|
| 96 |
|
|
|
|
| 97 |
class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
| 98 |
|
| 99 |
SOURCE_VERSION = datasets.Version("1.0.0")
|
|
@@ -102,12 +103,13 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 102 |
|
| 103 |
for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
|
| 104 |
|
| 105 |
-
|
|
|
|
| 106 |
continue
|
| 107 |
|
| 108 |
BUILDER_CONFIGS.append(
|
| 109 |
DrBenchmarkConfig(
|
| 110 |
-
name=
|
| 111 |
version=SOURCE_VERSION,
|
| 112 |
description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
|
| 113 |
schema="source",
|
|
@@ -118,13 +120,34 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 118 |
DEFAULT_CONFIG_NAME = "fr_medline"
|
| 119 |
|
| 120 |
def _info(self):
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
|
| 129 |
features = datasets.Features(
|
| 130 |
{
|
|
@@ -132,7 +155,7 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 132 |
"tokens": [datasets.Value("string")],
|
| 133 |
"ner_tags": datasets.Sequence(
|
| 134 |
datasets.features.ClassLabel(
|
| 135 |
-
names
|
| 136 |
)
|
| 137 |
),
|
| 138 |
}
|
|
@@ -150,10 +173,7 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 150 |
|
| 151 |
language, dataset_type = self.config.name.split("_")
|
| 152 |
|
| 153 |
-
|
| 154 |
-
# cf. https://github.com/huggingface/datasets/issues/4661#issuecomment-2792885416
|
| 155 |
-
with FileLock(Path(datasets.config.HF_CACHE_HOME) / "tmp_MANTRAGSC.lock"):
|
| 156 |
-
data_dir = dl_manager.download_and_extract(_URL)
|
| 157 |
data_dir = Path(data_dir) / "GSC-v1.1" / f"{_DATASET_TYPES[dataset_type]}_GSC_{language}_man.xml"
|
| 158 |
|
| 159 |
return [
|
|
@@ -189,7 +209,7 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 189 |
|
| 190 |
for d in doc["Corpus"]["document"]:
|
| 191 |
|
| 192 |
-
if
|
| 193 |
d["unit"] = [d["unit"]]
|
| 194 |
|
| 195 |
for u in d["unit"]:
|
|
@@ -198,7 +218,7 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 198 |
|
| 199 |
if "e" in u.keys():
|
| 200 |
|
| 201 |
-
if
|
| 202 |
u["e"] = [u["e"]]
|
| 203 |
|
| 204 |
tags = [{
|
|
@@ -214,7 +234,7 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 214 |
tokens = []
|
| 215 |
for i, t in enumerate(_tokens):
|
| 216 |
|
| 217 |
-
concat = " ".join(_tokens[0:i+1])
|
| 218 |
|
| 219 |
offset_start = len(concat) - len(t)
|
| 220 |
offset_end = len(concat)
|
|
@@ -233,8 +253,8 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 233 |
|
| 234 |
for idx, token in enumerate(tokens):
|
| 235 |
|
| 236 |
-
rtok = range(token["offset_start"], token["offset_end"]+1)
|
| 237 |
-
rtag = range(tag["offset_start"], tag["offset_end"]+1)
|
| 238 |
|
| 239 |
# Check if the ranges are overlapping
|
| 240 |
if bool(set(rtok) & set(rtag)):
|
|
@@ -273,7 +293,7 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 273 |
random.shuffle(ids)
|
| 274 |
random.shuffle(ids)
|
| 275 |
|
| 276 |
-
train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
|
| 277 |
|
| 278 |
if split == "train":
|
| 279 |
allowed_ids = list(train)
|
|
|
|
| 25 |
import numpy as np
|
| 26 |
|
| 27 |
import datasets
|
|
|
|
| 28 |
|
| 29 |
_CITATION = """\
|
| 30 |
@article{10.1093/jamia/ocv037,
|
|
|
|
| 85 |
"patents": "Patent",
|
| 86 |
}
|
| 87 |
|
| 88 |
+
|
| 89 |
@dataclass
|
| 90 |
class DrBenchmarkConfig(datasets.BuilderConfig):
|
| 91 |
name: str = None
|
|
|
|
| 94 |
schema: str = None
|
| 95 |
subset_id: str = None
|
| 96 |
|
| 97 |
+
|
| 98 |
class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
| 99 |
|
| 100 |
SOURCE_VERSION = datasets.Version("1.0.0")
|
|
|
|
| 103 |
|
| 104 |
for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
|
| 105 |
|
| 106 |
+
name = f"{language}_{dataset_type}"
|
| 107 |
+
if name in ['nl_patents', 'es_patents', 'en_medline']:
|
| 108 |
continue
|
| 109 |
|
| 110 |
BUILDER_CONFIGS.append(
|
| 111 |
DrBenchmarkConfig(
|
| 112 |
+
name=name,
|
| 113 |
version=SOURCE_VERSION,
|
| 114 |
description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
|
| 115 |
schema="source",
|
|
|
|
| 120 |
DEFAULT_CONFIG_NAME = "fr_medline"
|
| 121 |
|
| 122 |
def _info(self):
|
| 123 |
+
# Label definition for each task
|
| 124 |
+
# Goals:
|
| 125 |
+
# - Tasks must not have extra labels (not present in their corpus)
|
| 126 |
+
# - Labels should have (roughly) the same index
|
| 127 |
+
|
| 128 |
+
# Labels common to every task (ordered by name and B-I)
|
| 129 |
+
common_names = ['O', 'B-ANAT', 'B-CHEM', 'I-CHEM', 'B-DEVI', 'B-DISO', 'I-DISO', 'B-LIVB', 'I-LIVB', 'B-OBJC', 'B-PHEN', 'B-PHYS', 'I-PHYS', 'B-PROC', 'I-PROC']
|
| 130 |
+
# Adding labels not common to every task (in an order that maximises labels having the same index across tasks)
|
| 131 |
+
names = common_names + ["I-ANAT", "I-DEVI", "B-GEOG", "I-PHEN", "I-OBJC"]
|
| 132 |
+
unused_name_map = {
|
| 133 |
+
'de_emea': {'B-GEOG', 'I-OBJC'},
|
| 134 |
+
'en_emea': {'B-GEOG', 'I-OBJC'},
|
| 135 |
+
'es_emea': {'B-GEOG', 'I-OBJC'},
|
| 136 |
+
'fr_emea': {'B-GEOG', 'I-OBJC'},
|
| 137 |
+
'nl_emea': {'B-GEOG', 'I-OBJC'},
|
| 138 |
+
|
| 139 |
+
'de_medline': {'I-DEVI', 'I-PHEN'},
|
| 140 |
+
'es_medline': {'I-DEVI', 'I-OBJC'},
|
| 141 |
+
'fr_medline': {'I-OBJC', 'I-PHEN'},
|
| 142 |
+
'nl_medline': {'I-DEVI'},
|
| 143 |
+
|
| 144 |
+
'fr_patents': {'B-GEOG', 'I-OBJC', 'I-PHEN'},
|
| 145 |
+
'de_patents': {'B-GEOG', 'I-OBJC', 'I-PHEN', 'I-ANAT', 'I-DEVI'},
|
| 146 |
+
'en_patents': {'B-GEOG', 'I-OBJC', 'I-PHEN'}
|
| 147 |
+
}
|
| 148 |
+
names = [n for n in names if n not in unused_name_map.get(self.config.name, {})]
|
| 149 |
+
|
| 150 |
+
print(self.config.name)
|
| 151 |
|
| 152 |
features = datasets.Features(
|
| 153 |
{
|
|
|
|
| 155 |
"tokens": [datasets.Value("string")],
|
| 156 |
"ner_tags": datasets.Sequence(
|
| 157 |
datasets.features.ClassLabel(
|
| 158 |
+
names=names,
|
| 159 |
)
|
| 160 |
),
|
| 161 |
}
|
|
|
|
| 173 |
|
| 174 |
language, dataset_type = self.config.name.split("_")
|
| 175 |
|
| 176 |
+
data_dir = dl_manager.download_and_extract(_URL)
|
|
|
|
|
|
|
|
|
|
| 177 |
data_dir = Path(data_dir) / "GSC-v1.1" / f"{_DATASET_TYPES[dataset_type]}_GSC_{language}_man.xml"
|
| 178 |
|
| 179 |
return [
|
|
|
|
| 209 |
|
| 210 |
for d in doc["Corpus"]["document"]:
|
| 211 |
|
| 212 |
+
if not isinstance(d["unit"], list):
|
| 213 |
d["unit"] = [d["unit"]]
|
| 214 |
|
| 215 |
for u in d["unit"]:
|
|
|
|
| 218 |
|
| 219 |
if "e" in u.keys():
|
| 220 |
|
| 221 |
+
if not isinstance(u["e"], list):
|
| 222 |
u["e"] = [u["e"]]
|
| 223 |
|
| 224 |
tags = [{
|
|
|
|
| 234 |
tokens = []
|
| 235 |
for i, t in enumerate(_tokens):
|
| 236 |
|
| 237 |
+
concat = " ".join(_tokens[0:i + 1])
|
| 238 |
|
| 239 |
offset_start = len(concat) - len(t)
|
| 240 |
offset_end = len(concat)
|
|
|
|
| 253 |
|
| 254 |
for idx, token in enumerate(tokens):
|
| 255 |
|
| 256 |
+
rtok = range(token["offset_start"], token["offset_end"] + 1)
|
| 257 |
+
rtag = range(tag["offset_start"], tag["offset_end"] + 1)
|
| 258 |
|
| 259 |
# Check if the ranges are overlapping
|
| 260 |
if bool(set(rtok) & set(rtag)):
|
|
|
|
| 293 |
random.shuffle(ids)
|
| 294 |
random.shuffle(ids)
|
| 295 |
|
| 296 |
+
train, validation, test = np.split(ids, [int(len(ids) * 0.70), int(len(ids) * 0.80)])
|
| 297 |
|
| 298 |
if split == "train":
|
| 299 |
allowed_ids = list(train)
|