Upload texprax_dataset.py
texprax_dataset.py (+10 -30)
@@ -42,36 +42,16 @@ _LICENSE = "Creative Commons Attribution-NonCommercial 4.0"
 
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "batch-1-zerspanung-sentences": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/zerspanung_sents_batch_1.csv",
-    "batch-1-industrie-sentences": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/industrie_sents_batch_1.csv",
-    "batch-2-sentences": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/sents_batch_2.csv",
-    "batch-3-sentences": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/sents_batch_3.csv",
-    "batch-1-zerspanung-entities": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/zerspanung_entities_batch_1.csv",
-    "batch-1-industrie-entities": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/industrie_entities_batch_1.csv",
-    "batch-2-entities": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/entities_batch_2.csv",
-    "batch-3-entities": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/entities_batch_3.csv",
-}
 
-
-    "batch-1-zerspanung-sentences": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/zerspanung_sents_batch_1.csv",
-    "batch-1-industrie-sentences": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/industrie_sents_batch_1.csv",
-    "batch-2-sentences": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/sents_batch_2.csv",
-    "batch-3-sentences": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/sents_batch_3.csv",
-}
+_SENTENCE_URL = "https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/3534/texprax-sentences.zip?sequence=8&isAllowed=y"
 
-
-    "batch-1-zerspanung-entities": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/zerspanung_entities_batch_1.csv",
-    "batch-1-industrie-entities": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/industrie_entities_batch_1.csv",
-    "batch-2-entities": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/entities_batch_2.csv",
-    "batch-3-entities": "https://huggingface.co/datasets/UKPLab/TexPrax/blob/main/entities_batch_3.csv",
-}
+_ENTITY_URL = "https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/3534/texprax-ner.zip?sequence=9&isAllowed=y"
 
 class TexPraxConfig(datasets.BuilderConfig):
     """BuilderConfig for SuperGLUE."""
 
     def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
-        """BuilderConfig for
+        """BuilderConfig for TexPrax.
 
         Args:
             features: *list[string]*, list of the features that will appear in the
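The removed `_URLS` dict (plus the two stray, brace-less copies of its entries that followed it and broke the module) pointed at `blob/main` pages on huggingface.co, which return HTML rather than the raw CSVs. The replacement downloads two zip archives from TUdatalib instead. A minimal sketch of how such an archive URL is consumed, assuming only standard `DownloadManager` behavior (the manager is normally handed to `_split_generators` by the library rather than built by hand):

    import os
    import datasets

    dl_manager = datasets.DownloadManager()
    # download_and_extract() fetches the zip once, unpacks it into the local
    # HF cache, and returns the path of the extraction directory.
    data_dir = dl_manager.download_and_extract(_SENTENCE_URL)
    # Individual batch CSVs are then addressed relative to that directory:
    print(os.path.join(data_dir, "sents_batch_3.csv"))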

@@ -123,8 +103,8 @@ class TexPraxDataset(datasets.GeneratorBasedBuilder):
         TexPraxConfig(
             name="named_entity_recognition",
             description="Sentence level annotations of the TexPrax dataset.",
-            features=["
-            data_url=
+            features=["tokens"],
+            data_url=_ENTITY_URL,
         ),
         #datasets.BuilderConfig(name="sentence_classification", version=VERSION, description="Sentence level annotations of the TexPrax dataset."),
         #datasets.BuilderConfig(name="named_entity_recognition", version=VERSION, description="BIO-tagged named entites of the TexPrax dataset."),
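The NER config now declares its feature list and source archive explicitly. (Its `description` still reads "Sentence level annotations", likely a copy-paste slip from the sentence config; the commented-out line below describes it as BIO-tagged entities.) The `features` list is expanded into a full schema in the builder's `_info()`, which this diff does not touch; a hypothetical expansion, with column names assumed rather than taken from the script:

    import datasets

    # Assumed schema sketch; only ["tokens"] is declared in the diff.
    features = datasets.Features({
        "tokens": datasets.Sequence(datasets.Value("string")),
        # BIO-tagged data usually carries a parallel tag column, e.g.:
        # "ner_tags": datasets.Sequence(datasets.ClassLabel(names=[...])),
    })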

@@ -177,7 +157,7 @@ class TexPraxDataset(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         if self.config.name == "sentence_classification":
-            urls =
+            urls = _SENTENCE_URL
             data_dir = dl_manager.download_and_extract(urls)
             return [
                 datasets.SplitGenerator(

@@ -208,13 +188,13 @@ class TexPraxDataset(datasets.GeneratorBasedBuilder):
                     name=datasets.Split.TEST,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "filepath": os.path.join(data_dir, "
+                        "filepath": os.path.join(data_dir, "sents_batch_3.csv"),
                         "split": "batch-3"
                     },
                 ),
             ]
         else:
-            urls =
+            urls = _ENTITY_URL
             data_dir = dl_manager.download_and_extract(urls)
             return [
                 datasets.SplitGenerator(
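Together with the entity-branch hunks below, the batch-to-split mapping is now consistent across both configs: batch 2 serves as validation and batch 3 as test, with batch 1 presumably filling the train split in a part of the file this diff does not show.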

@@ -237,7 +217,7 @@ class TexPraxDataset(datasets.GeneratorBasedBuilder):
                     name=datasets.Split.VALIDATION,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "filepath": os.path.join(data_dir, "
+                        "filepath": os.path.join(data_dir, "entities_batch_2.csv"),
                         "split": "batch-2",
                     },
                 ),

@@ -245,7 +225,7 @@ class TexPraxDataset(datasets.GeneratorBasedBuilder):
                     name=datasets.Split.TEST,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "filepath": os.path.join(data_dir, "
+                        "filepath": os.path.join(data_dir, "entities_batch_3.csv"),
                         "split": "batch-3"
                     },
                 ),
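Each `gen_kwargs` dict above is passed straight to `_generate_examples`, which the commit leaves untouched. A hedged sketch of its likely shape, with CSV column names that are assumptions rather than the script's actual schema:

    import csv

    def _generate_examples(self, filepath, split):
        # filepath/split arrive from the gen_kwargs in _split_generators.
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(csv.DictReader(f)):
                # Assumed columns for the sentence config; the entity config
                # would yield token/tag sequences instead.
                yield key, {"sentence": row["sentence"], "label": row["label"]}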
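With the truncated assignments filled in, the script should run end to end. Example usage from the Hub, with config names taken from this diff (recent `datasets` releases additionally require `trust_remote_code=True` for script-based datasets):

    from datasets import load_dataset

    sents = load_dataset("UKPLab/TexPrax", "sentence_classification")
    ner = load_dataset("UKPLab/TexPrax", "named_entity_recognition")
    print(sents)  # expected: a DatasetDict with train/validation/test splits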