fernando-peres
committed on
Commit
·
1368fc3
1
Parent(s):
bab7bc3
resolving card bugs
Browse files- py_legislation.py +9 -10
py_legislation.py
CHANGED
|
@@ -31,9 +31,10 @@ _URLS = {
|
|
| 31 |
|
| 32 |
# [>] URLS:
|
| 33 |
|
|
|
|
| 34 |
_URLS = {
|
| 35 |
-
"raw_text": "
|
| 36 |
-
"unlabeled_sentences": "
|
| 37 |
# "labeled_sentences_train": "./labeled_sentences/train.parquet",
|
| 38 |
# "labeled_sentences_test": "./labeled_sentences/test.parquet",
|
| 39 |
}
|
|
@@ -244,8 +245,9 @@ class PYLegislation(datasets.GeneratorBasedBuilder):
|
|
| 244 |
features = None
|
| 245 |
description = None
|
| 246 |
if self.config.name in _CONFIGS.keys():
|
| 247 |
-
features = datasets.Features(
|
| 248 |
-
|
|
|
|
| 249 |
|
| 250 |
# if self.config.name == "raw_text":
|
| 251 |
# description = _metadata["raw_text"]["description"]
|
|
@@ -289,9 +291,9 @@ class PYLegislation(datasets.GeneratorBasedBuilder):
|
|
| 289 |
fp = dl_manager.download_and_extract(urls_to_download[k])
|
| 290 |
generators.append(
|
| 291 |
datasets.SplitGenerator(
|
| 292 |
-
name=datasets.Split.TRAIN,gen_kwargs={"filepath": fp}))
|
| 293 |
|
| 294 |
-
#us_train = dl_manager.download_and_extract(
|
| 295 |
# urls_to_download["unlabeled_sentences"])
|
| 296 |
|
| 297 |
# ls_train = dl_manager.download_and_extract(
|
|
@@ -300,10 +302,7 @@ class PYLegislation(datasets.GeneratorBasedBuilder):
|
|
| 300 |
# ls_test = dl_manager.download_and_extract(
|
| 301 |
# urls_to_download["labeled_sentences_test"])
|
| 302 |
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
#generators.append(
|
| 307 |
# datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": us_train}))
|
| 308 |
|
| 309 |
# datasets.SplitGenerator(
|
|
|
|
| 31 |
|
| 32 |
# [>] URLS:
|
| 33 |
|
| 34 |
+
URL = "https://huggingface.co/datasets/fernandoperes/py_legislation/raw/main"
|
| 35 |
_URLS = {
|
| 36 |
+
"raw_text": f"{URL}/raw_text/train.parquet",
|
| 37 |
+
"unlabeled_sentences": f"{URL}/unlabeled_sentences/train.parquet",
|
| 38 |
# "labeled_sentences_train": "./labeled_sentences/train.parquet",
|
| 39 |
# "labeled_sentences_test": "./labeled_sentences/test.parquet",
|
| 40 |
}
|
|
|
|
| 245 |
features = None
|
| 246 |
description = None
|
| 247 |
if self.config.name in _CONFIGS.keys():
|
| 248 |
+
features = datasets.Features(
|
| 249 |
+
_CONFIGS[self.config.name]["features"])
|
| 250 |
+
description = _CONFIGS[self.config.name]["description"]
|
| 251 |
|
| 252 |
# if self.config.name == "raw_text":
|
| 253 |
# description = _metadata["raw_text"]["description"]
|
|
|
|
| 291 |
fp = dl_manager.download_and_extract(urls_to_download[k])
|
| 292 |
generators.append(
|
| 293 |
datasets.SplitGenerator(
|
| 294 |
+
name=datasets.Split.TRAIN, gen_kwargs={"filepath": fp}))
|
| 295 |
|
| 296 |
+
# us_train = dl_manager.download_and_extract(
|
| 297 |
# urls_to_download["unlabeled_sentences"])
|
| 298 |
|
| 299 |
# ls_train = dl_manager.download_and_extract(
|
|
|
|
| 302 |
# ls_test = dl_manager.download_and_extract(
|
| 303 |
# urls_to_download["labeled_sentences_test"])
|
| 304 |
|
| 305 |
+
# generators.append(
|
|
|
|
|
|
|
|
|
|
| 306 |
# datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": us_train}))
|
| 307 |
|
| 308 |
# datasets.SplitGenerator(
|