fernando-peres
committed on
Commit
·
2081858
1
Parent(s):
e37fdb2
resolving card bugs
Browse files- py_legislation.py +21 -9
py_legislation.py
CHANGED
|
@@ -275,40 +275,52 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
|
|
| 275 |
# TODO: labeled subset has two splits
|
| 276 |
urls_to_download = _metadata["urls"]
|
| 277 |
|
| 278 |
-
|
| 279 |
|
| 280 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
| 281 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 282 |
generators = [
|
| 283 |
datasets.SplitGenerator(
|
| 284 |
name=datasets.Split.TRAIN,
|
| 285 |
gen_kwargs={
|
| 286 |
-
"filepath": downloaded_files["raw_text"],
|
| 287 |
-
"split":"train"
|
| 288 |
},
|
| 289 |
),
|
| 290 |
|
| 291 |
datasets.SplitGenerator(
|
| 292 |
name=datasets.Split.TRAIN,
|
| 293 |
gen_kwargs={
|
| 294 |
-
"filepath": downloaded_files["unlabeled_sentences"],
|
| 295 |
-
"split":"train"
|
| 296 |
}
|
| 297 |
),
|
| 298 |
|
| 299 |
datasets.SplitGenerator(
|
| 300 |
name=datasets.Split.TRAIN,
|
| 301 |
gen_kwargs={
|
| 302 |
-
"filepath": downloaded_files["labeled_sentences_train"],
|
| 303 |
-
"split":"train"
|
| 304 |
},
|
| 305 |
),
|
| 306 |
|
| 307 |
datasets.SplitGenerator(
|
| 308 |
name=datasets.Split.TEST,
|
| 309 |
gen_kwargs={
|
| 310 |
-
"filepath": downloaded_files["labeled_sentences_test"],
|
| 311 |
-
"split":"test"
|
| 312 |
},
|
| 313 |
)
|
| 314 |
]
|
|
|
|
| 275 |
# TODO: labeled subset has two splits
|
| 276 |
urls_to_download = _metadata["urls"]
|
| 277 |
|
| 278 |
+
root = os.getcwd()
|
| 279 |
|
| 280 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
| 281 |
|
| 282 |
+
rt_train = dl_manager.download_and_extract(
|
| 283 |
+
os.path.join(root, _metadata["urls"]["raw_text"])
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
us_train = dl_manager.download_and_extract(
|
| 287 |
+
os.path.join(root, _metadata["urls"]["unlabeled_sentences"])
|
| 288 |
+
)
|
| 289 |
+
|
| 290 |
+
ls_train = dl_manager.download_and_extract(
|
| 291 |
+
os.path.join(root, _metadata["urls"]["labeled_sentences_train"])
|
| 292 |
+
)
|
| 293 |
+
|
| 294 |
+
ls_test = dl_manager.download_and_extract(
|
| 295 |
+
os.path.join(root, _metadata["urls"]["labeled_sentences_test"])
|
| 296 |
+
)
|
| 297 |
+
|
| 298 |
generators = [
|
| 299 |
datasets.SplitGenerator(
|
| 300 |
name=datasets.Split.TRAIN,
|
| 301 |
gen_kwargs={
|
| 302 |
+
"filepath": rt_train # downloaded_files["raw_text"],
|
|
|
|
| 303 |
},
|
| 304 |
),
|
| 305 |
|
| 306 |
datasets.SplitGenerator(
|
| 307 |
name=datasets.Split.TRAIN,
|
| 308 |
gen_kwargs={
|
| 309 |
+
"filepath": us_train # downloaded_files["unlabeled_sentences"],
|
|
|
|
| 310 |
}
|
| 311 |
),
|
| 312 |
|
| 313 |
datasets.SplitGenerator(
|
| 314 |
name=datasets.Split.TRAIN,
|
| 315 |
gen_kwargs={
|
| 316 |
+
"filepath": ls_train # downloaded_files["labeled_sentences_train"],
|
|
|
|
| 317 |
},
|
| 318 |
),
|
| 319 |
|
| 320 |
datasets.SplitGenerator(
|
| 321 |
name=datasets.Split.TEST,
|
| 322 |
gen_kwargs={
|
| 323 |
+
"filepath": ls_test # downloaded_files["labeled_sentences_test"],
|
|
|
|
| 324 |
},
|
| 325 |
)
|
| 326 |
]
|