fernando-peres committed on
Commit
e37fdb2
·
1 Parent(s): 2310e07

resolving card bugs

Browse files
Files changed (2) hide show
  1. README.md +1 -5
  2. py_legislation.py +16 -4
README.md CHANGED
@@ -8,11 +8,7 @@ task_categories:
8
  - text-classification
9
  tags:
10
  - legal
11
- configs:
12
- - config_name: raw_text
13
- data_files:
14
- - split: train
15
- path: raw_text/train-*
16
 
17
  ---
18
 
 
8
  - text-classification
9
  tags:
10
  - legal
11
+
 
 
 
 
12
 
13
  ---
14
 
py_legislation.py CHANGED
@@ -14,6 +14,7 @@ class PY_Legislation(datasets.GeneratorBasedBuilder)
14
  Defines the implementation of Paraguay Legislation dataset builder (GeneratorBasedBuilder).
15
 
16
  """
 
17
  import textwrap
18
  from textwrap import TextWrapper
19
  import datasets
@@ -274,30 +275,41 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
274
  # TODO: labeled subset has two splits
275
  urls_to_download = _metadata["urls"]
276
 
 
 
277
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
278
 
279
  generators = [
280
  datasets.SplitGenerator(
281
  name=datasets.Split.TRAIN,
282
- gen_kwargs={"filepath": downloaded_files["raw_text"]},
 
 
 
283
  ),
284
 
285
  datasets.SplitGenerator(
286
  name=datasets.Split.TRAIN,
287
  gen_kwargs={
288
- "filepath": downloaded_files["unlabeled_sentences"]},
 
 
289
  ),
290
 
291
  datasets.SplitGenerator(
292
  name=datasets.Split.TRAIN,
293
  gen_kwargs={
294
- "filepath": downloaded_files["labeled_sentences_train"]},
 
 
295
  ),
296
 
297
  datasets.SplitGenerator(
298
  name=datasets.Split.TEST,
299
  gen_kwargs={
300
- "filepath": downloaded_files["labeled_sentences_test"]},
 
 
301
  )
302
  ]
303
 
 
14
  Defines the implementation of Paraguay Legislation dataset builder (GeneratorBasedBuilder).
15
 
16
  """
17
+ import os
18
  import textwrap
19
  from textwrap import TextWrapper
20
  import datasets
 
275
  # TODO: labeled subset has two splits
276
  urls_to_download = _metadata["urls"]
277
 
278
+ directory = os.getcwd()
279
+
280
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
281
 
282
  generators = [
283
  datasets.SplitGenerator(
284
  name=datasets.Split.TRAIN,
285
+ gen_kwargs={
286
+ "filepath": downloaded_files["raw_text"],
287
+ "split":"train"
288
+ },
289
  ),
290
 
291
  datasets.SplitGenerator(
292
  name=datasets.Split.TRAIN,
293
  gen_kwargs={
294
+ "filepath": downloaded_files["unlabeled_sentences"],
295
+ "split":"train"
296
+ }
297
  ),
298
 
299
  datasets.SplitGenerator(
300
  name=datasets.Split.TRAIN,
301
  gen_kwargs={
302
+ "filepath": downloaded_files["labeled_sentences_train"],
303
+ "split":"train"
304
+ },
305
  ),
306
 
307
  datasets.SplitGenerator(
308
  name=datasets.Split.TEST,
309
  gen_kwargs={
310
+ "filepath": downloaded_files["labeled_sentences_test"],
311
+ "split":"test"
312
+ },
313
  )
314
  ]
315