fernando-peres committed on
Commit
af267eb
·
1 Parent(s): f81c80d

resolving card bugs

Browse files
Files changed (2) hide show
  1. README.md +7 -25
  2. py_legislation.py +29 -26
README.md CHANGED
@@ -9,39 +9,21 @@ task_categories:
9
  tags:
10
  - legal
11
  configs:
12
- - config_name: sentences_labeled
13
  data_files:
14
  - split: train
15
- path: sentences_labeled/train*.parquet
16
- - split: test
17
- path: sentences_labeled/test*.parquet
18
  - config_name: sentences_unlabeled
19
  data_files:
20
  - split: train
21
  path: sentences_unlabeled/train*.parquet
22
- - config_name: text_raw
23
  data_files:
24
  - split: train
25
- path: text_raw/train-*
26
- dataset_info:
27
- config_name: text_raw
28
- features:
29
- - name: source_id
30
- dtype: int64
31
- - name: source_name
32
- dtype: string
33
- - name: text_id
34
- dtype: int64
35
- - name: text
36
- dtype: string
37
- - name: extension
38
- dtype: string
39
- splits:
40
- - name: train
41
- num_bytes: 605308
42
- num_examples: 867
43
- download_size: 288502
44
- dataset_size: 605308
45
  ---
46
 
47
  # Paraguay Legislation
 
9
  tags:
10
  - legal
11
  configs:
12
+ - config_name: text_raw
13
  data_files:
14
  - split: train
15
+ path: text_raw/train*.parquet
 
 
16
  - config_name: sentences_unlabeled
17
  data_files:
18
  - split: train
19
  path: sentences_unlabeled/train*.parquet
20
+ - config_name: sentences_labeled
21
  data_files:
22
  - split: train
23
+ path: labeled_sentences/train*.parquet
24
+ - split: test
25
+ path: labeled_sentences/test*.parquet
26
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  ---
28
 
29
  # Paraguay Legislation
py_legislation.py CHANGED
@@ -117,14 +117,14 @@ _metadata = {
117
  "license": "apache-2.0",
118
 
119
  "urls": {
120
- "text_raw": "./text_raw",
121
- "sentences_unlabeled": "./unlabeled",
122
- "sentences_labeled": "./labeled",
123
  },
124
 
125
  # [@] Config Names:
126
 
127
- "text_raw": {
128
  "description": textwrap.dedent("""
129
  Data extracted from the source files (URLs, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files.
130
  """),
@@ -140,7 +140,7 @@ _metadata = {
140
 
141
  },
142
 
143
- "sentences_unlabeled": {
144
  "description": textwrap.dedent("""
145
  Unlabeled corpora of Paraguay legislation. This data is prepared to be labeled by the experts. Each instance of the dataset represents a specific text passage, split by its original formatting extracted from raw text (from original documents)
146
 
@@ -165,7 +165,7 @@ _metadata = {
165
  }
166
  },
167
 
168
- "sentences_labeled": {
169
  "description": textwrap.dedent("""
170
  The labeled data is the ground truth data used to train the models. This data is annotated by legal experts indicating the existence of administrative costs (and other types) in the legislation.
171
 
@@ -197,26 +197,29 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
197
 
198
  BUILDER_CONFIGS = [
199
  datasets.BuilderConfig(
200
- name="text_raw",
 
201
  version=VERSION,
202
- description=_metadata["text_raw"]["description"],
203
  ),
204
 
205
  datasets.BuilderConfig(
206
- name="sentences_unlabeled",
207
  version=VERSION,
208
- description=_metadata["sentences_unlabeled"]["description"],
 
209
  ),
210
 
211
  datasets.BuilderConfig(
212
- name="sentences_labeled",
213
  version=VERSION,
214
- description=_metadata["sentences_labeled"]["description"],
 
215
  ),
216
  ]
217
 
218
  # It's not mandatory to have a default configuration. Just use one if it make sense.
219
- DEFAULT_CONFIG_NAME = "text_raw"
220
 
221
  # [i] Info
222
  def _info(self):
@@ -227,8 +230,8 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
227
  features = None
228
  description = ""
229
 
230
- if self.config.name == "text_raw":
231
- description = _metadata["text_raw"]["description"]
232
  features = datasets.Features(
233
  {
234
  "source_id": datasets.Value(dtype="int64"),
@@ -240,19 +243,19 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
240
  }
241
  )
242
 
243
- if self.config.name == "sentences_unlabeled":
244
- description = _metadata["sentences_unlabeled"]["description"]
245
  features = datasets.Features(
246
- _metadata["sentences_unlabeled"]["features"])
247
 
248
- if self.config.name == "sentences_labeled":
249
- description = _metadata["sentences_labeled"]["description"]
250
  features = datasets.Features(
251
- _metadata["sentences_labeled"]["features"])
252
 
253
  else:
254
  features = datasets.Features(
255
- _metadata["text_raw"]["description"]
256
  )
257
 
258
  return datasets.DatasetInfo(
@@ -272,24 +275,24 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
272
  # generators = [
273
  # datasets.SplitGenerator(
274
  # name=datasets.Split.TRAIN,
275
- # gen_kwargs={"filepath": downloaded_files["text_raw"]},
276
  # ),
277
 
278
  # datasets.SplitGenerator(
279
  # name=datasets.Split.TRAIN,
280
  # gen_kwargs={
281
- # "filepath": downloaded_files["sentences_unlabeled"]},
282
  # ),
283
 
284
  # datasets.SplitGenerator(
285
  # name=datasets.Split.TRAIN,
286
- # gen_kwargs={"filepath": downloaded_files["sentences_labeled_train"]},
287
  # ),
288
 
289
  # datasets.SplitGenerator(
290
  # name=datasets.Split.TEST,
291
  # gen_kwargs={
292
- # "filepath": downloaded_files["sentences_labeled_test"]},
293
  # )
294
  # ]
295
 
 
117
  "license": "apache-2.0",
118
 
119
  "urls": {
120
+ "raw_text": "./raw_text",
121
+ "unlabeled_sentences": "./unlabeled",
122
+ "labeled_sentences": "./labeled",
123
  },
124
 
125
  # [@] Config Names:
126
 
127
+ "raw_text": {
128
  "description": textwrap.dedent("""
129
  Data extracted from the source files (URLs, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files.
130
  """),
 
140
 
141
  },
142
 
143
+ "unlabeled_sentences": {
144
  "description": textwrap.dedent("""
145
  Unlabeled corpora of Paraguay legislation. This data is prepared to be labeled by the experts. Each instance of the dataset represents a specific text passage, split by its original formatting extracted from raw text (from original documents)
146
 
 
165
  }
166
  },
167
 
168
+ "labeled_sentences": {
169
  "description": textwrap.dedent("""
170
  The labeled data is the ground truth data used to train the models. This data is annotated by legal experts indicating the existence of administrative costs (and other types) in the legislation.
171
 
 
197
 
198
  BUILDER_CONFIGS = [
199
  datasets.BuilderConfig(
200
+ name="raw_text",
201
+ data_dir=_metadata["urls"]["raw_text"],
202
  version=VERSION,
203
+ description=_metadata["raw_text"]["description"],
204
  ),
205
 
206
  datasets.BuilderConfig(
207
+ name="unlabeled_sentences",
208
  version=VERSION,
209
+ data_dir=_metadata["urls"]["unlabeled_sentences"],
210
+ description=_metadata["unlabeled_sentences"]["description"],
211
  ),
212
 
213
  datasets.BuilderConfig(
214
+ name="labeled_sentences",
215
  version=VERSION,
216
+ data_dir=_metadata["urls"]["labeled_sentences"],
217
+ description=_metadata["labeled_sentences"]["description"],
218
  ),
219
  ]
220
 
221
  # It's not mandatory to have a default configuration. Just use one if it make sense.
222
+ DEFAULT_CONFIG_NAME = "raw_text"
223
 
224
  # [i] Info
225
  def _info(self):
 
230
  features = None
231
  description = ""
232
 
233
+ if self.config.name == "raw_text":
234
+ description = _metadata["raw_text"]["description"]
235
  features = datasets.Features(
236
  {
237
  "source_id": datasets.Value(dtype="int64"),
 
243
  }
244
  )
245
 
246
+ if self.config.name == "unlabeled_sentences":
247
+ description = _metadata["unlabeled_sentences"]["description"]
248
  features = datasets.Features(
249
+ _metadata["unlabeled_sentences"]["features"])
250
 
251
+ if self.config.name == "labeled_sentences":
252
+ description = _metadata["labeled_sentences"]["description"]
253
  features = datasets.Features(
254
+ _metadata["labeled_sentences"]["features"])
255
 
256
  else:
257
  features = datasets.Features(
258
+ _metadata["raw_text"]["description"]
259
  )
260
 
261
  return datasets.DatasetInfo(
 
275
  # generators = [
276
  # datasets.SplitGenerator(
277
  # name=datasets.Split.TRAIN,
278
+ # gen_kwargs={"filepath": downloaded_files["raw_text"]},
279
  # ),
280
 
281
  # datasets.SplitGenerator(
282
  # name=datasets.Split.TRAIN,
283
  # gen_kwargs={
284
+ # "filepath": downloaded_files["unlabeled_sentences"]},
285
  # ),
286
 
287
  # datasets.SplitGenerator(
288
  # name=datasets.Split.TRAIN,
289
+ # gen_kwargs={"filepath": downloaded_files["labeled_sentences_train"]},
290
  # ),
291
 
292
  # datasets.SplitGenerator(
293
  # name=datasets.Split.TEST,
294
  # gen_kwargs={
295
+ # "filepath": downloaded_files["labeled_sentences_test"]},
296
  # )
297
  # ]
298