fernando-peres committed on
Commit
44bce36
·
1 Parent(s): 772c39c

Builder in one file

Browse files
.gitignore CHANGED
@@ -1,2 +1,4 @@
1
  env
2
- /env
 
 
 
1
  env
2
+ /env
3
+ .vscode
4
+ /.vscode
.vscode/settings.json DELETED
@@ -1,3 +0,0 @@
1
- {
2
- "cSpell.words": ["Multiclassification"]
3
- }
 
 
 
 
py_legislation.py CHANGED
@@ -14,13 +14,149 @@ class PY_Legislation(datasets.GeneratorBasedBuilder)
14
  Defines the implementation of Paraguay Legislation dataset builder (GeneratorBasedBuilder).
15
 
16
  """
 
17
 
18
  from textwrap import TextWrapper
19
  import datasets
20
  import pyarrow.parquet as pq
21
 
22
- from features_specs import BASIC_FEATURES_SPEC, RAW_FEATURES_SPEC, SENTENCES_UNLABELED_FEATURES_SPEC
23
- from py_legislation_metadata import PY_LEGISLATION_METADATA
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
 
26
  class PY_legislation(datasets.GeneratorBasedBuilder):
@@ -29,15 +165,15 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
29
  BUILDER_CONFIGS = [
30
  datasets.BuilderConfig(
31
  name="raw", version=VERSION,
32
- description=PY_LEGISLATION_METADATA["raw-description"],
33
  ),
34
  datasets.BuilderConfig(
35
  name="sentences_unlabeled", version=VERSION,
36
- description=PY_LEGISLATION_METADATA["sentences-unlabeled-description"],
37
  ),
38
  datasets.BuilderConfig(
39
  name="sentences_labeled", version=VERSION,
40
- description=PY_LEGISLATION_METADATA["sentences-labeled-description"],
41
  ),
42
  ]
43
 
@@ -51,15 +187,18 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
51
  description = ""
52
 
53
  if self.config.name == "raw":
54
- description = PY_LEGISLATION_METADATA["raw-description"]
55
- features = datasets.Features(RAW_FEATURES_SPEC)
56
 
57
  if self.config.name == "sentences_unlabeled":
58
- description = PY_LEGISLATION_METADATA["sentences-unlabeled-description"]
59
- features = datasets.Features(SENTENCES_UNLABELED_FEATURES_SPEC)
 
60
 
61
  if self.config.name == "sentences_labeled":
62
- description = PY_LEGISLATION_METADATA["sentences-labeled-description"]
 
 
63
 
64
  else:
65
  features = datasets.Features(
@@ -69,13 +208,14 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
69
  return datasets.DatasetInfo(
70
  description=description,
71
  features=features,
72
- homepage=PY_LEGISLATION_METADATA["homepage"],
73
- license=PY_LEGISLATION_METADATA["license"],
74
- citation=PY_LEGISLATION_METADATA["citation"],
75
  )
76
 
77
  def _split_generators(self, dl_manager):
78
- urls = PY_LEGISLATION_METADATA["urls"][self.config.name]
 
79
 
80
  urls = dl_manager.download_and_extract(urls)
81
 
@@ -98,4 +238,3 @@ class PY_legislation(datasets.GeneratorBasedBuilder):
98
  col_name: pq_table[col_name][i].as_py()
99
  for col_name in pq_table.column_names
100
  }
101
-
 
14
  Defines the implementation of Paraguay Legislation dataset builder (GeneratorBasedBuilder).
15
 
16
  """
17
+ import textwrap
18
 
19
  from textwrap import TextWrapper
20
  import datasets
21
  import pyarrow.parquet as pq
22
 
23
+ from obligations import affected_entity, cost_type, aa_categories, aa_categories_unique, io_categories
24
+
25
+ _metadata = {
26
+ "citation": """\
27
+ @InProceedings{
28
+ huggingface:dataset,
29
+ title = {Paraguay Legislation Dataset},
30
+ author={Peres, Fernando; Costa, Victor},
31
+ year={2023}
32
+ }
33
+ """,
34
+
35
+ "description": textwrap.dedent("""\
36
+ Dataset for researching - NLP techniques on PARAGUAY legislation.
37
+ """),
38
+
39
+ "homepage": "https://www.leyes.com.py/",
40
+
41
+ "license": "apache-2.0",
42
+
43
+ "urls": {
44
+ "raw": "./data/0_raw/raw.parquet",
45
+ "sentences_unlabeled": "./data/1_sentences_unlabeled/unlabeled.parquet",
46
+ "sentences_labeled": "./data/2_sentences_labeled/labeled.parquet",
47
+ },
48
+
49
+ # [@] Config Names:
50
+
51
+ "raw": {
52
+ "description": textwrap.dedent("""
53
+ Data extracted from the sources files (URls, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files.
54
+ """),
55
+
56
+ "features": {
57
+ "source_id": datasets.Value(dtype="int64"),
58
+ "doc_source_id": datasets.Value(dtype="int64"),
59
+ "document": datasets.Value(dtype="string"),
60
+ "text": datasets.Value(dtype="string"),
61
+ }
62
+
63
+ },
64
+
65
+ "sentences-unlabeled": {
66
+ "description": textwrap.dedent("""
67
+ Unlabeled corpora of Paraguay legislation. This data is prepared to be labeled by the experts. Each instance of the dataset represents a specific text passage, split by its original formatting extracted from raw text (from original documents)
68
+
69
+ Each observation of the dataset represents a specific text passage.
70
+ """),
71
+
72
+ "features": {
73
+ "source_id": datasets.Value(dtype="int64"),
74
+ "doc_source_id": datasets.Value(dtype="int64"),
75
+ "document": datasets.Value(dtype="string"),
76
+ "text": datasets.Value(dtype="string"),
77
+
78
+ # Categories
79
+ "cost_type": datasets.ClassLabel(names=cost_type,),
80
+ "affected_entity": datasets.ClassLabel(names=affected_entity,),
81
+ "io_categories": datasets.Sequence(
82
+ datasets.ClassLabel(names=io_categories,)),
83
+ "aa_categories": datasets.Sequence(
84
+ datasets.ClassLabel(names=aa_categories,)),
85
+ "aa_categories_unique": datasets.Sequence(
86
+ datasets.ClassLabel(names=aa_categories_unique,)),
87
+ }
88
+ },
89
+
90
+ "sentences-labeled": {
91
+ "description": textwrap.dedent("""
92
+ The labeled data is the ground truth data used to train the models. This data is annotated by legal experts indicating the existence of administrative costs (and other types) in the legislation.
93
+
94
+ Each observation of the dataset represents a specific text passage.
95
+ """),
96
+
97
+ "features": {
98
+ "source_id": datasets.Value(dtype="int64"),
99
+ "doc_source_id": datasets.Value(dtype="int64"),
100
+ "document": datasets.Value(dtype="string"),
101
+ "text": datasets.Value(dtype="string"),
102
+
103
+ # Categories
104
+ "cost_type": datasets.ClassLabel(names=cost_type,),
105
+ "affected_entity": datasets.ClassLabel(names=affected_entity,),
106
+ "io_categories": datasets.Sequence(
107
+ datasets.ClassLabel(names=io_categories,)),
108
+ "aa_categories": datasets.Sequence(
109
+ datasets.ClassLabel(names=aa_categories,)),
110
+ "aa_categories_unique": datasets.Sequence(
111
+ datasets.ClassLabel(names=aa_categories_unique,)),
112
+ }
113
+ }
114
+ }
115
+
116
+
117
+ x = {
118
+ "config_names": {
119
+ "raw": {
120
+ "description": "",
121
+ "features": {
122
+ "source_id": datasets.Value(dtype="int64"),
123
+ "doc_source_id": datasets.Value(dtype="int64"),
124
+ "document": datasets.Value(dtype="string"),
125
+ "text": datasets.Value(dtype="string"),
126
+ }
127
+ }
128
+ }
129
+ }
130
+
131
+ BASIC_FEATURES_SPEC = {
132
+ "source_id": datasets.Value(dtype="int64"),
133
+ "doc_source_id": datasets.Value(dtype="int64"),
134
+ "document": datasets.Value(dtype="string"),
135
+ "text": datasets.Value(dtype="string"),
136
+ }
137
+
138
+ RAW_FEATURES_SPEC = {
139
+ "source_id": datasets.Value(dtype="int64"),
140
+ "doc_source_id": datasets.Value(dtype="int64"),
141
+ "document": datasets.Value(dtype="string"),
142
+ "text": datasets.Value(dtype="string"),
143
+ }
144
+
145
+ SENTENCES_UNLABELED_FEATURES_SPEC = {
146
+ "source_id": datasets.Value(dtype="int64"),
147
+ "doc_source_id": datasets.Value(dtype="int64"),
148
+ "document": datasets.Value(dtype="string"),
149
+ "text": datasets.Value(dtype="string"),
150
+
151
+ #
152
+
153
+ # Categories
154
+ "cost_type": datasets.ClassLabel(names=cost_type,),
155
+ "affected_entity": datasets.ClassLabel(names=affected_entity,),
156
+ "io_categories": datasets.Sequence(datasets.ClassLabel(names=io_categories,)),
157
+ "aa_categories": datasets.Sequence(datasets.ClassLabel(names=aa_categories,)),
158
+ "aa_categories_unique": datasets.Sequence(datasets.ClassLabel(names=aa_categories_unique,)),
159
+ }
160
 
161
 
162
  class PY_legislation(datasets.GeneratorBasedBuilder):
 
165
  BUILDER_CONFIGS = [
166
  datasets.BuilderConfig(
167
  name="raw", version=VERSION,
168
+ description=_metadata["raw"]["description"],
169
  ),
170
  datasets.BuilderConfig(
171
  name="sentences_unlabeled", version=VERSION,
172
+ description=_metadata["sentences-unlabeled"]["description"],
173
  ),
174
  datasets.BuilderConfig(
175
  name="sentences_labeled", version=VERSION,
176
+ description=_metadata["sentences-labeled"]["description"],
177
  ),
178
  ]
179
 
 
187
  description = ""
188
 
189
  if self.config.name == "raw":
190
+ description = _metadata["raw"]["description"]
191
+ features = datasets.Features(_metadata["raw"]["features"])
192
 
193
  if self.config.name == "sentences_unlabeled":
194
+ description = _metadata["sentences-unlabeled"]["description"]
195
+ features = datasets.Features(
196
+ _metadata["sentences-unlabeled"]["features"])
197
 
198
  if self.config.name == "sentences_labeled":
199
+ description = _metadata["sentences-labeled"]["description"]
200
+ features = datasets.Features(
201
+ _metadata["sentences-labeled"]["features"])
202
 
203
  else:
204
  features = datasets.Features(
 
208
  return datasets.DatasetInfo(
209
  description=description,
210
  features=features,
211
+ homepage=_metadata["homepage"],
212
+ license=_metadata["license"],
213
+ citation=_metadata["citation"],
214
  )
215
 
216
  def _split_generators(self, dl_manager):
217
+ # TODO: labeled subset has two splits
218
+ urls = _metadata["urls"][self.config.name]
219
 
220
  urls = dl_manager.download_and_extract(urls)
221
 
 
238
  col_name: pq_table[col_name][i].as_py()
239
  for col_name in pq_table.column_names
240
  }
 
py_legislation_metadata.py CHANGED
@@ -34,7 +34,6 @@ PY_LEGISLATION_METADATA = {
34
  "sentences_labeled": "./data/2_sentences_labeled/labeled.parquet",
35
  },
36
 
37
-
38
  "raw-description" : textwrap.dedent("""
39
  Data extracted from the sources files (URls, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files.
40
  """),
 
34
  "sentences_labeled": "./data/2_sentences_labeled/labeled.parquet",
35
  },
36
 
 
37
  "raw-description" : textwrap.dedent("""
38
  Data extracted from the sources files (URls, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files.
39
  """),