# NOTE(review): removed non-Python residue (git-blame viewer header: file size,
# commit hashes, and line-number gutter) left over from a web scrape.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
Paraguay Legislation Dataset Builder
class PY_Legislation(datasets.GeneratorBasedBuilder)
Defines the implementation of Paraguay Legislation dataset builder (GeneratorBasedBuilder).
"""
import os
import textwrap
from textwrap import TextWrapper
import datasets
import pyarrow.parquet as pq
# [>] URLS:
# URL = "https://huggingface.co/datasets/fernandoperes/py_legislation/raw/main"
# Relative paths (resolved by `dl_manager.download_and_extract` in
# `_split_generators`) to the parquet file backing each configuration.
# Plain strings: the original used f-strings with no placeholders.
_URLS = {
    "raw_text": "./raw_text/train.parquet",
    "unlabeled_sentences": "./unlabeled_sentences/train.parquet",
    # "labeled_sentences_train": "./labeled_sentences/train.parquet",
    # "labeled_sentences_test": "./labeled_sentences/test.parquet",
}
# The seven administrative-activity (AA) label names; reused below both as the
# "unique" vocabulary and to generate the per-IO (1..7) prefixed labels.
_AA_ACTIVITY_NAMES = [
    "familiarizacion_con_OI",
    "recoleccion_y_organizacion_de_informacion",
    "procesamiento_de_informacion",
    "tiempos_de_espera",
    "desplazamientos",
    "envio_de_informacion",
    "preservacion_de_informacion",
]
# Label vocabularies of the annotation schema: cost types, affected entities,
# information-obligation (IO) categories and administrative activities (AA).
# BUGFIX: the original lists were missing the commas between string literals,
# so implicit string concatenation collapsed each list into a single giant
# string — every ClassLabel built from them had exactly one (garbage) label.
_obligations = {
    "cost_type": ["sin_costo", "costo_adm", "costo_directo", "otro_costo"],
    "affected_entity": ["ent_no_afectada", "empresas", "ciudadanos", "adm_publica"],
    "io_categories": [
        "prestacion_de_informacion_empresarial_y_fiscal",
        "solicitudes_de_licencias_y_otras",
        "registros_y_notificaciones",
        "solicitud_de_subsidios_y_otras",
        "disponibilidad_de_manuales_y_otras",
        "cooperacion_con_auditorías_y_otras",
        "prestacion_de_informacion_a_consumidores",
        "otras_OIS",
    ],
    "aa_categories_unique": list(_AA_ACTIVITY_NAMES),
    # One label per (IO index 1..7, activity) pair: 7 * 7 = 49 labels, in the
    # original order (all aa_1_* first, then aa_2_*, ...).
    "aa_categories": [
        f"aa_{io_idx}_{activity}"
        for io_idx in range(1, 8)
        for activity in _AA_ACTIVITY_NAMES
    ],
}
# Card-level metadata shared by every configuration (citation, short
# description, homepage and license of the dataset).
_metadata = dict(
    citation="""\
@InProceedings{
huggingface:dataset,
title = {Paraguay Legislation Dataset},
author={Peres, Fernando; Costa, Victor},
year={2023}
}
""",
    description=textwrap.dedent("""\
Dataset for researching - NLP techniques on PARAGUAY legislation.
"""),
    homepage="https://www.leyes.com.py/",
    license="apache-2.0",
)
# [@] Config Names:
# Description + feature schema for each builder configuration, keyed by the
# config name. "labeled_sentences" is declared here but its BuilderConfig is
# still commented out in PYLegislation until the labeled files are published.
# Typo fixes applied to the user-facing descriptions ("sources files" ->
# "source files", "URls" -> "URLs").
_CONFIGS = {
    "raw_text": {
        "description": textwrap.dedent("""
            Data extracted from the source files (URLs, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files.
            """),
        "features": {
            # Identity of the source document and the extracted text chunk.
            "source_id": datasets.Value(dtype="int64"),
            "source_name": datasets.Value(dtype="string"),
            "text_id": datasets.Value(dtype="int64"),
            "text": datasets.Value(dtype="string"),
            # File type the text was extracted from.
            "extension": datasets.ClassLabel(names=["docx", "pdf", "html", "txt", "doc"]),
        },
    },
    "unlabeled_sentences": {
        "description": textwrap.dedent("""
            Unlabeled corpora of Paraguay legislation. This data is prepared to be labeled by the experts. Each instance of the dataset represents a specific text passage, split by its original formatting extracted from raw text (from original documents)
            Each observation of the dataset represents a specific text passage.
            """),
        "features": {
            "source_id": datasets.Value(dtype="int64"),
            "source_name": datasets.Value(dtype="string"),
            "text_id": datasets.Value(dtype="int64"),
            "text": datasets.Value(dtype="string"),
            # Categories (empty/placeholder here; filled in during labeling).
            "cost_type": datasets.ClassLabel(names=_obligations["cost_type"]),
            "affected_entity": datasets.ClassLabel(names=_obligations["affected_entity"]),
            "io_categories": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["io_categories"])),
            "aa_categories": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["aa_categories"])),
            "aa_categories_unique": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["aa_categories_unique"])),
        },
    },
    "labeled_sentences": {
        "description": textwrap.dedent("""
            The labeled data is the ground truth data used to train the models. This data is annotated by legal experts indicating the existence of administrative costs (and other types) in the legislation.
            Each observation of the dataset represents a specific text passage.
            """),
        "features": {
            # Same schema as "unlabeled_sentences": identity columns plus the
            # expert-annotated category labels.
            "source_id": datasets.Value(dtype="int64"),
            "source_name": datasets.Value(dtype="string"),
            "text_id": datasets.Value(dtype="int64"),
            "text": datasets.Value(dtype="string"),
            # Categories
            "cost_type": datasets.ClassLabel(names=_obligations["cost_type"]),
            "affected_entity": datasets.ClassLabel(names=_obligations["affected_entity"]),
            "io_categories": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["io_categories"])),
            "aa_categories": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["aa_categories"])),
            "aa_categories_unique": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["aa_categories_unique"])),
        },
    },
}
class PYLegislation(datasets.GeneratorBasedBuilder):
    """Builder for the Paraguay Legislation dataset.

    Exposes one configuration per published entry of ``_CONFIGS``; each
    configuration is backed by a single parquet file listed in ``_URLS`` and
    yields one example per parquet row.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="raw_text",
            version=VERSION,
            description=_CONFIGS["raw_text"]["description"],
        ),
        datasets.BuilderConfig(
            name="unlabeled_sentences",
            version=VERSION,
            description=_CONFIGS["unlabeled_sentences"]["description"],
        ),
        # The "labeled_sentences" config exists in _CONFIGS but is not yet
        # published; re-enable its BuilderConfig (and its _URLS entries) once
        # the labeled parquet files are available.
    ]

    # It's not mandatory to have a default configuration. Just use one if it
    # makes sense; "raw_text" is the most general starting point.
    DEFAULT_CONFIG_NAME = "raw_text"

    # [i] Dataset Information:
    def _info(self) -> "datasets.DatasetInfo":
        """Return the DatasetInfo (features + card metadata) for the active config.

        Unknown config names fall back to the "raw_text" schema, preserving
        the original defensive behavior.
        """
        config = _CONFIGS.get(self.config.name, _CONFIGS["raw_text"])
        return datasets.DatasetInfo(
            builder_name=self.config.name,
            description=config["description"],
            features=datasets.Features(config["features"]),
            homepage=_metadata["homepage"],
            license=_metadata["license"],
            citation=_metadata["citation"],
        )

    def _split_generators(self, dl_manager):
        """Download the parquet file for the active config.

        Every published configuration ships a single TRAIN split.
        """
        filepath = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` tuples from the parquet file at *filepath*.

        The key is for legacy reasons (tfds) and is not important in itself,
        but must be unique per example; the row index satisfies that. Method
        parameters are unpacked from ``gen_kwargs`` of ``_split_generators``.
        """
        table = pq.read_table(filepath)
        # Hoist the column list out of the row loop; each cell is converted
        # to a plain Python value with .as_py(), as before.
        columns = table.column_names
        for row_idx in range(len(table)):
            yield row_idx, {name: table[name][row_idx].as_py() for name in columns}