""" |
|
|
|
Paraguay Legislation Dataset Builder

class PYLegislation(datasets.GeneratorBasedBuilder)

Defines the implementation of the Paraguay Legislation dataset builder
(datasets.GeneratorBasedBuilder).
"""
|
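# Usage sketch (illustrative): assuming this loading script is saved locally as
# "py_legislation.py" (the file name and local path are assumptions, not fixed by
# this script), the configs defined below can be loaded with the standard
# `datasets` API:
#
#   from datasets import load_dataset
#
#   raw = load_dataset("./py_legislation.py", "raw_text", split="train")
#   sents = load_dataset("./py_legislation.py", "unlabeled_sentences", split="train")
#   print(raw[0]["text"])
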
import textwrap

import datasets
import pyarrow.parquet as pq

_URLS = {
    "raw_text": "./raw_text/train.parquet",
    "unlabeled_sentences": "./unlabeled_sentences/train.parquet",
}
|
|
|
# Unique AA categories; reused below to build the per-slot "aa_categories" label set.
_aa_categories_unique = [
    "familiarizacion_con_OI",
    "recoleccion_y_organizacion_de_informacion",
    "procesamiento_de_informacion",
    "tiempos_de_espera",
    "desplazamientos",
    "envio_de_informacion",
    "preservacion_de_informacion",
]

_obligations = {
    "cost_type": ["sin_costo", "costo_adm", "costo_directo", "otro_costo"],

    "affected_entity": ["ent_no_afectada", "empresas", "ciudadanos", "adm_publica"],

    "io_categories": [
        "prestacion_de_informacion_empresarial_y_fiscal",
        "solicitudes_de_licencias_y_otras",
        "registros_y_notificaciones",
        "solicitud_de_subsidios_y_otras",
        "disponibilidad_de_manuales_y_otras",
        "cooperacion_con_auditorías_y_otras",
        "prestacion_de_informacion_a_consumidores",
        "otras_OIS",
    ],

    "aa_categories_unique": _aa_categories_unique,

    # One label per AA slot (aa_1 .. aa_7) and unique category, in slot order.
    "aa_categories": [
        f"aa_{i}_{name}" for i in range(1, 8) for name in _aa_categories_unique
    ],
}
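
# Sanity check (illustrative): the per-slot AA label set enumerates every unique
# category for each of the 7 slots.
assert len(_obligations["aa_categories"]) == 7 * len(_obligations["aa_categories_unique"])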
|
|
|
_metadata = {
    "citation": textwrap.dedent("""\
        @InProceedings{huggingface:dataset,
          title = {Paraguay Legislation Dataset},
          author = {Peres, Fernando and Costa, Victor},
          year = {2023}
        }
        """),

    "description": textwrap.dedent("""\
        Dataset for researching NLP techniques on Paraguay legislation.
        """),

    "homepage": "https://www.leyes.com.py/",

    "license": "apache-2.0",
}
|
|
|
|
|
_CONFIGS = {
    "raw_text": {
        "description": textwrap.dedent("""\
            Data extracted from the source files (URLs, PDFs, and Word files) without any
            transformation or sentence splitting. It is useful because you can access the
            raw data extracted from the seeds (PDFs and Word files) and apply further
            preprocessing from this point, without having to re-extract the text from the
            source files.
            """),

        "features": {
            "source_id": datasets.Value(dtype="int64"),
            "source_name": datasets.Value(dtype="string"),
            "text_id": datasets.Value(dtype="int64"),
            "text": datasets.Value(dtype="string"),
            "extension": datasets.ClassLabel(names=["docx", "pdf", "html", "txt", "doc"]),
        },
    },

    "unlabeled_sentences": {
        "description": textwrap.dedent("""\
            Unlabeled corpus of Paraguay legislation, prepared to be labeled by the experts.
            Each observation of the dataset represents a specific text passage, split
            according to the original formatting of the raw text extracted from the
            original documents.
            """),

        "features": {
            "source_id": datasets.Value(dtype="int64"),
            "source_name": datasets.Value(dtype="string"),
            "text_id": datasets.Value(dtype="int64"),
            "text": datasets.Value(dtype="string"),
            "cost_type": datasets.ClassLabel(names=_obligations["cost_type"]),
            "affected_entity": datasets.ClassLabel(names=_obligations["affected_entity"]),
            "io_categories": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["io_categories"])),
            "aa_categories": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["aa_categories"])),
            "aa_categories_unique": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["aa_categories_unique"])),
        },
    },

    "labeled_sentences": {
        "description": textwrap.dedent("""\
            The labeled data is the ground truth used to train the models. It is annotated
            by legal experts, who indicate the existence of administrative costs (and other
            cost types) in the legislation. Each observation of the dataset represents a
            specific text passage.
            """),

        "features": {
            "source_id": datasets.Value(dtype="int64"),
            "source_name": datasets.Value(dtype="string"),
            "text_id": datasets.Value(dtype="int64"),
            "text": datasets.Value(dtype="string"),
            "cost_type": datasets.ClassLabel(names=_obligations["cost_type"]),
            "affected_entity": datasets.ClassLabel(names=_obligations["affected_entity"]),
            "io_categories": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["io_categories"])),
            "aa_categories": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["aa_categories"])),
            "aa_categories_unique": datasets.Sequence(
                datasets.ClassLabel(names=_obligations["aa_categories_unique"])),
        },
    },
}
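
# Illustrative note: ClassLabel features are stored as integer ids; after loading,
# they can be mapped back to the label names defined above, e.g. (hypothetical
# variable `ds` holding the "labeled_sentences" train split):
#
#   ds.features["cost_type"].int2str(ds[0]["cost_type"])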
|
|
|
|
|
class PYLegislation(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="raw_text",
            version=VERSION,
            description=_CONFIGS["raw_text"]["description"],
        ),
        datasets.BuilderConfig(
            name="unlabeled_sentences",
            version=VERSION,
            description=_CONFIGS["unlabeled_sentences"]["description"],
        ),
    ]
|
|
|
|
|
DEFAULT_CONFIG_NAME = "raw_text" |
|
|
|
|
|
def _info(self): |
|
""" |
|
        This method specifies the datasets.DatasetInfo object, which contains
        information and typings for the dataset.
|
""" |
|
        # Fall back to the default "raw_text" config when the requested name is unknown.
        config = _CONFIGS.get(self.config.name, _CONFIGS["raw_text"])
        features = datasets.Features(config["features"])
        description = config["description"]
|
|
|
return datasets.DatasetInfo( |
|
builder_name=self.config.name, |
|
description=description, |
|
features=features, |
|
homepage=_metadata["homepage"], |
|
license=_metadata["license"], |
|
citation=_metadata["citation"], |
|
) |
|
|
|
    def _split_generators(self, dl_manager):
        # Each config maps to a single local parquet file containing only a train split.
        url = _URLS[self.config.name]
        filepath = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath},
            ),
        ]
|
def _generate_examples(self, filepath): |
|
""" |
|
        This method handles input defined in _split_generators to yield (key, example)
        tuples from the dataset. The `key` is for legacy reasons (tfds) and is not
        important in itself, but it must be unique for each example.

        Note: method parameters are unpacked from `gen_kwargs` as given in
        `_split_generators`.
        """

pq_table = pq.read_table(filepath) |
|
for i in range(len(pq_table)): |
|
yield i, { |
|
col_name: pq_table[col_name][i].as_py() |
|
for col_name in pq_table.column_names |
|
} |
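
        # Equivalent row iteration (sketch): pyarrow's Table.to_pylist() materializes
        # the table as one dict per row, which avoids the per-column indexing above:
        #
        #   for i, row in enumerate(pq_table.to_pylist()):
        #       yield i, row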
|
|