"""Racó Forums Corpus""" |
|
|
|
import os |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """
"""

_DESCRIPTION = """\
The Racó Forums Corpus is a 19,205,185-sentence corpus of Catalan user-generated text built from the forums of Racó Català.

Since the existing corpora available in Catalan lacked conversational data, we looked for a major source of such data and found Racó Català, a popular multi-topic online forum. We obtained a database dump and transformed every thread so that we obtained documents traversing all existing paths from the root (the initial comment) to the leaves (the last comments with no reply). In other words, if T is a tree such that T = {A, B, C, D}, where the initial comment A is replied to by B and by C independently, and C is in turn replied to by D, we obtain two different documents, A,B and A,C,D, in the fairseq language-modeling format.
"""
_HOMEPAGE = "https://www.racocatala.cat/forums"

_LICENSE = "Creative Commons Non-commercial 4.0 International"

_URL = "https://zenodo.org/record/7254179/files/data.zip?download=1"

_TRAINING_FILE = "train.txt"

_DEV_FILE = "valid.txt"


class RacoForumsCorpus(datasets.GeneratorBasedBuilder):
    """Racó Forums Corpus"""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, "data", _TRAINING_FILE)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": os.path.join(data_dir, "data", _DEV_FILE)},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields documents, which are separated by blank lines in the data files."""
        with open(filepath, encoding="utf-8") as f:
            text = ""
            key = 0
            for line in f:
                if line == "\n":
                    # A blank line ends the current document.
                    if text.strip():
                        yield key, {"text": text.strip()}
                        key += 1
                    text = ""
                else:
                    text += line
            # Emit the last buffered document if the file does not end with a blank line.
            if text.strip():
                yield key, {"text": text.strip()}
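

# A minimal usage sketch, assuming this file is saved as "raco_forums.py" (the
# file name is an assumption) and a datasets version that still supports local
# loading scripts. Run the module directly to build and inspect the splits.
if __name__ == "__main__":
    raco = datasets.load_dataset("raco_forums.py")
    print(raco)
    # Show the beginning of the first training document.
    print(raco["train"][0]["text"][:200])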