# Bayelemabaga dataset loading script (RobotsMali AI).
# Modality: text; size bucket: 10K-100K aligned examples.
""" RobotsMaliAI: Bayelemaba """
import datasets
# BibTeX entry users should cite when working with this dataset.
# NOTE(review): the entry key line appears to be missing a trailing comma
# after "bayelemabagamldataset2022" — confirm against the upstream BibTeX.
_CITATION = """\
@misc{bayelemabagamldataset2022
title={Machine Learning Dataset Development for Manding Languages},
author={
Valentin Vydrin and
Christopher Homan and
Michael Leventhal and
Allashera Auguste Tapo and
Marco Zampieri and
Jean-Jacques Meric and
Kirill Maslinsky and
Andrij Rovenchak and
Sebastien Diarra
},
howpublished = {url{https://github.com/robotsmali-ai/datasets}},
year={2022}
}
"""
# Human-readable summary surfaced via datasets.DatasetInfo(description=...).
_DESCRIPTION = """\
The Bayelemabaga dataset is a collection of 44160 aligned machine translation ready Bambara-French lines,
originating from Corpus Bambara de Reference. The dataset is constitued of text extracted from 231 source files,
varing from periodicals, books, short stories, blog posts, part of the Bible and the Quran.
"""
# Download location of the tar archive containing all three splits.
_URL = {
    "parallel": "https://robotsmali-ai.github.io/datasets/bayelemabaga.tar.gz"
}
# Supported translation directions, written "source-target".
_LanguagePairs = [
    "bam-fr", "fr-bam"]
class BayelemabagaConfig(datasets.BuilderConfig):
    """Configuration for the Bayelemabaga dataset.

    Adds a single field, ``language_pair``, on top of the standard
    ``datasets.BuilderConfig`` attributes.
    """

    def __init__(self, language_pair, **kwargs) -> None:
        """
        Args:
            language_pair: translation direction to load, e.g. ``"bam-fr"``.
            **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
        # Consumed later by the builder to derive source/target tags.
        self.language_pair = language_pair
class Bayelemabaga(datasets.GeneratorBasedBuilder):
    """Bi-lingual Bambara/French text prepared for machine translation."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = BayelemabagaConfig
    BUILDER_CONFIGS = [
        BayelemabagaConfig(name="bam-fr", description=_DESCRIPTION, language_pair="bam-fr"),
        BayelemabagaConfig(name="fr-bam", description=_DESCRIPTION, language_pair="fr-bam")
    ]

    def _info(self):
        """Return dataset metadata for the configured language pair."""
        src_tag, tgt_tag = self.config.language_pair.split("-")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=(src_tag, tgt_tag))}
            ),
            supervised_keys=(src_tag, tgt_tag),
            homepage="https://robotsmali-ai.github.io/datasets",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive and declare the train/valid/test splits.

        Improvement over the original: the three copy-pasted
        ``SplitGenerator`` stanzas are collapsed into a single data-driven
        comprehension, so a new split only needs one row in ``layout``.
        """
        src_tag, tgt_tag = self.config.language_pair.split("-")
        archive = dl_manager.download(_URL["parallel"])
        # (split name, directory inside the archive, file-name stem).
        # NOTE: the validation files are named "dev.*" upstream.
        layout = [
            (datasets.Split.TRAIN, "bayelemabaga/train", "train"),
            (datasets.Split.VALIDATION, "bayelemabaga/valid", "dev"),
            (datasets.Split.TEST, "bayelemabaga/test", "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": f"{directory}/{stem}.{src_tag}",
                    "labelpath": f"{directory}/{stem}.{tgt_tag}",
                    # iter_archive streams members without extracting to disk.
                    "files": dl_manager.iter_archive(archive),
                },
            )
            for split, directory, stem in layout
        ]

    def _generate_examples(self, filepath, labelpath, files):
        """Yield ``(idx, example)`` pairs aligning source and target lines.

        Scans the streamed archive for the source file (``filepath``) and
        the target file (``labelpath``); once both are read, zips them
        line-by-line into translation examples and stops iterating.

        Fix over the original: ``str.splitlines()`` replaces
        ``split("\\n")[:-1]``, which silently dropped the final line of any
        file that did not end with a trailing newline.
        """
        src_tag, tgt_tag = self.config.language_pair.split("-")
        src, tgt = None, None
        for path, f in files:
            if path == filepath:
                src = f.read().decode("utf-8").splitlines()
            elif path == labelpath:
                tgt = f.read().decode("utf-8").splitlines()
            if src is not None and tgt is not None:
                # zip truncates to the shorter side if the files are
                # misaligned; each pair becomes one translation example.
                for idx, (s, t) in enumerate(zip(src, tgt)):
                    yield idx, {"translation": {src_tag: s, tgt_tag: t}}
                break
|