import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import jsonlines

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks
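
# HuggingFace `datasets` loader for the Indonesian subset of XL-Sum, packaged
# for the NusaCrowd catalogue (hence the NusantaraConfig and shared schemas).
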
_CITATION = """\
@inproceedings{hasan2021xl,
    title={XL-Sum: Large-Scale Multilingual Abstractive Summarization for 44 Languages},
    author={Hasan, Tahmid and Bhattacharjee, Abhik and Islam, Md Saiful and Mubasshir, Kazi and Li, Yuan-Fang and Kang, Yong-Bin and Rahman, M Sohel and Shahriyar, Rifat},
    booktitle={Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021},
    pages={4693--4703},
    year={2021}
}
"""

_LOCAL = False
_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data); only Indonesian data is loaded here
_DATASETNAME = "xl_sum"
_DESCRIPTION = """\
XL-Sum is a large-scale multilingual summarization dataset covering 45 languages, including Indonesian.
The dataset is built from article-summary pairs from BBC; it is highly abstractive, concise, and of high quality, as indicated by human and intrinsic evaluation.
"""
_HOMEPAGE = "https://github.com/csebuetnlp/xl-sum"
_LICENSE = "CC-BY-NC-SA 4.0"
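
# The loader fetches only the Indonesian portion of the XL-Sum v2.0 release.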
_URLS = {
    _DATASETNAME: "https://huggingface.co/datasets/csebuetnlp/xlsum/resolve/main/data/indonesian_XLSum_v2.0.tar.bz2",
}
_SUPPORTED_TASKS = [Tasks.SUMMARIZATION]
_SOURCE_VERSION = "2.0.0"
_NUSANTARA_VERSION = "1.0.0"


class XLSum(datasets.GeneratorBasedBuilder):
    """XL-Sum is a large-scale multilingual summarization dataset covering 45 languages, including Indonesian. The dataset is built from article-summary pairs from BBC and is highly abstractive, concise, and of high quality, as indicated by human and intrinsic evaluation."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
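
    # Two configurations: the raw "source" schema mirroring the original
    # XL-Sum fields, and the shared Nusantara text-to-text (t2t) schema.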
    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="xl_sum_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="xl_sum source schema",
            schema="source",
            subset_id="xl_sum",
        ),
        NusantaraConfig(
            name="xl_sum_nusantara_t2t",
            version=datasets.Version(_NUSANTARA_VERSION),
            description="xl_sum Nusantara schema",
            schema="nusantara_t2t",
            subset_id="xl_sum",
        ),
    ]

    DEFAULT_CONFIG_NAME = "xl_sum_source"
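
    # Declare features: the five raw XL-Sum fields for "source", or the
    # shared text2text feature set for "nusantara_t2t".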
    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
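
    # Download and extract the archive once; each split reads its own JSONL
    # file from the extracted directory.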
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        data_dir = Path(dl_manager.download_and_extract(_URLS[_DATASETNAME]))
        data_files = {
            "train": "indonesian_train.jsonl",
            "validation": "indonesian_val.jsonl",
            "test": "indonesian_test.jsonl",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, data_files["train"]),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, data_files["validation"]),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, data_files["test"]),
                    "split": "test",
                },
            ),
        ]
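
    # Stream one example per JSONL line, keyed by the article "id".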
    def _generate_examples(self, filepath: Path, split: str) -> Tuple[str, Dict]:
        if self.config.schema == "source":
            with jsonlines.open(filepath) as f:
                for each_data in f.iter():
                    ex = {
                        "id": each_data["id"],
                        "url": each_data["url"],
                        "title": each_data["title"],
                        "text": each_data["text"],
                        "summary": each_data["summary"],
                    }
                    yield each_data["id"], ex
        elif self.config.schema == "nusantara_t2t":
            with jsonlines.open(filepath) as f:
                for each_data in f.iter():
                    ex = {
                        "id": each_data["id"],
                        "text_1": each_data["text"],
                        "text_2": each_data["summary"],
                        "text_1_name": each_data["title"],
                        "text_2_name": "summary",
                    }
                    yield each_data["id"], ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
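
# A minimal usage sketch, not part of the original loader. It assumes this
# file is saved locally as "xl_sum.py", that the `nusacrowd` package is
# installed, and that the installed `datasets` version still supports
# loading from local scripts:
#
#     import datasets
#
#     ds = datasets.load_dataset("xl_sum.py", name="xl_sum_nusantara_t2t")
#     sample = ds["train"][0]
#     print(sample["text_1"][:200])  # article body
#     print(sample["text_2"])        # reference summary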