holylovenia committed
Commit 0ac9e2c · 1 Parent(s): a09a6f7

Upload xl_sum.py with huggingface_hub
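For reference, this kind of upload can be scripted with the huggingface_hub client. The snippet below is a minimal sketch rather than the exact command used for this commit: the repo_id is a placeholder, and it assumes xl_sum.py sits in the current working directory.

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="xl_sum.py",            # local file to push
    path_in_repo="xl_sum.py",               # destination path inside the repo
    repo_id="your-org/your-dataset-repo",   # placeholder; use the target dataset repo id
    repo_type="dataset",
    commit_message="Upload xl_sum.py with huggingface_hub",
)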

Files changed (1): xl_sum.py (+154, -0)
xl_sum.py ADDED
@@ -0,0 +1,154 @@
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks
from nusacrowd.utils import schemas
import jsonlines
from nltk.tokenize.treebank import TreebankWordDetokenizer

_CITATION = """\
@inproceedings{hasan2021xl,
    title={XL-Sum: Large-Scale Multilingual Abstractive Summarization for 44 Languages},
    author={Hasan, Tahmid and Bhattacharjee, Abhik and Islam, Md Saiful and Mubasshir, Kazi and Li, Yuan-Fang and Kang, Yong-Bin and Rahman, M Sohel and Shahriyar, Rifat},
    booktitle={Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021},
    pages={4693--4703},
    year={2021}
}
"""

_LOCAL = False
_LANGUAGES = ["ind", "eng"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_DATASETNAME = "xl_sum"

_DESCRIPTION = """\
XL-Sum is a large-scale multilingual summarization dataset that covers 45 languages, including Indonesian.
The dataset is based on article-summary pairs from BBC and is highly abstractive, concise, and of high quality, as indicated by human and intrinsic evaluation.
"""

_HOMEPAGE = "https://github.com/csebuetnlp/xl-sum"

_LICENSE = "CC-BY-NC-SA 4.0"

_URLS = {
    _DATASETNAME: "https://huggingface.co/datasets/csebuetnlp/xlsum/resolve/main/data/indonesian_XLSum_v2.0.tar.bz2",
}

_SUPPORTED_TASKS = [Tasks.SUMMARIZATION]

_SOURCE_VERSION = "2.0.0"

_NUSANTARA_VERSION = "1.0.0"

class XLSum(datasets.GeneratorBasedBuilder):
    """XL-Sum is a large-scale multilingual summarization dataset that covers 45 languages, including Indonesian. The dataset is based on article-summary pairs from BBC and is highly abstractive, concise, and of high quality, as indicated by human and intrinsic evaluation."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="xl_sum_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="xl_sum source schema",
            schema="source",
            subset_id="xl_sum",
        ),
        NusantaraConfig(
            name="xl_sum_nusantara_t2t",
            version=datasets.Version(_NUSANTARA_VERSION),
            description="xl_sum Nusantara schema",
            schema="nusantara_t2t",
            subset_id="xl_sum",
        ),
    ]

    DEFAULT_CONFIG_NAME = "xl_sum_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Download and extract the Indonesian XL-Sum v2.0 archive, then point each split at its JSONL file.
        data_dir = Path(dl_manager.download_and_extract(_URLS[_DATASETNAME]))

        data_files = {
            "train": "indonesian_train.jsonl",
            "validation": "indonesian_val.jsonl",
            "test": "indonesian_test.jsonl",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, data_files["train"]),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, data_files["validation"]),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, data_files["test"]),
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        # Each JSONL record carries id, url, title, text, and summary fields;
        # examples are yielded keyed by the record id.
        if self.config.schema == "source":
            with jsonlines.open(filepath) as f:
                for each_data in f.iter():
                    ex = {
                        "id": each_data["id"],
                        "url": each_data["url"],
                        "title": each_data["title"],
                        "text": each_data["text"],
                        "summary": each_data["summary"],
                    }
                    yield each_data["id"], ex

        elif self.config.schema == "nusantara_t2t":
            with jsonlines.open(filepath) as f:
                for each_data in f.iter():
                    ex = {
                        "id": each_data["id"],
                        "text_1": each_data["text"],
                        "text_2": each_data["summary"],
                        "text_1_name": each_data["title"],
                        "text_2_name": "summary",
                    }
                    yield each_data["id"], ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
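
Once the script is in place, the loader can be exercised with the datasets library. A minimal usage sketch, assuming xl_sum.py is available locally, the nusacrowd package (which provides NusantaraConfig and the Nusantara schemas) is installed, and that your datasets version still supports loading local scripts (newer releases may additionally require trust_remote_code=True):

import datasets

# Source schema: id, url, title, text, summary (as declared in _info()).
source = datasets.load_dataset("xl_sum.py", name="xl_sum_source", split="train")

# Nusantara text-to-text schema: id, text_1, text_2, text_1_name, text_2_name.
t2t = datasets.load_dataset("xl_sum.py", name="xl_sum_nusantara_t2t", split="validation")

print(source[0]["summary"])
print(t2t[0]["text_2"])  # the summary side of the text-to-text pair

The xl_sum_source config yields the original XL-Sum fields, while xl_sum_nusantara_t2t exposes the article as text_1 and the summary as text_2, matching _generate_examples above.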