holylovenia committed (verified)
Commit 4174af6 · 1 Parent(s): 4b47cb4

Upload xl_sum.py with huggingface_hub

Files changed (1):
  xl_sum.py +103 -67
xl_sum.py CHANGED
@@ -1,14 +1,20 @@
 import os
 from pathlib import Path
 from typing import Dict, List, Tuple
 
 import datasets
 
-from nusacrowd.utils.configs import NusantaraConfig
-from nusacrowd.utils.constants import Tasks
-from nusacrowd.utils import schemas
-import jsonlines
-from nltk.tokenize.treebank import TreebankWordDetokenizer
 
 _CITATION = """\
 @inproceedings{hasan2021xl,
@@ -21,52 +27,87 @@ _CITATION = """\
 """
 
 _LOCAL = False
-_LANGUAGES = ["ind", "eng"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
 _DATASETNAME = "xl_sum"
 
 _DESCRIPTION = """\
-XL-Sum is a large-scale multilingual summarization dataset that covers 45 languages including Indonesian text summarization.
-The dataset is based on article-summary pairs from BBC, is highly abstractive, concise, and of high quality, as indicated by human and intrinsic evaluation.
 """
 
 _HOMEPAGE = "https://github.com/csebuetnlp/xl-sum"
 
-_LICENSE = "CC-BY-NC-SA 4.0"
 
-_URLS = {
-    _DATASETNAME: "https://huggingface.co/datasets/csebuetnlp/xlsum/resolve/main/data/indonesian_XLSum_v2.0.tar.bz2",
-}
 
 _SUPPORTED_TASKS = [Tasks.SUMMARIZATION]
 
 _SOURCE_VERSION = "2.0.0"
 
-_NUSANTARA_VERSION = "1.0.0"
 
 
 class XLSum(datasets.GeneratorBasedBuilder):
-    """XL-Sum is a large-scale multilingual summarization dataset that covers 45 languages including Indonesian text summarization. The dataset is based on article-summary pairs from BBC, is highly abstractive, concise, and of high quality, as indicated by human and intrinsic evaluation."""
 
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
-
-    BUILDER_CONFIGS = [
-        NusantaraConfig(
-            name="xl_sum_source",
-            version=datasets.Version(_SOURCE_VERSION),
-            description="xl_sum source schema",
-            schema="source",
-            subset_id="xl_sum",
-        ),
-        NusantaraConfig(
-            name="xl_sum_nusantara_t2t",
-            version=datasets.Version(_NUSANTARA_VERSION),
-            description="xl_sum Nusantara schema",
-            schema="nusantara_t2t",
-            subset_id="xl_sum",
-        ),
-    ]
 
-    DEFAULT_CONFIG_NAME = "xl_sum_source"
 
     def _info(self) -> datasets.DatasetInfo:
         if self.config.schema == "source":
@@ -79,7 +120,7 @@ class XLSum(datasets.GeneratorBasedBuilder):
                     "summary": datasets.Value("string")
                 }
             )
-        elif self.config.schema == "nusantara_t2t":
             features = schemas.text2text_features
 
         return datasets.DatasetInfo(
@@ -92,63 +133,58 @@ class XLSum(datasets.GeneratorBasedBuilder):
 
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        data_dir = Path(dl_manager.download_and_extract(_URLS[_DATASETNAME]))
-
-        data_files = {
-            "train": "indonesian_train.jsonl",
-            "validation": "indonesian_val.jsonl",
-            "test": "indonesian_test.jsonl",
-        }
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, data_files["train"]),
-                    "split": "train",
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, data_files["validation"]),
-                    "split": "dev",
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, data_files["test"]),
-                    "split": "test",
                 },
             ),
         ]
 
-    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
 
         if self.config.schema == "source":
-            with jsonlines.open(filepath) as f:
-                for each_data in f.iter():
                     ex = {
-                        "id": each_data["id"],
-                        "url": each_data["url"],
-                        "title": each_data["title"],
-                        "text": each_data["text"],
-                        "summary": each_data["summary"],
                     }
-                    yield each_data["id"], ex
 
-        elif self.config.schema == "nusantara_t2t":
-            with jsonlines.open(filepath) as f:
-                for each_data in f.iter():
                     ex = {
-                        "id": each_data["id"],
-                        "text_1": each_data["text"],
-                        "text_2": each_data["summary"],
-                        "text_1_name": each_data["title"],
                         "text_2_name": "summary"
                     }
-                    yield each_data["id"], ex
         else:
             raise ValueError(f"Invalid config: {self.config.name}")
 
@@ -1,14 +1,20 @@
+"""
+This update refers to this HF dataloader script
+https://huggingface.co/datasets/csebuetnlp/xlsum/blob/main/xlsum.py
+while conforming to the SEACrowd schema
+"""
+
 import os
 from pathlib import Path
 from typing import Dict, List, Tuple
 
+import json
 import datasets
 
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
+
 
 _CITATION = """\
 @inproceedings{hasan2021xl,
@@ -21,52 +27,87 @@ _CITATION = """\
 """
 
 _LOCAL = False
+_LANGUAGES = ["ind", "mya", "tha", "vie", "eng"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+
+_LANG_TO_DATASOURCE_LANG = {
+    "ind": "indonesian",
+    "mya": "burmese",
+    "vie": "vietnamese",
+    "tha": "thai"}
+
 _DATASETNAME = "xl_sum"
 
 _DESCRIPTION = """\
+XL-Sum, a comprehensive and diverse dataset comprising 1 million professionally annotated article-summary pairs from BBC, was extracted using a set of carefully designed heuristics.
+The dataset covers 44 languages, ranging from low- to high-resource, including 4 indigenous languages spoken in the Southeast Asia region.
 """
 
 _HOMEPAGE = "https://github.com/csebuetnlp/xl-sum"
 
+_LICENSE = Licenses.CC_BY_NC_SA_4_0.value
+
+_URLS = "https://huggingface.co/datasets/csebuetnlp/xlsum/resolve/main/data/{}_XLSum_v{}.tar.bz2"
 
 _SUPPORTED_TASKS = [Tasks.SUMMARIZATION]
 
 _SOURCE_VERSION = "2.0.0"
 
+_SEACROWD_VERSION = "2024.06.20"
+
+
+def construct_configs_on_langs() -> List[SEACrowdConfig]:
+    """
+    The function `construct_configs_on_langs` constructs a list of SEACrowdConfig objects based on the `_LANGUAGES` var, and returns the list.
+
+    output:
+    a list of `SEACrowdConfig` objects based on instantiated init variables
+    """
+
+    # set output var
+    config_list = []
+
+    # construct zipped arg for config instantiation
+    CONFIG_SUFFIXES_FOR_TASK = [TASK_TO_SCHEMA.get(task).lower() for task in _SUPPORTED_TASKS]
+    TASKS_AND_CONFIG_SUFFIX_PAIRS = list(zip(_SUPPORTED_TASKS, CONFIG_SUFFIXES_FOR_TASK))
+
+    # implement source schema
+    version, config_name_prefix = _SOURCE_VERSION, "source"
+    config_list += [
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}",
+            version=datasets.Version(version),
+            description=f"{_DATASETNAME} {config_name_prefix} schema for language code {_LANG}",
+            schema=f"{config_name_prefix}",
+            subset_id=_LANG,
+        )
+        # skip English
+        for _LANG in _LANGUAGES if _LANG != "eng"
+    ]
+
+    # implement SEACrowd schema
+    version, config_name_prefix = _SEACROWD_VERSION, "seacrowd"
+    for task_obj, config_name_suffix in TASKS_AND_CONFIG_SUFFIX_PAIRS:
+        config_list += [
+            SEACrowdConfig(
+                name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}_{config_name_suffix}",
+                version=datasets.Version(version),
+                description=f"{_DATASETNAME} {config_name_prefix} schema for {task_obj.name} and language code {_LANG}",
+                schema=f"{config_name_prefix}_{config_name_suffix}",
+                subset_id=_LANG,
+            )
+            # skip English
+            for _LANG in _LANGUAGES if _LANG != "eng"
+        ]
+    return config_list
+
 
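For orientation, the helper above produces one source config plus, for each supported task, one seacrowd config per non-English language. Assuming TASK_TO_SCHEMA maps Tasks.SUMMARIZATION to "T2T" (which the seacrowd_t2t branch in _info below suggests), enumerating the names would look like this sketch:

# Sketch (not part of the commit): list the generated config names.
# Assumes the seacrowd package is importable so the helper can run.
for cfg in construct_configs_on_langs():
    print(cfg.name)
# xl_sum_ind_source
# xl_sum_mya_source
# xl_sum_tha_source
# xl_sum_vie_source
# xl_sum_ind_seacrowd_t2t
# xl_sum_mya_seacrowd_t2t
# xl_sum_tha_seacrowd_t2t
# xl_sum_vie_seacrowd_t2t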
 class XLSum(datasets.GeneratorBasedBuilder):
+    """XL-Sum is a large-scale multilingual summarization dataset that covers 44 languages, including Indonesian."""
 
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
 
+    BUILDER_CONFIGS = construct_configs_on_langs()
 
     def _info(self) -> datasets.DatasetInfo:
         if self.config.schema == "source":
@@ -79,7 +120,7 @@ class XLSum(datasets.GeneratorBasedBuilder):
                     "summary": datasets.Value("string")
                 }
             )
+        elif self.config.schema == "seacrowd_t2t":
             features = schemas.text2text_features
 
         return datasets.DatasetInfo(
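The seacrowd_t2t branch hands the features off to schemas.text2text_features. Its authoritative definition lives in seacrowd.utils.schemas; inferred only from the fields that _generate_examples populates below, it plausibly corresponds to this sketch:

# Hypothetical stand-in for schemas.text2text_features, inferred from the
# fields filled in by _generate_examples; the real definition is in
# seacrowd.utils.schemas.
import datasets

text2text_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "text_1": datasets.Value("string"),
        "text_2": datasets.Value("string"),
        "text_1_name": datasets.Value("string"),
        "text_2_name": datasets.Value("string"),
    }
)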
@@ -92,63 +133,58 @@ class XLSum(datasets.GeneratorBasedBuilder):
 
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        lang = _LANG_TO_DATASOURCE_LANG[self.config.subset_id]
+        url = _URLS.format(lang, self.SOURCE_VERSION.version_str[:-2])
 
+        data_dir = dl_manager.download_and_extract(url)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
+                    "filepath": os.path.join(data_dir, lang + "_train.jsonl"),
                 },
             ),
             datasets.SplitGenerator(
+                name=datasets.Split.TEST,
                 gen_kwargs={
+                    "filepath": os.path.join(data_dir, lang + "_test.jsonl"),
                 },
             ),
             datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
                 gen_kwargs={
+                    "filepath": os.path.join(data_dir, lang + "_val.jsonl"),
                 },
            ),
         ]
 
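As a worked example of the URL construction above: datasets.Version("2.0.0").version_str is "2.0.0", so version_str[:-2] trims it to "2.0", and for the ind subset the formatted URL reproduces exactly the tarball the old loader hard-coded:

# Worked example (not part of the commit) of how the download URL is built.
import datasets

_URLS = "https://huggingface.co/datasets/csebuetnlp/xlsum/resolve/main/data/{}_XLSum_v{}.tar.bz2"

version = datasets.Version("2.0.0")
print(version.version_str[:-2])  # 2.0
print(_URLS.format("indonesian", version.version_str[:-2]))
# https://huggingface.co/datasets/csebuetnlp/xlsum/resolve/main/data/indonesian_XLSum_v2.0.tar.bz2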
+ def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
162
 
163
  if self.config.schema == "source":
164
+ with open(filepath, encoding="utf-8") as f:
165
+ for row in f:
166
+ data = json.loads(row)
167
  ex = {
168
+ "id": data["id"],
169
+ "url": data["url"],
170
+ "title": data["title"],
171
+ "text": data["text"],
172
+ "summary": data["summary"],
173
  }
174
+ yield data["id"], ex
175
 
176
+ elif self.config.schema == "seacrowd_t2t":
177
+ # the title is unused for this schema
178
+ with open(filepath, encoding="utf-8") as f:
179
+ for row in f:
180
+ data = json.loads(row)
181
  ex = {
182
+ "id": data["id"],
183
+ "text_1": data["text"],
184
+ "text_2": data["summary"],
185
+ "text_1_name": "text",
186
  "text_2_name": "summary"
187
  }
188
+ yield data["id"], ex
189
  else:
190
  raise ValueError(f"Invalid config: {self.config.name}")
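Finally, a minimal usage sketch, assuming this script is saved locally as xl_sum.py and the seacrowd utilities it imports are installed:

# Load the Indonesian subset under both schemas via the local script.
import datasets

# Source schema: id / url / title / text / summary, per _info above.
ind_source = datasets.load_dataset("xl_sum.py", name="xl_sum_ind_source")

# SEACrowd t2t schema: id / text_1 / text_2 / text_1_name / text_2_name.
ind_t2t = datasets.load_dataset("xl_sum.py", name="xl_sum_ind_seacrowd_t2t")

print(ind_t2t["train"][0]["text_2"])  # the reference summary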