holylovenia committed
Commit · 892b009
1 Parent(s): 207715a

Upload kopi_cc.py with huggingface_hub

Files changed: kopi_cc.py (+220, -0)
kopi_cc.py
ADDED
@@ -0,0 +1,220 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
KoPI-CC corpus

[nusantara_schema_name] = ssp
"""

import gzip
import json
from typing import List

import datasets
import zstandard as zstd

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
                                       DEFAULT_SOURCE_VIEW_NAME, Tasks)

_DATASETNAME = "kopi_cc"
_LANGUAGES = ["ind"]
_LOCAL = False
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
_URL = "https://commoncrawl.org/"
_CITATION = """\
@ARTICLE{2022arXiv220106642A,
       author = {{Abadji}, Julien and {Ortiz Suarez}, Pedro and {Romary}, Laurent and {Sagot}, Benoit},
        title = "{Towards a Cleaner Document-Oriented Multilingual Crawled Corpus}",
      journal = {arXiv e-prints},
     keywords = {Computer Science - Computation and Language},
         year = 2022,
        month = jan,
          eid = {arXiv:2201.06642},
        pages = {arXiv:2201.06642},
archivePrefix = {arXiv},
       eprint = {2201.06642},
 primaryClass = {cs.CL},
       adsurl = {https://ui.adsabs.harvard.edu/abs/2022arXiv220106642A},
      adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
@inproceedings{AbadjiOrtizSuarezRomaryetal.2021,
  author    = {Julien Abadji and Pedro Javier Ortiz Su{\'a}rez and Laurent Romary and Benoit Sagot},
  title     = {Ungoliant: An optimized pipeline for the generation of a very large-scale multilingual web corpus},
  series    = {Proceedings of the Workshop on Challenges in the Management of Large Corpora (CMLC-9) 2021. Limerick, 12 July 2021 (Online-Event)},
  editor    = {Harald L{\"u}ngen and Marc Kupietz and Piotr Bański and Adrien Barbaresi and Simon Clematide and Ines Pisetta},
  publisher = {Leibniz-Institut f{\"u}r Deutsche Sprache},
  address   = {Mannheim},
  doi       = {10.14618/ids-pub-10468},
  url       = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-104688},
  pages     = {1 -- 9},
  year      = {2021},
  abstract  = {Since the introduction of large language models in Natural Language Processing, large raw corpora have played a crucial role in Computational Linguistics.},
  language  = {en}
}

"""

_DESCRIPTION = """\
KoPI-CC (Korpus Perayapan Indonesia - Common Crawl) is an Indonesian-only extract of Common Crawl snapshots. Each snapshot is extracted with Ungoliant and then further filtered with deduplication techniques.

"""

_HOMEPAGE = "https://huggingface.co/datasets/munggok/KoPI-CC"

_LICENSE = "CC0"

_URLS = {
    "raw": "https://huggingface.co/datasets/munggok/KoPI-CC/resolve/main/{snapshot}/raw/id_meta_{index}.jsonl.zst",
    "dedup": "https://huggingface.co/datasets/munggok/KoPI-CC/resolve/main/{snapshot}/dedup/oscar-{index:012d}.json.gz",
    "neardup": "https://huggingface.co/datasets/munggok/KoPI-CC/resolve/main/{snapshot}/neardup/oscar-neardup-{index:012d}.json.gz",
    "neardup_clean": "https://huggingface.co/datasets/munggok/KoPI-CC/resolve/main/{snapshot}/neardup_clean/cleaned_oscar-neardup-{index:012d}.json.gz",
}
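# For illustration only: each shard URL is built by filling one of the templates above with
# str.format() in _split_generators(); e.g. the hypothetical call
#   _URLS["dedup"].format(snapshot="2021_17", index=3)
# resolves to
#   https://huggingface.co/datasets/munggok/KoPI-CC/resolve/main/2021_17/dedup/oscar-000000000003.json.gz
# because {index:012d} zero-pads the shard index to twelve digits.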


_N_SHARDS_PER_SNAPSHOT = {
    "2021_10": {"dedup": 132, "neardup": 120, "neardup_clean": 120},
    "2021_17": {"raw": 31, "dedup": 47, "neardup": 41, "neardup_clean": 41},
    "2021_21": {"raw": 63, "dedup": 37, "neardup": 33, "neardup_clean": 33},
    "2021_25": {"raw": 31, "dedup": 32, "neardup": 28, "neardup_clean": 28},
    "2021_31": {"raw": 35, "dedup": 47, "neardup": 42, "neardup_clean": 42},
    "2021_39": {"raw": 35, "dedup": 44, "neardup": 38, "neardup_clean": 38},
    "2021_43": {"raw": 35, "dedup": 44, "neardup": 39, "neardup_clean": 39},
    "2021_49": {"dedup": 31, "neardup": 28, "neardup_clean": 28},
    "2022_05": {"raw": 40, "dedup": 18, "neardup": 18, "neardup_clean": 35},
    "2022_21": {"raw": 40, "dedup": 42, "neardup": 37, "neardup_clean": 37},
    "2022_27": {"raw": 79, "dedup": 38, "neardup": 33, "neardup_clean": 33},
}

_SNAP_CONFIG = []
for m in list(_N_SHARDS_PER_SNAPSHOT.keys()):
    ka = list(_N_SHARDS_PER_SNAPSHOT[m].keys())
    conf = [m + "-" + a for a in ka]
    _SNAP_CONFIG.extend(conf)
_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]

_ALL_CONFIG = ["all-raw", "all-dedup", "all-neardup", "all-neardup_clean"] + _SNAP_CONFIG
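# For illustration only: the loop above yields per-snapshot config names such as
# "2021_17-raw", "2021_17-dedup", "2021_17-neardup" and "2021_17-neardup_clean",
# while the "all-*" entries select every snapshot that provides that variant.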

_SOURCE_VERSION = "2018.12.01"

_NUSANTARA_VERSION = "1.0.0"


def nusantara_config_constructor(snapshot, schema, version):
    """Construct a NusantaraConfig for the given snapshot and schema."""
    if schema != "source" and schema != "nusantara_ssp":
        raise ValueError(f"Invalid schema: {schema}")

    if snapshot == "":
        raise ValueError(f"Snapshot is required. Choose one of these snapshots: {_ALL_CONFIG}.")
    elif snapshot in _SNAP_CONFIG + _ALL_CONFIG:
        return NusantaraConfig(
            name=f"{_DATASETNAME}_{snapshot}_{schema}",
            version=datasets.Version(version),
            description=f"KoPI-CC with {schema} schema for {snapshot}",
            schema=schema,
            subset_id="kopi_cc",
        )
    else:
        raise ValueError(f"Invalid snapshot: {snapshot}. Choose one of these snapshots: {_ALL_CONFIG}.")
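# For illustration only: nusantara_config_constructor("2021_17-dedup", "nusantara_ssp", _NUSANTARA_VERSION)
# returns a config named "kopi_cc_2021_17-dedup_nusantara_ssp"; _split_generators() later parses the
# snapshot ("2021_17") and variant ("dedup") back out of that name.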


class KoPICC(datasets.GeneratorBasedBuilder):

    DEFAULT_CONFIG_NAME = "2021_17_dedup"

    BUILDER_CONFIGS = [nusantara_config_constructor(sn, "source", _SOURCE_VERSION) for sn in _ALL_CONFIG] + [nusantara_config_constructor(sn, "nusantara_ssp", _NUSANTARA_VERSION) for sn in _ALL_CONFIG]

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "meta": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_ssp":
            features = schemas.self_supervised_pretraining.features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
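    # For illustration only: the two schemas yield differently shaped records (see
    # _generate_examples below): "source" rows carry {"text", "timestamp", "url", "meta"},
    # while "nusantara_ssp" rows carry only {"id", "text"}.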

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        name = self.config.name.replace("_" + self.config.schema, "")
        name = name.replace(_DATASETNAME + "_", "")
        split_name = name.split("-")
        if split_name[0] == "all":
            urls = []
            keys = list(_N_SHARDS_PER_SNAPSHOT.keys())
            idx = 0
            if split_name[1] == "raw":
                idx = 1
                keys = [ur for ur in list(_N_SHARDS_PER_SNAPSHOT.keys()) if _N_SHARDS_PER_SNAPSHOT[ur].get("raw") is not None]
            for m in keys:
                urls.extend([_URLS[split_name[1]].format(snapshot=m, index=k + idx) for k in range(_N_SHARDS_PER_SNAPSHOT[m].get(split_name[1]))])
        else:
            urls = [_URLS[split_name[1]].format(snapshot=split_name[0], index=k + 1) for k in range(_N_SHARDS_PER_SNAPSHOT[split_name[0]][split_name[1]])]
        path = dl_manager.download(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": path, "split": "train", "type": split_name[1]},
            ),
        ]
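    # For illustration only: a config named "kopi_cc_2021_17-dedup_source" is reduced above to
    # "2021_17-dedup", so split_name becomes ["2021_17", "dedup"]; the 47 dedup shard URLs for
    # snapshot 2021_17 are then downloaded and passed to _generate_examples with type="dedup".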

    def _generate_examples(self, filepaths, split, type):
        """Yields the examples in raw (text) form by iterating over all downloaded files."""
        id_ = 0
        for filepath in filepaths:
            if type == "raw":
                with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                    for line in f:
                        if line:
                            example = json.loads(line)
                            meta = dict()
                            meta["warc_headers"] = example["warc_headers"]
                            meta["warc_headers"]["warc-identified-content-language"] = example["warc_headers"].get("warc-identified-content-language")
                            meta["identification"] = example["metadata"]["identification"]
                            meta["annotations"] = example["metadata"]["annotation"]
                            meta["line_identifications"] = example["metadata"]["sentence_identifications"]
                            if self.config.schema == "nusantara_ssp":
                                yield id_, {"id": str(id_), "text": example["content"]}
                                id_ += 1
                            else:
                                yield id_, {"text": example["content"], "url": example["warc_headers"]["warc-target-uri"], "timestamp": example["warc_headers"]["warc-date"], "meta": json.dumps(meta)}
                                id_ += 1
            else:
                with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                    for line in f:
                        if line:
                            example = json.loads(line)
                            if self.config.schema == "nusantara_ssp":
                                yield id_, {"id": str(id_), "text": example["text"]}
                                id_ += 1
                            else:
                                yield id_, {"text": example["text"], "url": example["url"], "timestamp": example["timestamp"], "meta": example["meta"]}
                                id_ += 1
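
A minimal usage sketch, assuming the nusacrowd package is installed and the script is saved locally as kopi_cc.py; the config name follows the pattern produced by nusantara_config_constructor, and the chosen snapshot/variant here is only an example:

    from datasets import load_dataset

    # Hypothetical example: load the deduplicated 2021_17 snapshot with the nusantara_ssp schema.
    dataset = load_dataset(
        "kopi_cc.py",
        name="kopi_cc_2021_17-dedup_nusantara_ssp",
        split="train",
    )
    print(dataset[0]["text"])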