davanstrien (HF staff) committed
Commit 8341efb · verified
1 Parent(s): 46736a7

switch to code examples for loading different splits
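
In place of the script, the Parquet files can be loaded directly with `load_dataset`. A minimal sketch, assuming the repo keeps the `{lang}-{decade}.parquet` layout the deleted script relied on (the `fr` language code and the decade values are illustrative, not taken from this commit):

```python
from datasets import load_dataset

# Load every decade for one language; the data_files pattern is resolved
# against the dataset repo (layout assumed: "{lang}-{decade}.parquet").
ds = load_dataset(
    "biglam/europeana_newspapers",
    data_files={"train": "fr-*.parquet"},
    split="train",
)

# Or pick out specific decades by listing the files explicitly.
ds_1870s = load_dataset(
    "biglam/europeana_newspapers",
    data_files={"train": ["fr-1870.parquet", "fr-1880.parquet"]},
    split="train",
)
```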

Files changed (1):
  1. europeana_newspapers.py +0 -173
europeana_newspapers.py DELETED
@@ -1,173 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """TODO"""
-
- from collections import defaultdict
- from pathlib import Path
-
- import datasets
- import pyarrow as pa
- import pyarrow.parquet as pq
- from datasets import Sequence, Value
- from datasets.config import PYARROW_VERSION
- from datasets.utils.logging import get_logger
- from huggingface_hub import hf_api
-
- logger = get_logger(__name__)
-
- if PYARROW_VERSION.major <= 6:
-     msg = f"pyarrow version >= 7.0.0 required for this loading script, you have {PYARROW_VERSION}"
-     logger.warning(msg)
-     raise RuntimeError(msg)
-
- _DESCRIPTION = "TODO"
-
- _HOMEPAGE = "TODO"
-
-
- api = hf_api.HfApi()
- files = api.list_repo_files("biglam/europeana_newspapers", repo_type="dataset")
- data = defaultdict(dict)
- parquet_files = (f for f in files if f.endswith(".parquet"))
- for file in parquet_files:
-     lang, decade = Path(file).stem.split("-")
-     data[lang].update({decade: file})
- _DATA = dict(data)
-
- _LANG_CONFIGS = set(_DATA.keys())
-
-
- class EuropeanaNewspapersConfig(datasets.BuilderConfig):
-     """BuilderConfig for the Europeana Newspapers dataset."""
-
-     def __init__(
-         self, *args, languages=None, min_decade=None, max_decade=None, **kwargs
-     ):
-         """BuilderConfig for the Europeana Newspapers dataset.
-
-         Args:
-             languages (:obj:`List[str]`): List of languages to load.
-             **kwargs: keyword arguments forwarded to super.
-         """
-
-         super().__init__(
-             *args,
-             name="+".join(languages),
-             **kwargs,
-         )
-         for lang in languages:
-             if lang not in _LANG_CONFIGS:
-                 raise ValueError(
-                     f"{lang} not a valid language key for this dataset, valid keys are {_LANG_CONFIGS}"
-                 )
-         self.languages = languages
-         self.min_decade = min_decade
-         self.max_decade = max_decade
-
-
- class EuropeanaNewspapers(datasets.GeneratorBasedBuilder):
-     """TODO."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIG_CLASS = EuropeanaNewspapersConfig
-     BUILDER_CONFIGS = [
-         EuropeanaNewspapersConfig(languages=[lang]) for lang in _LANG_CONFIGS
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "text": Value(dtype="string"),
-                     "mean_ocr": Value(dtype="float64"),
-                     "std_ocr": Value(dtype="float64"),
-                     "bounding_boxes": Sequence(
-                         feature=Sequence(
-                             feature=Value(dtype="float64", id=None),
-                             length=-1,
-                         ),
-                     ),
-                     "title": Value(dtype="string"),
-                     "date": Value(dtype="string"),
-                     "language": Sequence(
-                         feature=Value(dtype="string", id=None),
-                     ),
-                     "item_iiif_url": Value(
-                         dtype="string",
-                     ),
-                     # "multi_language": Value(dtype="bool"),
-                     "issue_uri": Value(dtype="string"),
-                     "id": Value(dtype="string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license="Multiple: see the 'license' field of each sample.",
-         )
-
-     def _split_generators(self, dl_manager):
-         # parquet_files = list(Path(".").rglob("*.parquet"))
-         languages = self.config.languages
-         min_decade = self.config.min_decade
-         max_decade = self.config.max_decade
-         data_files = []
-         for language in languages:
-             for decade, file in _DATA[language].items():
-                 decade = int(decade)
-                 if max_decade is None and min_decade is None:
-                     data_files.append(file)
-                 if (
-                     max_decade is not None
-                     and min_decade is not None
-                     and min_decade <= decade <= max_decade
-                 ):
-                     data_files.append(file)
-                 if (
-                     min_decade is not None
-                     and max_decade is None
-                     and decade >= min_decade
-                 ):
-                     data_files.append(file)
-                 if (
-                     min_decade is None
-                     and max_decade is not None
-                     and decade <= max_decade
-                 ):
-                     data_files.append(file)
-
-         files = dl_manager.download(data_files)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "files": files,
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, files):
-         key = 0
-         for file in files:
-             with open(file, "rb") as f:
-                 parquet_file = pq.ParquetFile(f)
-                 for record_batch in parquet_file.iter_batches(batch_size=10_000):
-                     pa_table = pa.Table.from_batches([record_batch])
-                     rows = pa_table.to_pylist()
-                     for row in rows:
-                         row.pop("multi_language")
-                         yield key, row
-                         key += 1
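
The min/max decade filtering that the removed `_split_generators` implemented can be reproduced client-side before calling `load_dataset`. A sketch under the same assumptions the script itself made (`list_repo_files` and the `{lang}-{decade}.parquet` stems come from the deleted code; the example language and decade range are illustrative):

```python
from pathlib import Path

from datasets import load_dataset
from huggingface_hub import HfApi


def decade_files(lang, min_decade=None, max_decade=None):
    """Select the repo's Parquet files for one language and decade range,
    mirroring the min/max logic of the removed _split_generators.
    (File naming "{lang}-{decade}.parquet" is taken from the deleted script.)"""
    api = HfApi()
    files = api.list_repo_files("biglam/europeana_newspapers", repo_type="dataset")
    selected = []
    for f in files:
        if not f.endswith(".parquet"):
            continue
        file_lang, decade = Path(f).stem.split("-")
        decade = int(decade)
        if file_lang != lang:
            continue
        if min_decade is not None and decade < min_decade:
            continue
        if max_decade is not None and decade > max_decade:
            continue
        selected.append(f)
    return selected


# Example: French files for decades 1870 through 1890 (values illustrative).
ds = load_dataset(
    "biglam/europeana_newspapers",
    data_files={"train": decade_files("fr", min_decade=1870, max_decade=1890)},
    split="train",
)
```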
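
The removed `_generate_examples` also dropped the `multi_language` column while yielding rows; after loading the Parquet files directly, the same can be done with `remove_columns` (assuming the column is still present in the files):

```python
# Drop the "multi_language" column, as the removed _generate_examples did.
ds = ds.remove_columns("multi_language")
```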