albertvillanova (HF staff) committed
Commit f957799 · verified · 1 Parent(s): 52a3a0b

Add dataset loading script

Files changed (1):
  1. pmc_open_access.py +210 -0
pmc_open_access.py ADDED
@@ -0,0 +1,210 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """PMC Open Access Subset."""
+
+ import datetime
+
+ import pandas as pd
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The PMC Open Access Subset includes more than 3.4 million journal articles and preprints that are made available under
+ license terms that allow reuse.
+
+ Not all articles in PMC are available for text mining and other reuse; many have copyright protection. However, articles
+ in the PMC Open Access Subset are made available under Creative Commons or similar licenses that generally allow more
+ liberal redistribution and reuse than a traditional copyrighted work.
+
+ The PMC Open Access Subset is one part of the PMC Article Datasets.
+ """
+
+ _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ _URL = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/{subset}/txt/"
+ _SUBSETS = {
+     "commercial": "oa_comm",
+     "non_commercial": "oa_noncomm",
+     "other": "oa_other",
+ }
+ _BASELINE_DATE = "2021-12-17"
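+ # For reference, the constants above resolve to package URLs like the following
+ # (illustrative: commercial subset, first baseline package):
+ #   https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/txt/oa_comm_txt.PMC000xxxxxx.baseline.2021-12-17.tar.gz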
+
+
+ class PmcOpenAccessConfig(datasets.BuilderConfig):
+     """BuilderConfig for the PMC Open Access Subset."""
+
+     def __init__(self, subsets=None, **kwargs):
+         """BuilderConfig for the PMC Open Access Subset.
+
+         Args:
+             subsets (:obj:`List[str]`): List of subsets/groups to load.
+             **kwargs: Keyword arguments forwarded to super.
+         """
+         subsets = [subsets] if isinstance(subsets, str) else subsets
+         super().__init__(
+             name="+".join(subsets), **kwargs,
+         )
+         self.subsets = subsets if subsets != ["all"] else list(_SUBSETS.keys())
+
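+ # Note: the config name is built before "all" is expanded, so PmcOpenAccessConfig(subsets="all")
+ # is named "all" while its self.subsets becomes ["commercial", "non_commercial", "other"].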
+
+
+ class PmcOpenAccess(datasets.GeneratorBasedBuilder):
+     """PMC Open Access Subset."""
+
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIG_CLASS = PmcOpenAccessConfig
+     BUILDER_CONFIGS = [PmcOpenAccessConfig(subsets="all")] + [
+         PmcOpenAccessConfig(subsets=subset) for subset in _SUBSETS
+     ]
+     DEFAULT_CONFIG_NAME = "all"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "pmid": datasets.Value("string"),
+                     "accession_id": datasets.Value("string"),
+                     "license": datasets.Value("string"),
+                     "last_updated": datasets.Value("string"),
+                     "retracted": datasets.Value("string"),
+                     "citation": datasets.Value("string"),
+                 }
+             ),
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
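+     # For every requested subset, _split_generators below downloads the baseline file
+     # lists and archives published on _BASELINE_DATE, plus one daily incremental file
+     # list and archive for each day since then.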
+     def _split_generators(self, dl_manager):
+         baseline_file_lists = []
+         baseline_archives = []
+         incremental_file_lists = []
+         incremental_archives = []
+         for subset in self.config.subsets:
+             url = _URL.format(subset=_SUBSETS[subset])
+             basename = f"{_SUBSETS[subset]}_txt."
+             # Baselines: packages PMC000xxxxxx through PMC008xxxxxx
+             baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in range(9)]
+             # Download the baseline packages one by one, so missing ones can be skipped
+             for baseline in baselines:
+                 baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
+                 try:
+                     baseline_file_list = dl_manager.download(baseline_file_list_url)
+                 except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
+                     continue
+                 baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
+                 try:
+                     baseline_archive = dl_manager.download(baseline_archive_url)
+                 except FileNotFoundError:
+                     continue
+                 baseline_file_lists.append(baseline_file_list)
+                 baseline_archives.append(baseline_archive)
+             # Incrementals: one package per day since the baseline date
+             date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
+             incremental_dates = [
+                 (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
+                 for i in range(date_delta.days)
+             ]
+             incrementals = [f"incr.{date}" for date in incremental_dates]
+             incremental_urls = {
+                 "incremental_file_lists": [
+                     f"{url}{basename}{incremental}.filelist.csv" for incremental in incrementals
+                 ],
+                 "incremental_archives": [f"{url}{basename}{incremental}.tar.gz" for incremental in incrementals],
+             }
+             incremental_paths = dl_manager.download(incremental_urls)
+             incremental_file_lists.extend(incremental_paths["incremental_file_lists"])
+             incremental_archives.extend(incremental_paths["incremental_archives"])
+         # Return after the loop, so configs spanning several subsets (e.g. "all")
+         # yield examples from all of them
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "baseline_file_lists": baseline_file_lists,
+                     "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
+                     "incremental_file_lists": incremental_file_lists,
+                     "incremental_archives": [
+                         dl_manager.iter_archive(archive) for archive in incremental_archives
+                     ],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, baseline_file_lists, baseline_archives, incremental_file_lists, incremental_archives):
+         key = 0
+         # Baselines
+         for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
+             try:
+                 baselines = pd.read_csv(baseline_file_list, index_col="Article File").to_dict(orient="index")
+                 for path, file in baseline_archive:
+                     data = baselines.pop(path)
+                     content = file.read()
+                     try:
+                         text = content.decode("utf-8")
+                     except UnicodeDecodeError:
+                         # Some articles are not valid UTF-8; fall back to latin-1
+                         text = content.decode("latin-1")
+                     data = {
+                         "text": text,
+                         "pmid": data["PMID"],
+                         "accession_id": data["AccessionID"],
+                         "license": data["License"],
+                         "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
+                         "retracted": data["Retracted"],
+                         "citation": data["Article Citation"],
+                     }
+                     yield key, data
+                     key += 1
+             except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
+                 continue
+         # Incrementals
+         if incremental_file_lists:
+             for incremental_file_list, incremental_archive in zip(incremental_file_lists, incremental_archives):
+                 incrementals = pd.read_csv(incremental_file_list, index_col="Article File").to_dict(orient="index")
+                 for path, file in incremental_archive:
+                     data = incrementals.pop(path)
+                     content = file.read()
+                     try:
+                         text = content.decode("utf-8")
+                     except UnicodeDecodeError:
+                         text = content.decode("latin-1")
+                     data = {
+                         "text": text,
+                         "pmid": data["PMID"],
+                         "accession_id": data["AccessionID"],
+                         "license": data["License"],
+                         "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
+                         "retracted": data["Retracted"],
+                         "citation": data["Article Citation"],
+                     }
+                     yield key, data
+                     key += 1
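
For reference, a script like this is normally consumed through the datasets library, selecting one of the configs defined above ("all", "commercial", "non_commercial", "other"). A minimal usage sketch, assuming the script is loaded from a local path (the Hub dataset id is not shown in this commit):

    from datasets import load_dataset

    # Load only the commercially licensed group; the "all" config combines all three.
    pmc = load_dataset("path/to/pmc_open_access.py", "commercial", split="train")
    print(pmc[0]["accession_id"], pmc[0]["license"])

Note that this downloads the full baseline plus every daily incremental archive for the chosen subsets, which is a very large download.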