Commit 9554654 · 1 Parent(s): f9b32ec
Committed by gabrielaltay

upload hubscripts/bc7_litcovid_hub.py to hub from bigbio repo

Files changed (1):
  1. bc7_litcovid.py +215 -0
bc7_litcovid.py ADDED
@@ -0,0 +1,215 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from .bigbiohub import text_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @inproceedings{chen2021overview,
+     title = {
+         Overview of the BioCreative VII LitCovid Track: multi-label topic
+         classification for COVID-19 literature annotation
+     },
+     author = {
+         Chen, Qingyu and Allot, Alexis and Leaman, Robert and Do{\\u{g}}an, Rezarta
+         Islamaj and Lu, Zhiyong
+     },
+     year = 2021,
+     booktitle = {Proceedings of the seventh BioCreative challenge evaluation workshop}
+ }
+
+ """
+
+ _DATASETNAME = "bc7_litcovid"
+ _DISPLAYNAME = "BC7-LitCovid"
+
+ _DESCRIPTION = """\
+ The training and development datasets contain the publicly-available \
+ text of over 30 thousand COVID-19-related articles and their metadata \
+ (e.g., title, abstract, journal). Articles in both datasets have been \
+ manually reviewed and annotated by in-house models.
+ """
+
+ _HOMEPAGE = "https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vii/track-5/"
+
+ _LICENSE = 'License information unavailable'
+
+ _BASE = "https://ftp.ncbi.nlm.nih.gov/pub/lu/LitCovid/biocreative/BC7-LitCovid-"
+
+ _URLS = {
+     _DATASETNAME: {
+         "train": _BASE + "Train.csv",
+         "validation": _BASE + "Dev.csv",
+         "test": _BASE + "Test-GS.csv",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+ _CLASS_NAMES = [
+     "Epidemic Forecasting",
+     "Treatment",
+     "Prevention",
+     "Mechanism",
+     "Case Report",
+     "Transmission",
+     "Diagnosis",
+ ]
+
+
+ class BC7LitCovidDataset(datasets.GeneratorBasedBuilder):
+     """
+     Track 5 - LitCovid track Multi-label topic classification for
+     COVID-19 literature annotation
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="bc7_litcovid_source",
+             version=SOURCE_VERSION,
+             description="bc7_litcovid source schema",
+             schema="source",
+             subset_id="bc7_litcovid",
+         ),
+         BigBioConfig(
+             name="bc7_litcovid_bigbio_text",
+             version=BIGBIO_VERSION,
+             description="bc7_litcovid BigBio schema",
+             schema="bigbio_text",
+             subset_id="bc7_litcovid",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "bc7_litcovid_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+
+             features = datasets.Features(
+                 {
+                     "pmid": datasets.Value("string"),
+                     "journal": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "abstract": datasets.Value("string"),
+                     "keywords": datasets.Sequence(datasets.Value("string")),
+                     "pub_type": datasets.Sequence(datasets.Value("string")),
+                     "authors": datasets.Sequence(datasets.Value("string")),
+                     "doi": datasets.Value("string"),
+                     "labels": datasets.Sequence(
+                         datasets.ClassLabel(names=_CLASS_NAMES)
+                     ),
+                 }
+             )
+
+         elif self.config.schema == "bigbio_text":
+             features = text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         # Download all the CSV
+         urls = _URLS[_DATASETNAME]
+         path_train = dl_manager.download(urls["train"])
+         path_validation = dl_manager.download(urls["validation"])
+         path_test = dl_manager.download(urls["test"])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": path_train,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": path_test,
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": path_validation,
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         idx = 0
+
+         # Load the CSV and convert it to the string format
+         df = pd.read_csv(filepath, sep=",").astype(str).replace({"nan": None})
+
+         for index, e in df.iterrows():
+
+             if self.config.schema == "source":
+
+                 yield idx, {
+                     "pmid": e["pmid"],
+                     "journal": e["journal"],
+                     "title": e["title"],
+                     "abstract": e["abstract"],
+                     "keywords": e["keywords"].split(";")
+                     if e["keywords"] is not None
+                     else [],
+                     "pub_type": e["pub_type"].split(";")
+                     if e["pub_type"] is not None
+                     else [],
+                     "authors": e["authors"].split(";")
+                     if e["authors"] is not None
+                     else [],
+                     "doi": e["doi"],
+                     "labels": e["label"].split(";"),
+                 }
+
+             elif self.config.schema == "bigbio_text":
+
+                 yield idx, {
+                     "id": idx,
+                     "document_id": e["pmid"],
+                     "text": e["abstract"],
+                     "labels": e["label"].split(";"),
+                 }
+
+             idx += 1
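
For reference, loading the two configs defined in BUILDER_CONFIGS above might look roughly like this. This is a sketch, not part of the committed script: it assumes bc7_litcovid.py and the bigbiohub helper it imports are available in the working directory, and that the datasets library is installed.

from datasets import load_dataset

# Source schema: pmid, journal, title, abstract, keywords, pub_type, authors, doi, labels
# (newer datasets releases may additionally require trust_remote_code=True)
source = load_dataset("bc7_litcovid.py", name="bc7_litcovid_source")

# BigBio text schema, as yielded above: id, document_id, text, labels
bigbio = load_dataset("bc7_litcovid.py", name="bc7_litcovid_bigbio_text")

# In the source schema, labels are a Sequence of ClassLabel over _CLASS_NAMES
print(source["train"][0]["labels"])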