Commit e127615 by gabrielaltay · 1 Parent(s): 3a5603f

upload hubscripts/cas_hub.py to hub from bigbio repo

Files changed (1):
  1. cas.py +261 -0
cas.py ADDED
@@ -0,0 +1,261 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import os
+
+ import datasets
+ import numpy as np
+
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ from .bigbiohub import kb_features
+ from .bigbiohub import text_features
+
+ _LANGUAGES = ["French"]
+ _PUBMED = False
+ _LOCAL = True
+ _CITATION = r"""
+ @inproceedings{grabar-etal-2018-cas,
+     title = {{CAS}: {F}rench Corpus with Clinical Cases},
+     author = {Grabar, Natalia and Claveau, Vincent and Dalloux, Cl{\'e}ment},
+     year = 2018,
+     month = oct,
+     booktitle = {
+         Proceedings of the Ninth International Workshop on Health Text Mining and
+         Information Analysis
+     },
+     publisher = {Association for Computational Linguistics},
+     address = {Brussels, Belgium},
+     pages = {122--128},
+     doi = {10.18653/v1/W18-5614},
+     url = {https://aclanthology.org/W18-5614},
+     abstract = {
+         Textual corpora are extremely important for various NLP applications as
+         they provide information necessary for creating, setting and testing these
+         applications and the corresponding tools. They are also crucial for
+         designing reliable methods and reproducible results. Yet, in some areas,
+         such as the medical area, due to confidentiality or to ethical reasons, it
+         is complicated and even impossible to access textual data representative of
+         those produced in these areas. We propose the CAS corpus built with
+         clinical cases, such as they are reported in the published scientific
+         literature in French. We describe this corpus, currently containing over
+         397,000 word occurrences, and the existing linguistic and semantic
+         annotations.
+     }
+ }"""
+
+ _DATASETNAME = "cas"
+ _DISPLAYNAME = "CAS"
+
+ _DESCRIPTION = """\
+ We manually annotated two corpora from the biomedical field. The ESSAI corpus \
+ contains clinical trial protocols in French. They were mainly obtained from the \
+ National Cancer Institute. The typical protocol consists of two parts: the \
+ summary of the trial, which indicates the purpose of the trial and the methods \
+ applied; and a detailed description of the trial with the inclusion and \
+ exclusion criteria. The CAS corpus contains clinical cases published in \
+ scientific literature and training material. They are published in different \
+ journals from French-speaking countries (France, Belgium, Switzerland, Canada, \
+ African countries, tropical countries) and are related to various medical \
+ specialties (cardiology, urology, oncology, obstetrics, pulmonology, \
+ gastroenterology). The purpose of clinical cases is to describe clinical \
+ situations of patients. Hence, their content is close to the content of clinical \
+ narratives (description of diagnoses, treatments or procedures, evolution, \
+ family history, expected audience, etc.). In clinical cases, negation is \
+ frequently used to describe the patient's signs, symptoms, and diagnosis. \
+ Speculation is present as well, but less frequently.
+
+ This version only contains the annotated CAS corpus.
+ """
+
+ _HOMEPAGE = "https://clementdalloux.fr/?page_id=28"
+
+ _LICENSE = "Data User Agreement"
+
+ _URLS = {
+     "cas_source": "",
+     "cas_bigbio_text": "",
+     "cas_bigbio_kb": "",
+ }
+
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+ _SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]
+
+
+ class CAS(datasets.GeneratorBasedBuilder):
+     """CAS: a French corpus of clinical cases annotated for negation, speculation, and POS tags."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     DEFAULT_CONFIG_NAME = "cas_source"
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="cas_source",
+             version=SOURCE_VERSION,
+             description="CAS source schema",
+             schema="source",
+             subset_id="cas",
+         ),
+         BigBioConfig(
+             name="cas_bigbio_text",
+             version=BIGBIO_VERSION,
+             description="CAS simplified BigBio schema for negation/speculation classification",
+             schema="bigbio_text",
+             subset_id="cas",
+         ),
+         BigBioConfig(
+             name="cas_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="CAS simplified BigBio schema for part-of-speech tagging",
+             schema="bigbio_kb",
+             subset_id="cas",
+         ),
+     ]
+
+     def _info(self):
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "text": [datasets.Value("string")],
+                     "lemmas": [datasets.Value("string")],
+                     "POS_tags": [datasets.Value("string")],
+                     "labels": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.schema == "bigbio_text":
+             features = text_features
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         if self.config.data_dir is None:
+             raise ValueError(
+                 "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
+             )
+         else:
+             data_dir = self.config.data_dir
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"datadir": data_dir},
+                 ),
+             ]
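+
+     # Each corpus file (CAS_neg.txt, CAS_spec.txt) is a plain-text token
+     # listing: every non-empty line carries five tab-separated columns
+     # (document id, word id, token, lemma, POS tag), and documents are
+     # reassembled by grouping lines on the document id.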
+     def _generate_examples(self, datadir):
+         key = 0
+         for file in ["CAS_neg.txt", "CAS_spec.txt"]:
+             filepath = os.path.join(datadir, file)
+             label = "negation" if "neg" in file else "speculation"
+             id_docs = []
+             id_words = []
+             words = []
+             lemmas = []
+             POS_tags = []
+
+             with open(filepath, encoding="utf-8") as f:
+                 for line in f:
+                     line_content = line.rstrip("\n").split("\t")
+                     if len(line_content) > 1:  # skip empty/malformed lines
+                         id_docs.append(line_content[0])
+                         id_words.append(line_content[1])
+                         words.append(line_content[2])
+                         lemmas.append(line_content[3])
+                         POS_tags.append(line_content[4])
+
+             dic = {
+                 "id_docs": np.array(list(map(int, id_docs))),
+                 "id_words": id_words,
+                 "words": words,
+                 "lemmas": lemmas,
+                 "POS_tags": POS_tags,
+             }
+             if self.config.schema == "source":
+                 for doc_id in set(dic["id_docs"]):
+                     idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                     text = [dic["words"][idx] for idx in idces]
+                     text_lemmas = [dic["lemmas"][idx] for idx in idces]
+                     POS_tags_ = [dic["POS_tags"][idx] for idx in idces]
+                     yield key, {
+                         "id": str(key),
+                         "document_id": str(doc_id),
+                         "text": text,
+                         "lemmas": text_lemmas,
+                         "POS_tags": POS_tags_,
+                         "labels": [label],
+                     }
+                     key += 1
+             elif self.config.schema == "bigbio_text":
+                 for doc_id in set(dic["id_docs"]):
+                     idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                     text = " ".join(dic["words"][idx] for idx in idces)
+                     yield key, {
+                         "id": str(key),
+                         "document_id": str(doc_id),
+                         "text": text,
+                         "labels": [label],
+                     }
+                     key += 1
+             elif self.config.schema == "bigbio_kb":
+                 for doc_id in set(dic["id_docs"]):
+                     idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                     text = [dic["words"][idx] for idx in idces]
+                     POS_tags_ = [dic["POS_tags"][idx] for idx in idces]
+
+                     data = {
+                         "id": str(key),
+                         "document_id": str(doc_id),
+                         "passages": [],
+                         "entities": [],
+                         "relations": [],
+                         "events": [],
+                         "coreferences": [],
+                     }
+                     key += 1
+
+                     # NB: offsets are token indices, not character offsets
+                     data["passages"] = [
+                         {
+                             "id": str(key + i),
+                             "type": "sentence",
+                             "text": [text[i]],
+                             "offsets": [[i, i + 1]],
+                         }
+                         for i in range(len(text))
+                     ]
+                     key += len(text)
+
+                     # one POS-tag "entity" per token
+                     for i in range(len(text)):
+                         entity = {
+                             "id": str(key),
+                             "type": "POS_tag",
+                             "text": [POS_tags_[i]],
+                             "offsets": [[i, i + 1]],
+                             "normalized": [],
+                         }
+                         data["entities"].append(entity)
+                         key += 1
+
+                     yield key, data
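
Since _LOCAL is True, the script downloads nothing: CAS_neg.txt and CAS_spec.txt have to be obtained separately under the Data User Agreement, and their directory passed as data_dir. A minimal usage sketch — the repo id bigbio/cas and the local path are assumptions for illustration:

import datasets

# hypothetical local directory holding CAS_neg.txt and CAS_spec.txt
data_dir = "/path/to/cas_corpus"

# source schema: per-document tokens, lemmas, POS tags, and a
# negation/speculation label
ds = datasets.load_dataset("bigbio/cas", name="cas_source", data_dir=data_dir)

# simplified BigBio text schema for document-level classification
ds_text = datasets.load_dataset("bigbio/cas", name="cas_bigbio_text", data_dir=data_dir)

print(ds["train"][0]["labels"])  # e.g. ['negation']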