thbndi committed on
Commit
8b8ed7d
·
1 Parent(s): 8b9e3ab

Create Mimic4Dataset.py

Browse files
Files changed (1) hide show
  1. Mimic4Dataset.py +145 -0
Mimic4Dataset.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import json
3
+ import os
4
+ import pandas as pd
5
+ import datasets
6
+
7
# Card text shown on the Hub dataset page.
_DESCRIPTION = """\
Dataset for mimic4 data, by default for the Mortality task.
Available tasks are: Mortality, Length of Stay, Readmission, Phenotype.
The data is extracted from the mimic4 database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
#TODO ADD DESCRIPTION COHORTS
"""

# Dataset card homepage on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
# URL of the paper to cite for the extraction pipeline.
_CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
17
class Mimic4Dataset(datasets.GeneratorBasedBuilder):
    """Builder for MIMIC-IV cohorts extracted by the healthylaife pipeline.

    One configuration per task (Mortality, Length of Stay, Readmission,
    Phenotype). Each config expects CSVs under ``./data/csv/<task>`` with
    one sub-folder per ICU stay containing three files: ``demo.csv``
    (flat header), ``static.csv`` and ``dynamic.csv`` (two-level headers
    whose first level groups COND / MEDS / PROC / CHART / OUT columns).
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="Phenotype",
            version=VERSION,
            data_dir=os.path.abspath("./data/csv/Phenotype"),
            description="Dataset for mimic4 Phenotype task",
        ),
        datasets.BuilderConfig(
            name="Readmission",
            version=VERSION,
            data_dir=os.path.abspath("./data/csv/Readmission"),
            description="Dataset for mimic4 Readmission task",
        ),
        datasets.BuilderConfig(
            name="Length of Stay",
            version=VERSION,
            # NOTE(review): "Lenght_of_Stay" is misspelled but presumably
            # matches the on-disk folder produced by the pipeline — confirm
            # before renaming the directory or this path.
            data_dir=os.path.abspath("./data/csv/Lenght_of_Stay"),
            description="Dataset for mimic4 Length of Stay task",
        ),
        datasets.BuilderConfig(
            name="Mortality",
            version=VERSION,
            data_dir=os.path.abspath("./data/csv/Mortality"),
            description="Dataset for mimic4 Mortality task",
        ),
    ]

    DEFAULT_CONFIG_NAME = "Mortality"

    def _info(self):
        """Declare the feature schema shared by every task configuration."""
        features = datasets.Features(
            {
                "gender": datasets.Value("string"),
                "ethnicity": datasets.Value("string"),
                "insurance": datasets.Value("string"),
                "age": datasets.Value("int32"),
                "COND": datasets.Sequence(datasets.Value("int32"), length=None),
                "MEDS": datasets.Sequence(datasets.Value("int32"), length=None),
                "PROC": datasets.Sequence(datasets.Value("int32"), length=None),
                "CHART": datasets.Sequence(datasets.Value("int32"), length=None),
                "OUT": datasets.Sequence(datasets.Value("int32"), length=None),
                "label": datasets.ClassLabel(names=["0", "1"]),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Collect every per-stay CSV below the config's data_dir.

        Directories and files are sorted: ``os.listdir`` order is
        arbitrary, and ``_generate_examples`` relies on the three CSVs of
        one stay being contiguous in the list. Sorting also makes the
        example order deterministic across filesystems.
        """
        data_dir = self.config.data_dir

        train_files = []
        for stay_folder in sorted(os.listdir(data_dir)):
            stay_dir = os.path.join(data_dir, stay_folder)
            if not os.path.isdir(stay_dir):
                continue
            for file_name in sorted(os.listdir(stay_dir)):
                if file_name.endswith(".csv"):
                    train_files.append(os.path.join(stay_dir, file_name))

        # All data is exposed as a single train split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": train_files,
                    "split": datasets.Split.TRAIN,
                },
            )
        ]

    def _generate_examples(self, filepaths, split):
        """Yield one example per ICU stay.

        Each consecutive triple of paths in ``filepaths`` belongs to one
        stay folder (demo.csv, dynamic.csv, static.csv).
        """
        # NOTE(review): there is no separator between the task name and
        # "labels.csv", so this resolves to e.g. "Mortalitylabels.csv".
        # Confirm this matches the file the pipeline actually writes.
        labels = pd.read_csv("./data/csv/" + self.config.name + "labels.csv")

        def _pick(triple, marker):
            """Return the path in *triple* containing *marker*, defaulting
            to the last entry (preserves the original fallback)."""
            for path in triple[:-1]:
                if marker in path:
                    return path
            return triple[-1]

        for i in range(0, len(filepaths), 3):
            triple = filepaths[i:i + 3]
            static_file = _pick(triple, "static.csv")
            demographic_file = _pick(triple, "demo.csv")
            dynamic_file = _pick(triple, "dynamic.csv")

            # Dynamic features: two-level header; first level is the group.
            dyn = pd.read_csv(dynamic_file, header=[0, 1])
            meds = dyn["MEDS"]
            proc = dyn["PROC"]
            chart = dyn["CHART"]
            out = dyn["OUT"]

            # Static features: keep only the condition-flag columns.
            stat = pd.read_csv(static_file, header=[0, 1])
            stat = stat["COND"]

            # Demographics: plain single-row CSV.
            demo = pd.read_csv(demographic_file, header=0)

            # Flatten the first row of each frame into plain dicts.
            # NOTE(review): the schema declares Sequence(int32) for these
            # fields but a dict is yielded — verify datasets accepts this,
            # otherwise yield list(…_dict.values()) instead.
            stat_dict = stat.iloc[0].to_dict()
            demo_dict = demo.iloc[0].to_dict()
            meds_dict = meds.iloc[0].to_dict()
            proc_dict = proc.iloc[0].to_dict()
            chart_dict = chart.iloc[0].to_dict()
            out_dict = out.iloc[0].to_dict()

            # The stay_id is the name of the folder holding the three CSVs.
            # os.path is used instead of splitting on "/" for portability.
            stay_id = os.path.basename(os.path.dirname(demographic_file))

            # Compare as strings: stay_id comes from a path while the CSV
            # column is typically parsed as int, so a direct == never
            # matched and int() on the empty Series raised. .iloc[0] also
            # replaces the deprecated int(Series) conversion.
            match = labels.loc[labels["stay_id"].astype(str) == stay_id, "label"]
            label = int(match.iloc[0])

            yield stay_id, {
                "gender": demo_dict["gender"],
                "ethnicity": demo_dict["ethnicity"],
                "insurance": demo_dict["insurance"],
                "age": demo_dict["age"],
                "MEDS": meds_dict,
                "PROC": proc_dict,
                "CHART": chart_dict,
                "OUT": out_dict,
                "COND": stat_dict,
                "label": label,
            }