Liouss committed on
Commit 2873ac9 · 1 Parent(s): ba7227a

Upload mimic3-benchmarks-irit.py

Files changed (1)
  1. mimic3-benchmarks-irit.py +1824 -0
mimic3-benchmarks-irit.py ADDED
@@ -0,0 +1,1824 @@
1
+ import csv
2
+ import os
3
+ import datasets
4
+ import numpy as np
5
+ from datetime import datetime
6
+ import pandas as pd
7
+ from datasets import IterableDataset
8
+ from scipy.stats import skew
9
+ import sys
10
+ import pickle
11
+ from sklearn.preprocessing import LabelEncoder
12
+
13
+ DATASET_SAVE_PATH = os.path.join(os.path.expanduser('~'),"mimic3_dataset")
14
+ os.makedirs(DATASET_SAVE_PATH,exist_ok=True)
15
+
16
+ np.set_printoptions(threshold=sys.maxsize)
17
+ np.set_printoptions(suppress=True)
18
+ ###################################
19
+ # SOME UTILS #
20
+ ###################################
21
+
22
+ def get_progression(current,total,length=20,filled_str="=",empty_str="-"):
23
+ nb = round(length*current/total)
24
+ return "["+(nb*filled_str)+((length-nb)*empty_str)+"]"
25
+
26
+ def is_empty_value(value,empty_value):
27
+ """
28
+ Returns whether value is an empty value (for example np.nan if empty_value is np.nan)
29
+ value must not be a list
30
+ """
31
+ return (isinstance(value,float) and np.isnan(empty_value) and np.isnan(value)) or ((type(value) != list) and (value == empty_value))
32
+
33
+ def is_empty_list(l,empty_value):
34
+ """
35
+ Returns whether the list is filled only with empty values (for example empty_value==np.nan and l==[np.nan,np.nan])
36
+ l must be a list
37
+ """
38
+ if isinstance(l,float) or isinstance(l,str) or isinstance(l,int):
39
+ return False
40
+ for elem in l:
41
+ if not is_empty_value(elem,empty_value):
42
+ return False
43
+ return True
44
+
45
+ def dtc(x):
46
+ """
47
+ string to datetime
48
+ """
49
+ return datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
50
+
51
+ def bic(x):
52
+ """
53
+ string to int
54
+ """
55
+ try:
56
+ return (-1 if x == "" else int(x))
57
+ except:
58
+ print("error",x)
59
+ return -1
60
+ def bfc(x):
61
+ """
62
+ string to float
63
+ """
64
+ try:
65
+ return (-1 if x == "" else float(x))
66
+ except:
67
+ print("error",x)
68
+ return -1
69
+
70
+ def id_to_string(id):
71
+ """
72
+ id (string or float) to string
73
+ """
74
+ if (isinstance(id,float) and np.isnan(id)) or not id or id == "":
75
+ return id
76
+ try:
77
+ return str(int(float(id)))
78
+ except:
79
+ return str(id)
80
+
81
+
82
+ ################################################################################
83
+ ################################################################################
84
+ ## ##
85
+ ## DATASET TO NUMPY ARRAY ##
86
+ ## ##
87
+ ################################################################################
88
+ ################################################################################
89
+
90
+ ###################################
91
+ # ABOUT DATA NORMALIZATION #
92
+ ###################################
93
+
94
+ def calculate_normalization(iterator):
95
+ """
96
+ calculates means and stds over every column of every episode given by iterator\n
97
+ """
98
+ nb = 0
99
+ sum_x = None
100
+ sum_x_sq = None
101
+
102
+ #feeding data
103
+ for batch in iterator:
104
+ x = np.array(batch[0])
105
+ nb += x.shape[0]*x.shape[1]
106
+ if sum_x is None:
107
+ sum_x = np.sum(x, axis=(0,1))
108
+ sum_x_sq = np.sum(x**2, axis=(0,1))
109
+ else:
110
+ sum_x += np.sum(x, axis=(0,1))
111
+ sum_x_sq += np.sum(x**2, axis=(0,1))
112
+
113
+ #Computing mean
114
+ means = (1.0 / nb) * sum_x
115
+ eps = 1e-7
116
+
117
+ #Computing stds
118
+ stds = np.sqrt((1.0/(nb - 1)) * (sum_x_sq - (2.0 * sum_x * means) + (nb * means**2)))
119
+ stds[stds < eps] = eps
120
+
121
+ return means,stds
122
+
123
+ def normalize(X, means, stds, columns=[]):
124
+ """
125
+ normalizes X with means and stds. columns is the list of columns you want to normalize; if no columns are given, everything is normalized\n
126
+ """
127
+ ret = 1.0 * X
128
+ if len(columns) > 0:
129
+ for col in columns:
130
+ ret[:,:,col] = (X[:,:,col] - means[col]) / stds[col]
131
+ else:
132
+ for col in range(X.shape[2]):
133
+ ret[:,:,col] = (X[:,:,col] - means[col]) / stds[col]
134
+ return ret
135
+
136
+ def try_load_normalizer(path, nb_columns):
137
+ """
138
+ Tries to load means and stds from saved file.\n
139
+ If the file (path) doesn't exist, returns empty means and stds lists
140
+ nb_columns is the number of columns in the dataset (not the number of columns you load)
141
+ """
142
+ means,stds = np.zeros(nb_columns),np.ones(nb_columns)
143
+
144
+ if not os.path.isfile(path):
145
+ return [],[]
146
+
147
+ with open(path, newline='') as csvfile:
148
+ spamreader = csv.DictReader(csvfile, delimiter=',')
149
+ for row in spamreader:
150
+ means[int(row["column"])] = float(row["mean"])
151
+ stds[int(row["column"])] = float(row["std"])
152
+
153
+ return means,stds
154
+
155
+
156
+
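+ # Usage sketch (illustrative only; assumes an iterator yielding (X, y) batches where X has
+ # shape (batch, time, features), and "normalizer.csv" is a hypothetical cache file):
+ # means, stds = calculate_normalization(train_iterator)
+ # X_norm = normalize(X, means, stds) # normalize every feature column
+ # means, stds = try_load_normalizer("normalizer.csv", X.shape[2]) # or reload a saved normalizer
+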
157
+ ###################################
158
+ # THE DICTIONARIES / CONSTANTS #
159
+ ###################################
160
+
161
+
162
+ #The default values for some columns
163
+ normal_values = {
164
+ "Capillary refill rate": 0.0,
165
+ "Diastolic blood pressure": 59.0,
166
+ "Fraction inspired oxygen": 0.21,
167
+ "Glascow coma scale eye opening": "4 Spontaneously",
168
+ "Glascow coma scale motor response": "6 Obeys Commands",
169
+ "Glascow coma scale total": "15.0",
170
+ "Glascow coma scale verbal response": "5 Oriented",
171
+ "Glucose": 128.0,
172
+ "Heart Rate": 86,
173
+ "Height": 170.0,
174
+ "Mean blood pressure": 77.0,
175
+ "Oxygen saturation": 98.0,
176
+ "Respiratory rate": 19,
177
+ "Systolic blood pressure": 118.0,
178
+ "Temperature": 36.6,
179
+ "Weight": 81.0,
180
+ "pH": 7.4
181
+ }
182
+
183
+ #Dictionary to transform some string values in columns to integers or indexes
184
+ discretizer = {
185
+ "Glascow coma scale eye opening": [
186
+ (["None"],0),
187
+ (["1 No Response"],1),
188
+ (["2 To pain","To Pain"],2),
189
+ (["3 To speech","To Speech"],3),
190
+ (["4 Spontaneously","Spontaneously"],4),
191
+
192
+ ],
193
+ "Glascow coma scale motor response": [
194
+ (["1 No Response","No response"],1),
195
+ (["2 Abnorm extensn","Abnormal extension"],2),
196
+ (["3 Abnorm flexion","Abnormal Flexion"],3),
197
+ (["4 Flex-withdraws","Flex-withdraws"],4),
198
+ (["5 Localizes Pain","Localizes Pain"],5),
199
+ (["6 Obeys Commands","Obeys Commands"],6),
200
+ ],
201
+ "Glascow coma scale total": [
202
+ (["3.0"],3),
203
+ (["4.0"],4),
204
+ (["5.0"],5),
205
+ (["6.0"],6),
206
+ (["7.0"],7),
207
+ (["8.0"],8),
208
+ (["9.0"],9),
209
+ (["10.0"],10),
210
+ (["11.0"],11),
211
+ (["12.0"],12),
212
+ (["13.0"],13),
213
+ (["14.0"],14),
214
+ (["15.0"],15),
215
+ ],
216
+ "Glascow coma scale verbal response": [
217
+ (["1 No Response","No Response-ETT","1.0 ET/Trach","No Response"],1),
218
+ (["2 Incomp sounds","Incomprehensible sounds"],2),
219
+ (["3 Inapprop words","Inappropriate Words"],3),
220
+ (["4 Confused","Confused"],4),
221
+ (["5 Oriented","Oriented"],5),
222
+ ]
223
+ }
224
+
225
+ #The loaded files dictionaries
226
+ itemiddict = {}
227
+
228
+
229
+ ######################################################################
230
+ # NORMALIZATION TYPE "WINDOW" WITH AMOUNT/RATE PROBLEM #
231
+ ######################################################################
232
+
233
+ def normalize_onehot_episodes_window(row, code_column="", value_column=False, period_length=48.0, window_size=1e-1):
234
+ """
235
+ returns a dict whose keys are the items of code_column and whose values are lists representing the sliding window over period_length with bins of size window_size
236
+ made for one-hot encodings (a worked example follows this function)
237
+ """
238
+
239
+ N_bins = int(period_length / window_size + 1.0 - 0.000001)
240
+
241
+ returned_rates = {}
242
+
243
+ for idx,starttime in enumerate(row["STARTTIME"]):
244
+
245
+ if not pd.isnull(row["ENDTIME"][idx]) and row["ENDTIME"][idx] != None and row["ENDTIME"][idx] != "":
246
+ endtime = row["ENDTIME"][idx]
247
+ isRate = True
248
+ else:
249
+ endtime = starttime
250
+ isRate = False
251
+ code = row[code_column][idx]
252
+ if code == "" or (isinstance(code,float) and np.isnan(code)) or pd.isnull(code):
253
+ continue
254
+
255
+ first_bin_id = int(starttime / window_size - 0.000001)
256
+ last_bin_id = min(N_bins-1,int(endtime / window_size - 0.000001))
257
+
258
+ val = 1
259
+ if value_column:
260
+ val = row["RATE"][idx]*60 if isRate else row["AMOUNT"][idx]*60
261
+
262
+ #If code not in dict we add an array of size N_bins containing zeros
263
+ if not code in returned_rates:
264
+ returned_rates[code] = [0]*N_bins
265
+
266
+ #We add the current value to the good timestamp in the rates array
267
+ for bin_id in range(first_bin_id,last_bin_id+1):
268
+ returned_rates[code][bin_id] += val
269
+
270
+ return returned_rates
271
+
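+ # Worked example (hypothetical values): a single ITEMID given as a rate between hours 0.0 and
+ # 2.0, with a 48h period and 1h bins (N_bins = 48):
+ # row = {"STARTTIME": [0.0], "ENDTIME": [2.0], "ITEMID": [225158], "RATE": [1.5], "AMOUNT": [np.nan]}
+ # bins = normalize_onehot_episodes_window(row, code_column="ITEMID", value_column=True,
+ # period_length=48.0, window_size=1)
+ # # bins[225158] is a list of 48 values, with 1.5*60 added to bins 0 and 1
+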
272
+
273
+ #######################################
274
+ # NORMALIZATION TYPE "WINDOW" #
275
+ #######################################
276
+
277
+ def normalize_episodes_window(row, period_length=48.0, window_size=1e-1):
278
+ """
279
+ returns a window for the first period_length hours with window_size hours
280
+ values in the dict "row" must not be lists
281
+ """
282
+
283
+ #Getting the types in every column
284
+ types = {}
285
+ for e in row["episode"]:
286
+ if isinstance(row["episode"][e][0],float):
287
+ types[e] = float
288
+ else:
289
+ types[e] = str
290
+
291
+
292
+ episode = {}
293
+
294
+ #Number of rows
295
+ N_bins = int(period_length / window_size + 1.0 - 0.000001)
296
+
297
+ #Building every column with empty values
298
+ for e in row["episode"]:
299
+ if e != "Hours":
300
+ episode[e] = [np.nan]*N_bins
301
+
302
+ #Filling with available data in the episode
303
+ for idx,time in enumerate(row["episode"]["Hours"]):
304
+
305
+ #Calculating row of the current data
306
+ bin_id = int(time / window_size - 0.000001)
307
+
308
+ #Filling for every column
309
+ for col in episode:
310
+
311
+ v = row["episode"][col][idx]
312
+
313
+ #If data is not empty we add it
314
+ if v != "" and not (isinstance(v,float) and np.isnan(v)) and not v == None:
315
+ episode[col][bin_id] = v
316
+
317
+ return episode
318
+
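+ # Worked example of the binning (hypothetical values): a measurement taken at hour 3.2 with a
+ # 0.1h window lands in bin int(3.2 / 0.1 - 0.000001) = 31; every bin left untouched stays np.nan.
+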
319
+ #######################################
320
+ # NORMALIZATION TYPE "STATISTICS" #
321
+ #######################################
322
+
323
+ def normalize_episodes_statistics(row, column_scale=True,windows = [(0,1),(0,0.10),(0,0.25),(0,0.50),(0.90,1),(0.75,1),(0.50,1)],functions = [(min,"min"), (max,"max"), (np.mean,"mean"), (np.std,"std"), (skew,"skew"), (len,"len")]):
324
+ """
325
+ Computes statistics over the episode (row["episode"]) and returns them as a dict of lists (a usage sketch follows this function)
326
+ windows is an array containing all the periods to do statistics on (tuples of percentages, ex: (0.5,0.6) means "between 50% and 60% of the episode")\n
327
+ functions are the functions to apply to compute statistics\n
328
+ column_scale=True means we calculate the percentages between the first and last value of every column. False means we calculate the percentages between the first and last hours in the episode.
329
+ """
330
+ episode = row["episode"]
331
+
332
+ returned_episode = {x:[] for _,x in functions}
333
+
334
+ #First and last hour (we will keep it if column_scale=False)
335
+ L = row["episode"]["Hours"][0]
336
+ R = row["episode"]["Hours"][-1]
337
+ length = R - L
338
+
339
+ #For every column in episode
340
+ for e in episode:
341
+
342
+ #If column_scale we find first and last hour that has value (!= np.nan)
343
+ if column_scale:
344
+ Li = 0
345
+ Ri = len(row["episode"]["Hours"])-1
346
+ while Li < len(row["episode"]["Hours"])-1 and (np.isnan(row["episode"][e][Li]) or row["episode"][e][Li] == ""):
347
+ Li += 1
348
+ while Ri >= 0 and (np.isnan(row["episode"][e][Ri]) or row["episode"][e][Ri] == ""):
349
+ Ri -= 1
350
+ if Ri < 0 or Li >= len(row["episode"]["Hours"]):
351
+ Li,Ri = 0,0
352
+ L = row["episode"]["Hours"][Li]
353
+ R = row["episode"]["Hours"][Ri]
354
+ length = R - L
355
+
356
+ #We ignore the Hours column
357
+ if e == "Hours":
358
+ continue
359
+
360
+ #For every statistics windows
361
+ for window in windows:
362
+ #We calculate first and last hour for current column
363
+ start_index,end_index = window
364
+ start_index,end_index = L + start_index*length,L + end_index*length
365
+ onepiece = []
366
+ #For every value in the column, if it is in the window we add it to the statistics
367
+ for i,x in enumerate(row["episode"][e]):
368
+ if not np.isnan(x) and end_index+1e-6 > row["episode"]["Hours"][i] > start_index-1e-6:
369
+ onepiece.append(x)
370
+ #If there are no values to do statistics on, we return array of np.nan
371
+ if len(onepiece) == 0:
372
+ for function,fname in functions:
373
+ returned_episode[fname].append(np.nan)
374
+ #else we compute every function on the list
375
+ else:
376
+ for function,fname in functions:
377
+ returned_episode[fname].append(function(onepiece))
378
+ return returned_episode
379
+
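+ # Usage sketch (illustrative only): with the default 7 windows and 6 functions, each non-Hours
+ # column contributes 7 values per statistic, e.g.
+ # stats = normalize_episodes_statistics({"episode": episode_dict})
+ # stats["mean"] # 7 entries per column, one per window
+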
380
+
381
+ #######################################
382
+ # SINGLE VALUE TRANSFORMATION #
383
+ #######################################
384
+
385
+
386
+ def convert_CODE_to_onehot(itemid, d_path, field):
387
+ """
388
+ returns a one-hot encoding for the item with id itemid
389
+ the dict is loaded from the files in d_path
390
+ field is the column of the dict that contains the itemids
391
+ """
392
+
393
+ global itemiddict
394
+
395
+ #If itemiddict doesn't contain the field we load it
396
+ if not field in itemiddict:
397
+ itemiddict[field] = pd.DataFrame()
398
+ for e in d_path:
399
+ itemiddict[field] = pd.concat([itemiddict[field],pd.read_csv(e,converters={field:lambda x:str(x)})],ignore_index=True)
400
+ itemiddict[field] = itemiddict[field].sort_values(by=field,ignore_index=True).reset_index(drop=True)
401
+
402
+ #We build the one-hot encoding with the size of the field column
403
+ length = len(itemiddict[field].index)
404
+ one_hot = np.zeros((length))
405
+
406
+ #Filling the onehot encoding
407
+ if itemid != "" and itemid != 0:
408
+ idx = itemiddict[field][field].searchsorted(str(itemid))
409
+ if idx > 0:
410
+ one_hot[idx-1] = 1
411
+
412
+
413
+ return one_hot
414
+
415
+ def codes_to_onehot(episode):
416
+ """
417
+ returns the episode with every non-float value as a one-hot encoding
418
+ """
419
+ episode = episode.copy()
420
+
421
+ #For every column in the episode
422
+ for e in episode:
423
+
424
+ #If the column is in the local discretizer
425
+ if e in discretizer:
426
+
427
+ #Computing size of the onehot encoding
428
+ size = 0
429
+ for die in discretizer[e]:
430
+ size += len(die[0])
431
+
432
+ #for every value in the column
433
+ for i in range(len(episode[e])):
434
+
435
+ v = episode[e][i]
436
+
437
+ #If the value we are transforming means something
438
+ if (not isinstance(v,float) or not np.isnan(v)) and v != "" and v != 0:
439
+
440
+ #Transforming the value to onehot encoding
441
+ episode[e][i] = np.zeros(size,dtype=int)
442
+ index = 0
443
+
444
+ #Finding the index in the onehot encoding to put 1
445
+ for die in discretizer[e]:
446
+ for item in die[0]:
447
+ if str(v) == item:
448
+ episode[e][i][index] = 1
449
+ index += 1
450
+
451
+ #If the value is empty we fill with an array of np.nan
452
+ else:
453
+ episode[e][i] = np.full(size,fill_value=np.nan)
454
+
455
+ #Special column that may contain floats but must be converted to onehot encoding
456
+ elif e == "Capillary refill rate":
457
+ for i in range(len(episode[e])):
458
+ v = episode[e][i]
459
+ episode[e][i] = np.zeros(2,dtype=int)
460
+ if v != "" and float(v) == 1:
461
+ episode[e][i][1] = 1
462
+ elif v != "" and float(v) == 0:
463
+ episode[e][i][0] = 1
464
+
465
+ return episode
466
+
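+ # Illustrative example: "Glascow coma scale eye opening" has 8 known strings in the discretizer,
+ # so "4 Spontaneously" becomes an 8-element one-hot vector with a 1 at index 6.
+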
467
+ def convert_CODE_to_int(itemid, d_path, field):
468
+ """
469
+ returns an int encoding for the item with id itemid
470
+ the dict is loaded from the files in d_path
471
+ field is the column of the dict that contains the itemids
472
+ """
473
+ global itemiddict
474
+
475
+ #If the field is not available locally, we load it from d_path
476
+ if not field in itemiddict:
477
+ itemiddict[field] = pd.DataFrame()
478
+ for e in d_path:
479
+ itemiddict[field] = pd.concat([itemiddict[field],pd.read_csv(e,converters={field:lambda x:str(x)})],ignore_index=True)
480
+ itemiddict[field] = itemiddict[field].sort_values(by=field,ignore_index=True).reset_index(drop=True)
481
+
482
+ #If the itemid is available we return the associated index we find
483
+ if itemid != "" and itemid != 0:
484
+ idx = itemiddict[field][field].searchsorted(str(itemid))
485
+ if idx > 0:
486
+ return idx-1
487
+ return np.nan
488
+
489
+ def codes_to_int(episode):
490
+ """
491
+ returns the episode with every non-float value as an int encoding
492
+ """
493
+
494
+ episode = episode.copy()
495
+
496
+ #For every column in episode
497
+ for e in episode:
498
+
499
+ #If the column is available in the local discretizer
500
+ if e in discretizer:
501
+
502
+ #For every value in the column
503
+ for i in range(len(episode[e])):
504
+
505
+ v = episode[e][i]
506
+
507
+ #If the current value is not None or NaN, we find the encoding
508
+ if not isinstance(v,float) or not np.isnan(v):
509
+
510
+ #If the value is not empty or 0 we find in the encoder
511
+ if v != "" and v != 0:
512
+ value = np.nan
513
+ for die in discretizer[e]:
514
+ if str(v) in die[0]:
515
+ value = die[1]
516
+ episode[e][i] = value
517
+
518
+ #Else we mark it as not found
519
+ else:
520
+ episode[e][i] = np.nan
521
+
522
+ return episode
523
+
524
+
525
+
526
+ #######################################
527
+ # FULL EPISODE TRANSFORM UTILS #
528
+ #######################################
529
+
530
+ def convert_to_numpy_arrays(episode, empty_value=np.nan):
531
+ """
532
+ returns the episode as a numpy array of shape (row_number, features_width); the features are the keys in episode and can contain arrays, lists or scalar values
533
+ """
534
+
535
+ #Computing features length
536
+ features_width = 0
537
+ row_number = 0
538
+ for e in episode["episode"]:
539
+ x = episode["episode"][e][0]
540
+ if isinstance(x,int) or isinstance(x,float) or x == "":
541
+ features_width += 1
542
+ else:
543
+ features_width += len(x)
544
+ row_number = len(episode["episode"][e])
545
+
546
+ #Computing y_true length
547
+ y_length = 0
548
+ for e in episode:
549
+ if e != "episode":
550
+ y_length += 1
551
+
552
+ #Computing y_true
553
+ y_true = np.empty(y_length)
554
+ index = 0
555
+ for e in episode:
556
+ if e != "episode":
557
+ y_true[index] = episode[e]
558
+ index+=1
559
+
560
+ #Computing features
561
+ features = np.empty((row_number,features_width))
562
+ index = 0
563
+
564
+ #For every column in episode
565
+ for e in episode["episode"]:
566
+
567
+ #For every row in the column
568
+ for line,x in enumerate(episode["episode"][e]):
569
+
570
+ #If the value is empty, we fill with empty_value
571
+ if (isinstance(x,float) and np.isnan(x)) or x == "":
572
+ features[line,index] = empty_value
573
+
574
+ #Else we fill the array with the numeric value
575
+ elif isinstance(x,int) or isinstance(x,float):
576
+ features[line,index] = x
577
+
578
+ #Else (is array or list)
579
+ else:
580
+ is_empty_array = True
581
+
582
+ #We check if the array contains only np.nan (is empty)
583
+ for elem in x:
584
+ if not is_empty_value(elem,np.nan):
585
+ is_empty_array = False
586
+ break
587
+
588
+ #If the array is not empty, we copy its values into the right place in the returned array
589
+ if not is_empty_array:
590
+ features[line,index:index+len(x)] = x
591
+
592
+ #Else we fill the part of the returned array with empty_value so user knows the data is missing here
593
+ else:
594
+ features[line,index:index+len(x)] = np.full(len(x),empty_value)
595
+
596
+ #checking the number of elements we added in the returned array
597
+ column_exemple = episode["episode"][e][0]
598
+ if isinstance(column_exemple,int) or isinstance(column_exemple,float) or column_exemple == "":
599
+ index += 1
600
+ else:
601
+ index += len(x)
602
+
603
+ return features,y_true
604
+
605
+ def filter_episode(row, episode_filter):
606
+ """
607
+ Row contains an episode and the y_trues.
608
+ Filters row["episode"], keeping only the rows within it that satisfy episode_filter
609
+ """
610
+ episode = {col:[] for col in row["episode"]}
611
+
612
+ for i in range(len(row["episode"]["Hours"])):
613
+ #Calculating a row (dico) (= row["episode"][:][i])
614
+ dico = {header:row["episode"][header][i] for header in row["episode"]}
615
+
616
+ #If episode_filter returns true we add the row
617
+ if episode_filter(dico):
618
+ for col in episode:
619
+ episode[col].append(row["episode"][col][i])
620
+
621
+ #Building returned episode
622
+ returned = {}
623
+ for col in row:
624
+ if col != "episode":
625
+ returned[col] = row[col]
626
+ returned["episode"] = episode
627
+
628
+ return returned
629
+
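+ # Example filter (illustrative only): keep only the rows recorded during the first 48 hours.
+ # first_48h = lambda r: r["Hours"] <= 48.0
+ # filtered = filter_episode(row, first_48h)
+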
630
+ #######################################
631
+ # ABOUT IMPUTING VALUES #
632
+ #######################################
633
+
634
+ def input_values(features, empty_value=np.nan, strategy="previous"):
635
+ """
636
+ Imputes values in the features (replacing empty_value entries in features) according to strategy; a worked example follows the function
637
+ strategy is in ["previous", "previous-next"]
638
+ """
639
+ features = features.copy()
640
+
641
+ #Imputing the previous value if it exists, otherwise the next one, falling back to the normal value when available
642
+ if strategy == "previous-next":
643
+ for col in features:
644
+ col_vals = features[col]
645
+
646
+ for i in range(len(col_vals)):
647
+ #If the current value is the empty_value
648
+ if is_empty_list(col_vals[i],np.nan) or is_empty_value(col_vals[i], empty_value):
649
+ prev_index = i-1
650
+
651
+ #We find the previous value
652
+ while prev_index >= 0 and (is_empty_list(col_vals[prev_index],np.nan) or is_empty_value(col_vals[prev_index], empty_value)):
653
+ prev_index -= 1
654
+
655
+ #If found we input it
656
+ if prev_index >= 0:
657
+ features[col][i] = col_vals[prev_index]
658
+
659
+ #Else we check next value
660
+ else:
661
+ prev_index = i+1
662
+ while prev_index < len(col_vals) and (is_empty_list(col_vals[prev_index],np.nan) or is_empty_value(col_vals[prev_index], empty_value)):
663
+ prev_index += 1
664
+
665
+ if prev_index >= i+1 and prev_index < len(col_vals):
666
+ features[col][i] = col_vals[prev_index]
667
+ elif col in normal_values:
668
+ features[col][i] = normal_values[col]
669
+ elif strategy == "previous":
670
+ for col in features:
671
+ col_vals = features[col]
672
+
673
+ for i in range(len(col_vals)):
674
+ #If the current value is the empty_value
675
+ if is_empty_list(col_vals[i],np.nan) or is_empty_value(col_vals[i], empty_value):
676
+ prev_index = i-1
677
+
678
+ #We find the previous value
679
+ while prev_index >= 0 and (is_empty_list(col_vals[prev_index],np.nan) or is_empty_value(col_vals[prev_index], empty_value)):
680
+ prev_index -= 1
681
+
682
+ #If found we input it
683
+ if prev_index >= 0:
684
+ features[col][i] = col_vals[prev_index]
685
+ #Else we impute the normal value if available
686
+ elif col in normal_values:
687
+ features[col][i] = normal_values[col]
688
+
689
+
690
+ return features
691
+
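+ # Worked example of the "previous" strategy (hypothetical values): the missing value at index 2
+ # takes the previous observed value, and the gap at index 0 falls back to normal_values.
+ # input_values({"Heart Rate": [np.nan, 80.0, np.nan]}, strategy="previous")
+ # # -> {"Heart Rate": [86, 80.0, 80.0]}
+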
692
+
693
+
694
+ def add_mask(episode):
695
+ """
696
+ Adds, for every column, a mask feature to the episode: an array with 1 for every non-null value and 0 otherwise (see the example after this function)
697
+ Can be used before DataImputer to know where data were imputed
698
+ """
699
+ keys = [key for key in episode.keys()]
700
+ for e in keys:
701
+ episode["mask_"+e] = []
702
+ for el in episode[e]:
703
+ if el == "" or (isinstance(el,float) and np.isnan(el)):
704
+ episode["mask_"+e].append(0)
705
+ else:
706
+ episode["mask_"+e].append(1)
707
+ return episode
708
+
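+ # Worked example (hypothetical values): the mask column records which entries were observed.
+ # add_mask({"Glucose": [np.nan, 120.0]})
+ # # -> {"Glucose": [np.nan, 120.0], "mask_Glucose": [0, 1]}
+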
709
+ #######################################
710
+ # DATASET TO READABLE DATA FOR ML #
711
+ #######################################
712
+
713
+ def preprocess_to_learn(
714
+ episode,
715
+ code_to_onehot=True,
716
+ episode_filter=None,
717
+ mode="full",
718
+
719
+ window_period_length=48.0,
720
+ window_size=0.7,
721
+
722
+ statistics_mode_column_scale=True,
723
+
724
+ empty_value=np.nan,
725
+ input_strategy=None,
726
+ add_mask_columns=False,
727
+ ):
728
+ """
729
+ Main function to transform dataset rows to numpy arrays\n
730
+ episode is the episode to transform\n
731
+ code_to_onehot is True if you want to transform non-float data to onehot, else it is converted to int\n
732
+ episode_filter is a filter function applied to episodes; only rows for which it returns True are kept\n
733
+ mode is the mode of transformation. Available: statistics (for random forest), window (for LSTM)\n\n
734
+ window_period_length is the length of episode to do windows in (for window mode)\n
735
+ window_size is the size of the window (for window mode)\n\n
736
+ statistics_mode_column_scale is the column mode for statistics mode (see normalize_episodes_statistics)\n
737
+ empty_value is the value to put where no data\n
738
+ input_strategy can be "previous" or "previous-next" or "None" (see input_values)\n
739
+ add_mask_columns adds mask features before imputing missing data (see add_mask) \n
740
+ episode_length is the episode length for window mode\n
741
+ """
742
+
743
+ #Filtering rows from the episode
744
+ if episode_filter == None:
745
+ discr_episode = episode
746
+ else:
747
+ discr_episode = filter_episode(episode, episode_filter)
748
+
749
+ #Discretization of data
750
+ if mode == "statistics":
751
+ discr_episode["episode"] = codes_to_int(discr_episode["episode"])
752
+ discr_episode["episode"] = normalize_episodes_statistics(discr_episode,column_scale=statistics_mode_column_scale)
753
+
754
+ elif mode == "window":
755
+ discr_episode["episode"] = normalize_episodes_window(discr_episode, window_period_length, window_size)
756
+
757
+ #Adding mask
758
+ if add_mask_columns:
759
+ discr_episode["episode"] = add_mask(discr_episode["episode"])
760
+
761
+ #Trying to impute some missing values
762
+ discr_episode["episode"] = input_values(discr_episode["episode"],empty_value=empty_value,strategy=input_strategy)
763
+
764
+ #Transforming text to integer (index of string in file) or onehot vector
765
+ if mode != "statistics":
766
+ if code_to_onehot:
767
+ discr_episode["episode"] = codes_to_onehot(discr_episode["episode"])
768
+ else:
769
+ discr_episode["episode"] = codes_to_int(discr_episode["episode"])
770
+ #Transforming to numpy array from dict
771
+ returned = convert_to_numpy_arrays(discr_episode, empty_value=empty_value)
772
+ return returned
773
+
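+ # Usage sketch (illustrative only): turn one dataset row into (features, y_true) numpy arrays
+ # for a sequence model, using hourly windows over the first 48 hours.
+ # features, y_true = preprocess_to_learn(row, mode="window", window_period_length=48.0,
+ # window_size=1.0, input_strategy="previous")
+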
774
+
775
+ #######################################
776
+ # ITERATOR FROM DATASET #
777
+ #######################################
778
+
779
+ def my_generator(dataset,transform):
780
+ iterator = iter(dataset)
781
+ for x in iterator:
782
+ yield transform(x)
783
+
784
+ def mapped_iterabledataset(dataset, function):
785
+ return IterableDataset.from_generator(my_generator, gen_kwargs={"dataset": dataset,"transform":function})
786
+
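+ # Usage sketch (illustrative only): lazily apply a transform to every example of a split.
+ # lazy_ds = mapped_iterabledataset(raw_split, some_transform) # some_transform(example) -> new example
+ # for example in lazy_ds:
+ # ...
+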
787
+
788
+ ################################################################################
789
+ ################################################################################
790
+ ## ##
791
+ ## DATASET CREATION AND DOWNLOADING ##
792
+ ## ##
793
+ ################################################################################
794
+ ################################################################################
795
+
796
+ def do_listfile(task,subfolder,mimic3_benchmark_data_folder,mimic3_benchmark_new_data_folder,stays,inputevents,procedurevents,diagnoses,insurances):
797
+
798
+
799
+ file = subfolder+"_listfile.csv"
800
+
801
+ print("working on",task+"/"+file)
802
+
803
+ listfile = pd.read_csv(os.path.join(mimic3_benchmark_data_folder,file),sep=',')
804
+ listfile = listfile.sort_values(by=["stay"]) if not "period_length" in listfile else listfile.sort_values(by=["stay","period_length"])
805
+
806
+ subfolder = "train"
807
+ if "test" in file:
808
+ subfolder = "test"
809
+
810
+ to_save = []
811
+ if task == "mimic4-in-hospital-mortality":
812
+ for idx,(_,x) in enumerate(listfile.iterrows()):
813
+ print(get_progression(idx,len(listfile.index),length=20),str(round(100*idx/len(listfile.index),2))+"%",file,end="\r")
814
+
815
+ current_dict = {}
816
+
817
+ #Getting episode/subject ids
818
+ fname = x["stay"].split("_")
819
+ subject_id = fname[0]
820
+ episode_number = int(fname[1][7:])
821
+
822
+ #Getting current episode start date
823
+ current_ep_desc = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"root",subfolder,subject_id,"episode"+str(episode_number)+".csv"))
824
+ icustay_id = current_ep_desc.at[current_ep_desc.index[0],"Icustay"]
825
+
826
+ deathtime = stays.loc[stays["ICUSTAY_ID"] == icustay_id]
827
+ dt = np.nan
828
+ bd = np.nan
829
+ #Basic demographic data (age, ethnicity and gender)
830
+ for _,y in deathtime.iterrows():
831
+ if isinstance(y["DEATHTIME"], str) and y["DEATHTIME"] != "":
832
+ dt = dtc(y["DEATHTIME"])
833
+ bd = dtc(y["INTIME"])
834
+ current_dict["age"] = y["AGE"]
835
+ current_dict["ethnicity"] = y["ETHNICITY"]
836
+ current_dict["gender"] = y["GENDER"]
837
+ current_dict["insurance"] = insurances.loc[insurances["HADM_ID"] == y["HADM_ID"]]["INSURANCE"].iloc[0]
838
+
839
+
840
+ #Checking whether the patient died, and whether the data is valid
841
+ valid = True
842
+ if isinstance(dt, datetime):
843
+ sec = (dt - bd).total_seconds() >= 54*3600
844
+ if sec:
845
+ current_dict["label"] = 1
846
+ else:
847
+ valid = False
848
+ else:
849
+ current_dict["label"] = 0
850
+
851
+ if not valid:
852
+ continue
853
+
854
+ #Building diagnoses
855
+ current_diags = diagnoses[diagnoses["ICUSTAY_ID"] == icustay_id]
856
+ ICD9_list = []
857
+ for _,icd_code in current_diags.iterrows():
858
+ ICD9_list.append(icd_code["ICD9_CODE"])
859
+ current_dict["Cond"] = {"fids":ICD9_list}
860
+
861
+
862
+
863
+ def map_date(date):
864
+ if isinstance(date,datetime):
865
+ return (date - bd).total_seconds()/3600.0
866
+ else:
867
+ return date
868
+
869
+
870
+ #Building procedurevents
871
+ pde = procedurevents[procedurevents["ICUSTAY_ID"] == icustay_id].applymap(map_date,na_action="ignore")
872
+ current_dict["Proc"] = normalize_onehot_episodes_window(pde.to_dict(orient='list'), value_column=False, code_column="ITEMID", period_length=48.0, window_size=1)
873
+
874
+ #Building inputevents
875
+ ie = inputevents[inputevents["ICUSTAY_ID"] == icustay_id].applymap(map_date,na_action="ignore")
876
+ current_dict["Med"] = normalize_onehot_episodes_window(ie.to_dict(orient='list'), value_column=True, code_column="ITEMID", period_length=48.0, window_size=1)
877
+
878
+ #Building chartevents
879
+ current_ep_charts = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"in-hospital-mortality",subfolder,x["stay"])).to_dict(orient='list')
880
+ current_dict["Chart"] = normalize_episodes_window({"episode":current_ep_charts})
881
+
882
+ #The output events are in the chartevents
883
+ current_dict["Out"] = {}
884
+
885
+ to_save.append(current_dict)
886
+ else:
887
+ for idx,(_,x) in enumerate(listfile.iterrows()):
888
+ print(get_progression(idx,len(listfile.index),length=20),str(round(100*idx/len(listfile.index),2))+"%",file,end="\r")
889
+ to_save.append(x)
890
+
891
+
892
+
893
+ os.makedirs(mimic3_benchmark_new_data_folder,exist_ok=True)
894
+ with open(os.path.join(mimic3_benchmark_new_data_folder,file[:-3]+"pkl"), "wb+") as fp:
895
+ pickle.dump(to_save,fp,pickle.HIGHEST_PROTOCOL)
896
+
897
+
898
+ def generate_dics(diagnoses, inputevents, procedurevents, insurances, stays, mimic3_path):
899
+
900
+ #Diagnoses dictionary
901
+ if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"icd_dict.csv")):
902
+ print("creating icd indexes")
903
+
904
+ #Loading Diagnoses
905
+ used_col = ["ICD9_CODE","SHORT_TITLE","LONG_TITLE"]
906
+ dtype = {"ICD9_CODE":str,"SHORT_TITLE":str,"LONG_TITLE":str}
907
+ dcsv = pd.read_csv(mimic3_path+"/D_ICD_DIAGNOSES.csv",sep=',',usecols=used_col,dtype=dtype)
908
+ print("icd resources loaded")
909
+ dic = {}
910
+ for _,row in diagnoses.iterrows():
911
+ if not row["ICD9_CODE"] in dic:
912
+ fif = dcsv.loc[dcsv["ICD9_CODE"] == row["ICD9_CODE"]]
913
+ dic[row["ICD9_CODE"]] = {"SHORT_TITLE":fif["SHORT_TITLE"].values[0],"LONG_TITLE":fif["LONG_TITLE"].values[0]}
914
+ with open(os.path.join(DATASET_SAVE_PATH,'icd_dict.csv'), 'w') as f:
915
+ f.write("ICD9_CODE,SHORT_TITLE,LONG_TITLE\n")
916
+ for key in dic.keys():
917
+ f.write("%s,\"%s\",\"%s\"\n"%(key,dic[key]["SHORT_TITLE"],dic[key]["LONG_TITLE"]))
918
+
919
+ #itemids dictionary
920
+ if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"ie_itemid_dict.csv")):
921
+ print("creating itemid indexes")
922
+
923
+ #Loading itemids
924
+ used_col = ["ITEMID","LABEL","ABBREVIATION"]
925
+ dtype = {"ITEMID":int,"LABEL":str,"ABBREVIATION":str}
926
+ itemidcsv = pd.read_csv(mimic3_path+"/D_ITEMS.csv",sep=',',usecols=used_col,dtype=dtype)
927
+
928
+ print("itemid resources loaded")
929
+ dic = {}
930
+ for _,row in inputevents.iterrows():
931
+ if not row["ITEMID"] in dic:
932
+ fif = itemidcsv.loc[itemidcsv["ITEMID"] == row["ITEMID"]]
933
+ dic[row["ITEMID"]] = {"LABEL":fif["LABEL"].values[0],"ABBREVIATION":fif["ABBREVIATION"].values[0]}
934
+ with open(os.path.join(DATASET_SAVE_PATH,'ie_itemid_dict.csv'), 'w') as f:
935
+ f.write("ITEMID,LABEL,ABBREVIATION\n")
936
+ for key in dic.keys():
937
+ f.write("%s,\"%s\",\"%s\"\n"%(key,dic[key]["ABBREVIATION"],dic[key]["LABEL"]))
938
+
939
+ dic = {}
940
+ for _,row in procedurevents.iterrows():
941
+ if not row["ITEMID"] in dic:
942
+ fif = itemidcsv.loc[itemidcsv["ITEMID"] == row["ITEMID"]]
943
+ dic[row["ITEMID"]] = {"LABEL":fif["LABEL"].values[0],"ABBREVIATION":fif["ABBREVIATION"].values[0]}
944
+ with open(os.path.join(DATASET_SAVE_PATH,'pe_itemid_dict.csv'), 'w') as f:
945
+ f.write("ITEMID,LABEL,ABBREVIATION\n")
946
+ for key in dic.keys():
947
+ f.write("%s,\"%s\",\"%s\"\n"%(key,dic[key]["ABBREVIATION"],dic[key]["LABEL"]))
948
+
949
+ #insurances dictionary
950
+ if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"insurances_dict.csv")):
951
+ print("creating insurances indexes")
952
+ dic = {}
953
+ index = 0
954
+ for _,row in insurances.iterrows():
955
+ if not row["INSURANCE"] in dic:
956
+ dic[row["INSURANCE"]] = index
957
+ index += 1
958
+ with open(os.path.join(DATASET_SAVE_PATH,'insurances_dict.csv'), 'w') as f:
959
+ f.write("INSURANCE,INDEX\n")
960
+ for key in dic.keys():
961
+ f.write("\"%s\",%s\n"%(key,dic[key]))
962
+
963
+ #gender dictionary
964
+ if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"genders_dict.csv")):
965
+ print("creating genders indexes")
966
+ dic = {}
967
+ index = 0
968
+ for _,row in stays.iterrows():
969
+ if not row["GENDER"] in dic:
970
+ dic[row["GENDER"]] = index
971
+ index += 1
972
+ with open(os.path.join(DATASET_SAVE_PATH,'genders_dict.csv'), 'w') as f:
973
+ f.write("GENDER,INDEX\n")
974
+ for key in dic.keys():
975
+ f.write("\"%s\",%s\n"%(key,dic[key]))
976
+
977
+
978
+ #age dictionary
979
+ if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"ages_dict.csv")):
980
+ print("creating ages indexes")
981
+ dic = {}
982
+ index = 0
983
+ for _,row in stays.iterrows():
984
+ if not round(row["AGE"]) in dic:
985
+ dic[round(row["AGE"])] = index
986
+ index += 1
987
+ with open(os.path.join(DATASET_SAVE_PATH,'ages_dict.csv'), 'w') as f:
988
+ f.write("AGE,INDEX\n")
989
+ for key in dic.keys():
990
+ f.write("%s,%s\n"%(key,dic[key]))
991
+
992
+ #ethnicity dictionary
993
+ if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"ethnicities_dict.csv")):
994
+ print("creating ethnicities indexes")
995
+ dic = {}
996
+ index = 0
997
+ for _,row in stays.iterrows():
998
+ if not row["ETHNICITY"] in dic:
999
+ dic[row["ETHNICITY"]] = index
1000
+ index += 1
1001
+ with open(os.path.join(DATASET_SAVE_PATH,'ethnicities_dict.csv'), 'w') as f:
1002
+ f.write("ETHNICITY,INDEX\n")
1003
+ for key in dic.keys():
1004
+ f.write("\"%s\",%s\n"%(key,dic[key]))
1005
+
1006
+ def clean_units(df):
1007
+ df.loc[df["AMOUNTUOM"].isin(["grams","L"]),"AMOUNT"] = df.loc[df["AMOUNTUOM"].isin(["grams","L"]),"AMOUNT"].apply((lambda x:x*1000))
1008
+ df.loc[df["AMOUNTUOM"].isin(["ounces"]),"AMOUNT"] = df.loc[df["AMOUNTUOM"].isin(["ounces"]),"AMOUNT"].apply((lambda x:x*28.3495*1000))
1009
+ df.loc[df["AMOUNTUOM"].isin(["uL"]),"AMOUNT"] = df.loc[df["AMOUNTUOM"].isin(["uL"]),"AMOUNT"].apply((lambda x:x/1000))
1010
+ df.loc[df["AMOUNTUOM"].isin(["mlhr","Hours"]),"AMOUNT"] = df.loc[df["AMOUNTUOM"].isin(["mlhr","Hours"]),"AMOUNT"].apply((lambda x:x/60))
1011
+
1012
+ df.loc[df["RATEUOM"].isin(["mLhour","unitshour","mcghour","mcgkghour","mgkghour","mLkghour","mEq.hour"]),"RATE"] = df.loc[df["RATEUOM"].isin(["mLhour","unitshour","mcghour","mcgkghour","mgkghour","mLkghour","mEq.hour"]),"RATE"].apply((lambda x:x/60))
1013
+ df.loc[df["RATEUOM"].isin(["gramshour"]),"RATE"] = df.loc[df["RATEUOM"].isin(["gramshour"]),"RATE"].apply((lambda x:x*1000/60))
1014
+ df.loc[df["RATEUOM"].isin(["gramsmin","gramskgmin"]),"RATE"] = df.loc[df["RATEUOM"].isin(["gramsmin","gramskgmin"]),"RATE"].apply((lambda x:x*1000))
1015
+
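+ # Illustrative note: clean_units rescales in place, e.g. an AMOUNT of 2 with AMOUNTUOM "grams"
+ # becomes 2000, and a RATE of 60 with RATEUOM "mLhour" becomes 1 (per-minute).
+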
1016
+
1017
+ def load_mimic3_files(mimic3_dir):
1018
+ #Loading inputevents
1019
+ used_col = ["SUBJECT_ID","ICUSTAY_ID","CHARTTIME","ITEMID","AMOUNT","AMOUNTUOM","RATE","RATEUOM"]
1020
+ dtype = {"AMOUNTUOM":str,"RATEUOM":str}
1021
+ converters={"SUBJECT_ID":bic,"ICUSTAY_ID":bic,"CHARTTIME":dtc,"ITEMID":bic,"AMOUNT":bfc,"RATE":bfc}
1022
+ inputevents = pd.read_csv(mimic3_dir+"/INPUTEVENTS_CV.csv",sep=',',usecols=used_col,dtype=dtype,converters=converters)
1023
+ inputevents.rename(columns={"CHARTTIME": "STARTTIME"}, inplace=True)
1024
+ print("inputevents 1/2 loaded")
1025
+
1026
+ used_col = ["SUBJECT_ID","ICUSTAY_ID","STARTTIME","ENDTIME","ITEMID","AMOUNT","AMOUNTUOM","RATE","RATEUOM"]
1027
+ dtype = {"AMOUNTUOM":str,"RATEUOM":str}
1028
+ converters={"SUBJECT_ID":bic,"ICUSTAY_ID":bic,"STARTTIME":dtc,"ENDTIME":dtc,"ITEMID":bic,"AMOUNT":bfc,"RATE":bfc}
1029
+ inputevents_2 = pd.read_csv(mimic3_dir+"/INPUTEVENTS_MV.csv",sep=',',usecols=used_col,dtype=dtype,converters=converters)
1030
+ inputevents = pd.concat([inputevents,inputevents_2])
1031
+ inputevents.drop(inputevents[(inputevents["SUBJECT_ID"] == -1) | (inputevents["ICUSTAY_ID"] == -1)].index, inplace=True)
1032
+ clean_units(inputevents)
1033
+ print("inputevents 2/2 loaded")
1034
+
1035
+ #Loading procedurevents
1036
+ used_col = ["SUBJECT_ID","ICUSTAY_ID","STARTTIME","ENDTIME","ITEMID"]
1037
+ converters={"SUBJECT_ID":bic,"ICUSTAY_ID":bic,"STARTTIME":dtc,"ENDTIME":dtc,"ITEMID":bic}
1038
+ procedurevents = pd.read_csv(mimic3_dir+"/PROCEDUREEVENTS_MV.csv",sep=',',usecols=used_col,converters=converters)
1039
+ procedurevents.drop(procedurevents[(procedurevents["SUBJECT_ID"] == -1) | (procedurevents["ICUSTAY_ID"] == -1)].index, inplace=True)
1040
+ print("procedurevents loaded")
1041
+
1042
+ #Loading Diagnoses
1043
+ used_col = ["SUBJECT_ID","SEQ_NUM","ICD9_CODE","ICUSTAY_ID"]
1044
+ dtype = {"ICD9_CODE":str}
1045
+ converters={"SUBJECT_ID":bic,"SEQ_NUM":bic,"ICUSTAY_ID":bic}
1046
+ diagnoses = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"root","all_diagnoses.csv"),sep=',',usecols=used_col,dtype=dtype,converters=converters)
1047
+ print("diagnoses loaded")
1048
+
1049
+ #Loading stays
1050
+ used_col = ["SUBJECT_ID","HADM_ID","ICUSTAY_ID","INTIME","DEATHTIME","ETHNICITY","GENDER","AGE"]
1051
+ dtype = {"INTIME":str,"DEATHTIME":str,"ETHNICITY":str,"GENDER":str}
1052
+ converters={"SUBJECT_ID":bic,"HADM_ID":bic,"ICUSTAY_ID":bic,"AGE":bfc}
1053
+ stays = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"root","all_stays.csv"),sep=',',usecols=used_col,dtype=dtype,converters=converters)
1054
+ print("stays loaded")
1055
+
1056
+ #Loading insurances
1057
+ used_col = ["SUBJECT_ID","HADM_ID","INSURANCE"]
1058
+ dtype = {"INSURANCE":str}
1059
+ converters={"SUBJECT_ID":bic,"HADM_ID":bic}
1060
+ insurances = pd.read_csv(mimic3_dir+"/ADMISSIONS.csv",sep=',',usecols=used_col,dtype=dtype,converters=converters)
1061
+ print("insurances loaded")
1062
+
1063
+ generate_dics(diagnoses, inputevents, procedurevents, insurances, stays, mimic3_dir)
1064
+
1065
+ diagnoses.drop(diagnoses[(diagnoses["SUBJECT_ID"] == -1) | (diagnoses["ICUSTAY_ID"] == -1)].index, inplace=True)
1066
+ diagnoses.drop(diagnoses[(diagnoses["ICD9_CODE"] == 7981) | (diagnoses["ICD9_CODE"] == 7982) | (diagnoses["ICD9_CODE"] == 7989)].index, inplace=True)
1067
+ diagnoses["Hours"] = 0
1068
+ diagnoses = diagnoses.sort_values(by="SEQ_NUM")
1069
+
1070
+ return stays,inputevents,procedurevents,diagnoses,insurances
1071
+
1072
+
1073
+
1074
+ def do_directory_cleaning(current_file):
1075
+
1076
+ if "ICD9_CODE" in current_file:
1077
+ current_file["ICD9_CODE"] = current_file["ICD9_CODE"].apply(id_to_string)
1078
+
1079
+ #Cleaning
1080
+ current_file.loc[current_file["AMOUNT"] == -1, "AMOUNT"] = np.nan
1081
+ current_file.loc[current_file["RATE"] == -1, "RATE"] = np.nan
1082
+ current_file["ITEMID"] = current_file["ITEMID"].astype(pd.Int64Dtype())
1083
+ if "SEQ_NUM" in current_file:
1084
+ current_file["SEQ_NUM"] = current_file["SEQ_NUM"].astype(pd.Int64Dtype())
1085
+ clean_units(current_file)
1086
+ current_file = current_file.drop(["AMOUNTUOM","RATEUOM"], axis=1)
1087
+ return current_file
1088
+
1089
+
1090
+ def load_mimic3_benchmark(mimic3_path):
1091
+
1092
+ mimic3_path = os.path.join(os.getcwd(),mimic3_path)
1093
+ starting_dir = os.getcwd()
1094
+ os.chdir(DATASET_SAVE_PATH)
1095
+
1096
+ print("Starting preprocessing of raw mimic3 data...")
1097
+
1098
+ if not os.path.isdir("mimic3-benchmarks"):
1099
+ print("MIMIC3-BENCHMARK Data not found... Loading mimic3-benchmark github...")
1100
+ os.system('git clone https://github.com/YerevaNN/mimic3-benchmarks.git')
1101
+
1102
+ if not os.path.isdir("mimic3-benchmarks"):
1103
+ print("Could not load the github... Exiting...")
1104
+ exit(1)
1105
+ os.chdir("mimic3-benchmarks")
1106
+ print("Preprocessing of data... This step may take hours.")
1107
+
1108
+ print("Extracting subjects...")
1109
+ os.system("python -m mimic3benchmark.scripts.extract_subjects "+mimic3_path+" ../root/")
1110
+
1111
+ print("Fixing issues...")
1112
+ os.system("python -m mimic3benchmark.scripts.validate_events ../root/")
1113
+
1114
+ print("Extracting episodes...")
1115
+ os.system("python -m mimic3benchmark.scripts.extract_episodes_from_subjects ../root/")
1116
+
1117
+ print("Splitting train and test...")
1118
+ os.system("python -m mimic3benchmark.scripts.split_train_and_test ../root/")
1119
+
1120
+ print("Creating specific tasks")
1121
+ os.system("python -m mimic3benchmark.scripts.create_in_hospital_mortality ../root/ ../in-hospital-mortality/")
1122
+ os.system("python -m mimic3benchmark.scripts.create_decompensation ../root/ ../decompensation/")
1123
+ os.system("python -m mimic3benchmark.scripts.create_length_of_stay ../root/ ../length-of-stay/")
1124
+ os.system("python -m mimic3benchmark.scripts.create_phenotyping ../root/ ../phenotyping/")
1125
+ os.system("python -m mimic3benchmark.scripts.create_multitask ../root/ ../multitask/")
1126
+
1127
+ print("Splitting validation...")
1128
+ os.system("python -m mimic3models.split_train_val ../in-hospital-mortality/")
1129
+ os.system("python -m mimic3models.split_train_val ../decompensation/")
1130
+ os.system("python -m mimic3models.split_train_val ../length-of-stay/")
1131
+ os.system("python -m mimic3models.split_train_val ../phenotyping/")
1132
+ os.system("python -m mimic3models.split_train_val ../multitask/")
1133
+ os.chdir(starting_dir)
1134
+
1135
+ def preprocess(task,mimic3_dir=None):
1136
+ origin_task = task
1137
+ if "mimic4-" in task:
1138
+ origin_task = task[7:]
1139
+
1140
+ original_task_path = os.path.join(DATASET_SAVE_PATH,origin_task)
1141
+ print("need of",original_task_path,"to generate new task...")
1142
+ if not os.path.isdir(original_task_path):
1143
+ if mimic3_dir == None:
1144
+ mimic3_dir = input("Preprocessing has to be done, please enter mimic3's path : ")
1145
+ if not os.path.isdir(mimic3_dir):
1146
+ print("Could not load mimic3 files...")
1147
+ exit(1)
1148
+ load_mimic3_benchmark(mimic3_dir)
1149
+
1150
+ loaded,inputevents,procedurevents,diagnoses = False,None,None,None
1151
+
1152
+ mimic3_benchmark_data_folder,mimic3_benchmark_new_data_folder = None,None
1153
+ if "mimic4-" in task:
1154
+ print("the requested task is a mimic4-benchmark task...")
1155
+ #Data folder
1156
+ mimic3_benchmark_data_folder = os.path.join(DATASET_SAVE_PATH,task[7:])
1157
+
1158
+ #New data folder
1159
+ mimic3_benchmark_new_data_folder = os.path.join(DATASET_SAVE_PATH,task)
1160
+
1161
+
1162
+ for subfolder in ["train","test","val"]:
1163
+ print("checking subfolder",subfolder)
1164
+
1165
+ #Loading the mimic3 files for modification
1166
+ if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,task,subfolder+"_listfile.pkl")):
1167
+ if not loaded:
1168
+ if mimic3_dir == None:
1169
+ mimic3_dir = input("preprocessing has to be done, please enter mimic3's path : ")
1170
+ if not os.path.isdir(mimic3_dir):
1171
+ print("Could not load mimic3 files...")
1172
+ exit(1)
1173
+ print("this task does not exist yet... loading required files to create the task. this may take 20 minutes")
1174
+ stays,inputevents,procedurevents,diagnoses,insurances = load_mimic3_files(mimic3_dir)
1175
+ loaded = True
1176
+ print("creating the subfolder",subfolder,"| estimated time : 1h")
1177
+ do_listfile(task, subfolder, mimic3_benchmark_data_folder, mimic3_benchmark_new_data_folder, stays, inputevents, procedurevents, diagnoses, insurances)
1178
+ if not os.path.isfile("icd_dict.csv"):
1179
+ if mimic3_dir == None:
1180
+ mimic3_dir = input("preprocessing has to be done, please enter mimic3's path : ")
1181
+ if not os.path.isdir(mimic3_dir):
1182
+ print("Could not load mimic3 files...")
1183
+ exit(1)
1184
+ print("loading data and creating dicts...")
1185
+ load_mimic3_files(mimic3_dir)
1186
+
1187
+
1188
+ ################################################################################
1189
+ ################################################################################
1190
+ ## ##
1191
+ ## HUGGING FACE DATASET ##
1192
+ ## ##
1193
+ ################################################################################
1194
+ ################################################################################
1195
+
1196
+
1197
+ class Mimic3DatasetConfig(datasets.BuilderConfig):
1198
+ def __init__(self, **kwargs):
1199
+ super().__init__(**kwargs)
1200
+
1201
+ class Mimic3Benchmark_Dataset(datasets.GeneratorBasedBuilder):
1202
+ def __init__(self, **kwargs):
1203
+ self.code_to_onehot=kwargs.pop("code_to_onehot",True)
1204
+ self.episode_filter=kwargs.pop("episode_filter",None)
1205
+ self.mode=kwargs.pop("mode","statistics")
1206
+ self.window_period_length=kwargs.pop("window_period_length",48.0)
1207
+ self.window_size=kwargs.pop("window_size",0.7)
1208
+ self.empty_value=kwargs.pop("empty_value",np.nan)
1209
+ self.input_strategy=kwargs.pop("input_strategy",None)
1210
+ self.add_mask_columns=kwargs.pop("add_mask_columns",False)
1211
+ self.statistics_mode_column_scale=kwargs.pop("statistics_mode_column_scale",True)
1212
+ self.mimic3_path=kwargs.pop("mimic3_path",None)
1213
+
1214
+ self.mimic4_text_demos = kwargs.pop("mimic4_text_demos",True)
1215
+ self.mimic4_text_charts = kwargs.pop("mimic4_text_charts",True)
1216
+ self.mimic4_text_meds = kwargs.pop("mimic4_text_meds",True)
1217
+ self.mimic4_text_cond = kwargs.pop("mimic4_text_cond",True)
1218
+ self.mimic4_text_procs = kwargs.pop("mimic4_text_procs",True)
1219
+
1220
+ self.full_meds_loaded = False
1221
+ self.full_proc_loaded = False
1222
+ self.full_cond_loaded = False
1223
+
1224
+ self.full_gens_loaded = False
1225
+ self.full_ages_loaded = False
1226
+ self.full_eths_loaded = False
1227
+ self.full_ins_loaded = False
1228
+
1229
+ super().__init__(**kwargs)
1230
+
1231
+ VERSION = datasets.Version("1.0.0")
1232
+
1233
+ BUILDER_CONFIGS = [
1234
+ Mimic3DatasetConfig(name="in-hospital-mortality", version=VERSION, description="This dataset covers the in-hospital-mortality benchmark of mimiciii-benchmark"),
1235
+ Mimic3DatasetConfig(name="decompensation", version=VERSION, description="This dataset covers the decompensation benchmark of mimiciii-benchmark"),
1236
+ Mimic3DatasetConfig(name="length-of-stay", version=VERSION, description="This dataset covers the length-of-stay benchmark of mimiciii-benchmark"),
1237
+ Mimic3DatasetConfig(name="multitask", version=VERSION, description="This dataset covers the multitask benchmark of mimiciii-benchmark"),
1238
+ Mimic3DatasetConfig(name="phenotyping", version=VERSION, description="This dataset covers the phenotyping benchmark of mimiciii-benchmark"),
1239
+ Mimic3DatasetConfig(name="mimic4-in-hospital-mortality", version=VERSION, description="This dataset covers the mimic4-in-hospital-mortality benchmark of mimiciii-benchmark"),
1240
+ ]
1241
+
1242
+ def _info(self):
1243
+ if self.config.name in ["in-hospital-mortality", "decompensation", "phenotyping", "mimic4-in-hospital-mortality", "length-of-stay"]:
1244
+
1245
+
1246
+
1247
+ if self.config.name == "phenotyping":
1248
+ return datasets.DatasetInfo(
1249
+ description="Dataset "+self.config.name,
1250
+ features=datasets.Features(
1251
+ {
1252
+ "Acute and unspecified renal failure": datasets.Value("float"),
1253
+ "Acute cerebrovascular disease": datasets.Value("float"),
1254
+ "Acute myocardial infarction": datasets.Value("float"),
1255
+ "Cardiac dysrhythmias": datasets.Value("float"),
1256
+ "Chronic kidney disease": datasets.Value("float"),
1257
+ "Chronic obstructive pulmonary disease and bronchiectasis": datasets.Value("float"),
1258
+ "Complications of surgical procedures or medical care": datasets.Value("float"),
1259
+ "Conduction disorders": datasets.Value("float"),
1260
+ "Congestive heart failure; nonhypertensive": datasets.Value("float"),
1261
+ "Coronary atherosclerosis and other heart disease": datasets.Value("float"),
1262
+ "Diabetes mellitus with complications": datasets.Value("float"),
1263
+ "Diabetes mellitus without complication": datasets.Value("float"),
1264
+ "Disorders of lipid metabolism": datasets.Value("float"),
1265
+ "Essential hypertension": datasets.Value("float"),
1266
+ "Fluid and electrolyte disorders": datasets.Value("float"),
1267
+ "Gastrointestinal hemorrhage": datasets.Value("float"),
1268
+ "Hypertension with complications and secondary hypertension": datasets.Value("float"),
1269
+ "Other liver diseases": datasets.Value("float"),
1270
+ "Other lower respiratory disease": datasets.Value("float"),
1271
+ "Other upper respiratory disease": datasets.Value("float"),
1272
+ "Pleurisy; pneumothorax; pulmonary collapse": datasets.Value("float"),
1273
+ "Pneumonia (except that caused by tuberculosis or sexually transmitted disease)": datasets.Value("float"),
1274
+ "Respiratory failure; insufficiency; arrest (adult)": datasets.Value("float"),
1275
+ "Septicemia (except in labor)": datasets.Value("float"),
1276
+ "Shock": datasets.Value("float"),
1277
+ "episode": datasets.Array2D(shape=(None,None), dtype=float)
1278
+ }),
1279
+ homepage="",
1280
+ license="",
1281
+ citation="",
1282
+ )
1283
+ elif self.config.name == "mimic4-in-hospital-mortality" and self.mode in ["mimic4-aggreg"]:
1284
+ return datasets.DatasetInfo(
1285
+ description="Dataset "+self.config.name,
1286
+ features = datasets.Features(
1287
+ {
1288
+ "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
1289
+ "features" : datasets.Sequence(datasets.Value("float32")),
1290
+ "columns": datasets.Sequence(datasets.Value("string"))
1291
+ }
1292
+ ),
1293
+ homepage="",
1294
+ license="",
1295
+ citation="",)
1296
+ elif self.config.name == "mimic4-in-hospital-mortality" and self.mode == "mimic4-naive-prompt":
1297
+ return datasets.DatasetInfo(
1298
+ description="Dataset "+self.config.name,
1299
+ features = datasets.Features(
1300
+ {
1301
+ "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
1302
+ "features" : datasets.Value(dtype='string', id=None),
1303
+ }
1304
+ ),
1305
+ homepage="",
1306
+ license="",
1307
+ citation="",)
1308
+ elif self.config.name == "mimic4-in-hospital-mortality" and self.mode == "mimic4-tensor":
1309
+ return datasets.DatasetInfo(
1310
+ description="Dataset "+self.config.name,
1311
+ features = datasets.Features(
1312
+ {
1313
+ "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
1314
+ "DEMO": datasets.Sequence(datasets.Value("int64")),
1315
+ "COND" : datasets.Sequence(datasets.Value("int64")),
1316
+ "MEDS" : datasets.Array2D(shape=(None, None), dtype='int64') ,
1317
+ "PROC" : datasets.Array2D(shape=(None, None), dtype='int64') ,
1318
+ "CHART/LAB" : datasets.Array2D(shape=(None, None), dtype='int64')
1319
+ }
1320
+ ),
1321
+ homepage="",
1322
+ license="",
1323
+ citation="",)
1324
+ return datasets.DatasetInfo(
1325
+ description="Dataset "+self.config.name,
1326
+ features=datasets.Features(
1327
+ {
1328
+ "y_true": datasets.Value("float"),
1329
+ "episode": datasets.Array2D(shape=(None,None), dtype=float)
1330
+ }),
1331
+ homepage="",
1332
+ license="",
1333
+ citation="",
1334
+ )
1335
+
1336
+ def _split_generators(self, dl_manager):
1337
+ self.path = os.path.join(DATASET_SAVE_PATH,self.config.name)
1338
+ preprocess(self.config.name,self.mimic3_path)
1339
+ if "mimic4" in self.config.name:
1340
+ return [
1341
+ datasets.SplitGenerator(
1342
+ name=datasets.Split.TRAIN,
1343
+ gen_kwargs={
1344
+ "filepath":os.path.join(self.path,"train_listfile.pkl"),
1345
+ "split": "train",
1346
+ },
1347
+ ),
1348
+ datasets.SplitGenerator(
1349
+ name=datasets.Split.VALIDATION,
1350
+ gen_kwargs={
1351
+ "filepath":os.path.join(self.path,"val_listfile.pkl"),
1352
+ "split": "validation",
1353
+ },
1354
+ ),
1355
+ datasets.SplitGenerator(
1356
+ name=datasets.Split.TEST,
1357
+ gen_kwargs={
1358
+ "filepath":os.path.join(self.path,"test_listfile.pkl"),
1359
+ "split": "test"
1360
+ },
1361
+ ),
1362
+ ]
1363
+ return [
1364
+ datasets.SplitGenerator(
1365
+ name=datasets.Split.TRAIN,
1366
+ gen_kwargs={
1367
+ "filepath":os.path.join(self.path,"train_listfile.csv"),
1368
+ "split": "train",
1369
+ },
1370
+ ),
1371
+ datasets.SplitGenerator(
1372
+ name=datasets.Split.VALIDATION,
1373
+ gen_kwargs={
1374
+ "filepath":os.path.join(self.path,"val_listfile.csv"),
1375
+ "split": "validation",
1376
+ },
1377
+ ),
1378
+ datasets.SplitGenerator(
1379
+ name=datasets.Split.TEST,
1380
+ gen_kwargs={
1381
+ "filepath":os.path.join(self.path,"test_listfile.csv"),
1382
+ "split": "test"
1383
+ },
1384
+ ),
1385
+ ]
1386
+
1387
+ def _generate_exemples_CHARTONLY(self, filepath):
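+ # Chart-only generator: each row of the list file gives the labels and the
+ # episode CSV to read; the Hours column plus 17 clinical variables are
+ # collected (truncated to the prediction window for length-of-stay and
+ # decompensation), passed through preprocess_to_learn() and yielded with
+ # the labels.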
1388
+ key = 0
1389
+ with open(filepath, encoding="utf-8") as f:
1390
+ reader1 = csv.DictReader(f)
1391
+ for data in reader1:
1392
+
1393
+ y_trues = {}
1394
+
1395
+ for e in data:
1396
+ if e != "period_length" and e != "stay":
1397
+ y_trues[e] = data[e]
1398
+
1399
+ if "period_length" in data:
1400
+ period_length = float(data["period_length"])
1401
+ else:
1402
+ period_length = self.window_period_length
1403
+ stay = data["stay"]
1404
+
1405
+ if os.path.isfile(os.path.join(self.path,"test",stay)):
1406
+ stay = os.path.join(self.path,"test",stay)
1407
+ else:
1408
+ stay = os.path.join(self.path,"train",stay)
1409
+
1410
+ # stay = self.path+"/train/30820_episode1_timeseries.csv"
1411
+ # period_length = 42.0
1412
+ episode = {
1413
+ "Hours": [],
1414
+ "Capillary refill rate": [],
1415
+ "Diastolic blood pressure": [],
1416
+ "Fraction inspired oxygen": [],
1417
+ "Glascow coma scale eye opening": [],
1418
+ "Glascow coma scale motor response": [],
1419
+ "Glascow coma scale total": [],
1420
+ "Glascow coma scale verbal response": [],
1421
+ "Glucose": [],
1422
+ "Heart Rate": [],
1423
+ "Height": [],
1424
+ "Mean blood pressure": [],
1425
+ "Oxygen saturation": [],
1426
+ "Respiratory rate": [],
1427
+ "Systolic blood pressure": [],
1428
+ "Temperature": [],
1429
+ "Weight": [],
1430
+ "pH": [],
1431
+ }
1432
+ with open(stay, encoding="utf-8") as f2:
1433
+ reader2 = csv.DictReader(f2)
1434
+ for data2 in reader2:
1435
+ if self.config.name in ["length-of-stay","decompensation"] and float(data2["Hours"]) > period_length + 1e-6:
1436
+ break
1437
+
1438
+ episode["Hours"].append(float(data2["Hours"]) if data2["Hours"] else 0.0)
1439
+ episode["Capillary refill rate"].append(float(data2["Capillary refill rate"]) if data2["Capillary refill rate"] else np.nan)
1440
+ episode["Diastolic blood pressure"].append(float(data2["Diastolic blood pressure"]) if data2["Diastolic blood pressure"] else np.nan)
1441
+ episode["Fraction inspired oxygen"].append(float(data2["Fraction inspired oxygen"]) if data2["Fraction inspired oxygen"] else np.nan)
1442
+ episode["Glascow coma scale eye opening"].append(data2["Glascow coma scale eye opening"])
1443
+ episode["Glascow coma scale motor response"].append(data2["Glascow coma scale motor response"])
1444
+ episode["Glascow coma scale total"].append(float(data2["Glascow coma scale total"]) if data2["Glascow coma scale total"] else np.nan)
1445
+ episode["Glascow coma scale verbal response"].append(data2["Glascow coma scale verbal response"])
1446
+ episode["Glucose"].append(float(data2["Glucose"]) if data2["Glucose"] else np.nan)
1447
+ episode["Heart Rate"].append(float(data2["Heart Rate"]) if data2["Heart Rate"] else np.nan)
1448
+ episode["Height"].append(float(data2["Height"]) if data2["Height"] else np.nan)
1449
+ episode["Mean blood pressure"].append(float(data2["Mean blood pressure"]) if data2["Mean blood pressure"] else np.nan)
1450
+ episode["Oxygen saturation"].append(float(data2["Oxygen saturation"]) if data2["Oxygen saturation"] else np.nan)
1451
+ episode["Respiratory rate"].append(float(data2["Respiratory rate"]) if data2["Respiratory rate"] else np.nan)
1452
+ episode["Systolic blood pressure"].append(float(data2["Systolic blood pressure"]) if data2["Systolic blood pressure"] else np.nan)
1453
+ episode["Temperature"].append(float(data2["Temperature"]) if data2["Temperature"] else np.nan)
1454
+ episode["Weight"].append(float(data2["Weight"]) if data2["Weight"] else np.nan)
1455
+ episode["pH"].append(float(data2["pH"]) if data2["pH"] else np.nan)
1456
+
1457
+ X,Y = preprocess_to_learn(
1458
+ {
1459
+ "episode":episode
1460
+ },
1461
+ code_to_onehot=self.code_to_onehot,
1462
+ episode_filter=self.episode_filter,
1463
+ mode=self.mode,
1464
+ window_size=self.window_size,
1465
+ empty_value=self.empty_value,
1466
+ input_strategy=self.input_strategy,
1467
+ add_mask_columns=self.add_mask_columns,
1468
+ statistics_mode_column_scale=self.statistics_mode_column_scale,
1469
+ window_period_length=period_length
1470
+ )
1471
+ # print(np.around(X.flatten(),4).tolist())
1472
+ # exit(0)
1473
+ y_trues["episode"] = X
1474
+ yield key, y_trues
1475
+ key += 1
1476
+
1477
+ ##################################################################################################################################################
1478
+ #### GENERATION OF FULL EXAMPLES, TENSOR MODE (CHARTS + INPUTEVENTS + DIAGNOSES) ##### FROM THOURIA ############################################
1479
+ ##################################################################################################################################################
1480
+
1481
+ def load_vocab(self):
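+ # Lazily load the vocabulary files (genders, ethnicities, insurances, ICD
+ # codes, procedure and medication item ids, ages) used to encode MIMIC-IV
+ # examples; each dictionary is read from disk at most once.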
1482
+ if self.full_gens_loaded == False:
1483
+ self.full_gens = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"genders_dict.csv"))["GENDER"].tolist()
1484
+ self.full_gens_loaded = True
1485
+ self.full_gens_len = len(self.full_gens)
1486
+ self.full_gens_reverse = {k: v for v, k in enumerate(self.full_gens)}
1487
+
1488
+ if self.full_eths_loaded == False:
1489
+ self.full_eths = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"ethnicities_dict.csv"))["ETHNICITY"].tolist()
1490
+ self.full_eths_loaded = True
1491
+ self.full_eths_len = len(self.full_eths)
1492
+ self.full_eths_reverse = {k: v for v, k in enumerate(self.full_eths)}
1493
+
1494
+ if self.full_ins_loaded == False:
1495
+ self.full_ins = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"insurances_dict.csv"))["INSURANCE"].tolist()
1496
+ self.full_ins_loaded = True
1497
+ self.full_ins_len = len(self.full_ins)
1498
+ self.full_ins_reverse = {k: v for v, k in enumerate(self.full_ins)}
1499
+
1500
+ if self.full_cond_loaded == False:
1501
+ self.full_cond = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"icd_dict.csv"),names=["COND","SHORT","LONG"],skiprows=1)
1502
+ self.full_cond_loaded = True
1503
+ self.full_cond_len = len(self.full_cond)
1504
+
1505
+ if self.full_proc_loaded == False:
1506
+ self.full_proc = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"pe_itemid_dict.csv"),names=["PROC","SHORT","LONG"],skiprows=1)
1507
+ self.full_proc_loaded = True
1508
+ self.full_proc_len = len(self.full_proc["PROC"])
1509
+
1510
+ if self.full_meds_loaded == False:
1511
+ self.full_meds = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"ie_itemid_dict.csv"),names=["MEDS","LONG","SHORT"],skiprows=1)
1512
+ self.full_meds_loaded = True
1513
+ self.full_meds_len = len(self.full_meds["MEDS"])
1514
+
1515
+ if self.full_ages_loaded == False:
1516
+ self.full_ages = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"ages_dict.csv"),names=["AGE","INDEX"],skiprows=1)["AGE"]
1517
+ self.full_ages_loaded = True
1518
+ self.full_ages_len = len(self.full_ages)
1519
+ self.full_ages_reverse = {k: v for v, k in enumerate(self.full_ages)}
1520
+ self.chartDic = pd.DataFrame({"CHART":["Capillary refill rate","Diastolic blood pressure","Fraction inspired oxygen","Glascow coma scale eye opening","Glascow coma scale motor response","Glascow coma scale total","Glascow coma scale verbal response","Glucose","Heart Rate","Height","Mean blood pressure","Oxygen saturation","Respiratory rate","Systolic blood pressure","Temperature","Weight","pH"]})
1521
+
1522
+ def generate_deep(self,data):
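+ # Build one tensor-mode example: dynamic MEDS/PROC/CHART matrices, the
+ # one-hot condition vector, integer-encoded demographics and the label.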
1523
+ dyn,cond_df,demo=self.concat_data(data)
1524
+ charts = dyn['CHART'].values
1525
+
1526
+ meds = dyn['MEDS'].values
1527
+
1528
+ proc = dyn['PROC'].values
1529
+
1530
+ stat = cond_df.values[0]
1531
+
1532
+ y = int(demo['label'])
1533
+ demo["gender"].replace(self.full_gens_reverse, inplace=True)
1534
+ demo["ethnicity"].replace(self.full_eths_reverse, inplace=True)
1535
+ demo["insurance"].replace(self.full_ins_reverse, inplace=True)
1536
+ demo["Age"] = demo["Age"].round()
1537
+ demo["insurance"].replace(self.full_ages_reverse, inplace=True)
1538
+
1539
+ demo = demo[["gender","ethnicity","insurance","Age"]].values[0]
1540
+ return stat, demo, meds, charts, proc, y
1541
+
1542
+
1543
+ def _generate_examples_deep(self, filepath):
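+ # Read the pickled list file and yield one tensor-mode example per ICU stay.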
1544
+
1545
+ self.load_vocab()
1546
+
1547
+ with open(filepath, 'rb') as fp:
1548
+ dico = pickle.load(fp)
1549
+
1550
+ for key, data in enumerate(dico):
1551
+ stat, demo, meds, chart, proc, y = self.generate_deep(data)
1552
+ yielded = {
1553
+ 'label': y,
1554
+ 'DEMO': demo,
1555
+ 'COND': stat,
1556
+ 'MEDS': meds,
1557
+ 'PROC': proc,
1558
+ 'CHART/LAB': chart,
1559
+ }
1560
+ yield int(key), yielded
1561
+
1562
+ ##################################################################################################################################################
1563
+ #### GENERATION OF FULL EXAMPLES, CONCAT/AGGREG MODE (CHARTS + INPUTEVENTS + DIAGNOSES) ##### FROM THOURIA #####################################
1564
+ ##################################################################################################################################################
1565
+
1566
+ def concat_data(self,data):
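+ # Align a single stay on the full vocabularies: demographics as a one-row
+ # frame, one-hot encoded conditions, and MEDS/PROC/CHART time series with
+ # zero columns for items never observed in this stay.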
1567
+ meds = data['Med']
1568
+ proc = data['Proc']
1569
+ chart = codes_to_int(input_values(data['Chart']))
1570
+ cond = data['Cond']['fids']
1571
+
1572
+ cond_df,proc_df,chart_df,meds_df=pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),pd.DataFrame()
1573
+
1574
+ #demographic
1575
+ demo=pd.DataFrame(columns=['Age','gender','ethnicity','label','insurance'])
1576
+ new_row = {'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'], 'label': data['label'], 'insurance': data['insurance']}
1577
+ demo = pd.concat([demo, pd.DataFrame([new_row])], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
1578
+
1579
+ ##########COND#########
1580
+ #get all conds
1581
+
1582
+ features=pd.DataFrame(np.zeros([1,len(self.full_cond)]),columns=self.full_cond['COND'])
1583
+
1584
+ #onehot encode
1585
+
1586
+ cond_df = pd.DataFrame(cond,columns=['COND'])
1587
+ cond_df['val'] = 1
1588
+ cond_df = (cond_df.drop_duplicates()).pivot(columns='COND',values='val').reset_index(drop=True)
1589
+ cond_df = cond_df.fillna(0)
1590
+ oneh = cond_df.sum().to_frame().T
1591
+ combined_df = pd.concat([features,oneh],ignore_index=True).fillna(0)
1592
+ combined_oneh = combined_df.sum().to_frame().T
1593
+ cond_df = combined_oneh
1594
+ for c in cond_df.columns :
1595
+ if c not in features:
1596
+ cond_df = cond_df.drop(columns=[c])
1597
+
1598
+ ##########PROC#########
1599
+
1600
+
1601
+ feat=proc.keys()
1602
+ proc_val=[proc[key] for key in feat]
1603
+ procedures=pd.DataFrame(self.full_proc["PROC"],columns=['PROC'])
1604
+ features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC'])
1605
+ features.columns=pd.MultiIndex.from_product([["PROC"], features.columns])
1606
+ procs=pd.DataFrame(columns=feat)
1607
+ for p,v in zip(feat,proc_val):
1608
+ procs[p]=v
1609
+ procs.columns=pd.MultiIndex.from_product([["PROC"], procs.columns])
1610
+ proc_df = pd.concat([features,procs],ignore_index=True).fillna(0)
1611
+
1612
+ ##########CHART#########
1613
+
1614
+
1615
+
1616
+ feat=chart.keys()
1617
+ chart_val=[chart[key] for key in feat]
1618
+ charts=pd.DataFrame(self.chartDic,columns=['CHART'])
1619
+ features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART'])
1620
+ features.columns=pd.MultiIndex.from_product([["CHART"], features.columns])
1621
+
1622
+ chart=pd.DataFrame(columns=feat)
1623
+ for c,v in zip(feat,chart_val):
1624
+ chart[c]=v
1625
+ chart.columns=pd.MultiIndex.from_product([["CHART"], chart.columns])
1626
+ chart_df = pd.concat([features,chart],ignore_index=True).fillna(0)
1627
+
1628
+ ###MEDS
1629
+
1630
+ feat=[str(x) for x in meds.keys()]
1631
+ med_val=[meds[int(key)] for key in feat]
1632
+ meds=[str(x) for x in self.full_meds["MEDS"]]
1633
+ features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds)
1634
+ features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns])
1635
+ med=pd.DataFrame(columns=feat)
1636
+ for m,v in zip(feat,med_val):
1637
+ med[m]=v
1638
+ med.columns=pd.MultiIndex.from_product([["MEDS"], med.columns])
1639
+
1640
+ meds_df = pd.concat([features,med],ignore_index=True).fillna(0)
1641
+ dyn_df = pd.concat([meds_df,proc_df,chart_df], axis=1)
1642
+ return dyn_df,cond_df,demo
1643
+
1644
+ def _generate_ml(self,dyn,stat,demo,concat_cols,concat):
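+ # Flatten the dynamic features: either concatenate every time step into one
+ # row (concat=True) or aggregate per item (mean for CHART and MEDS, max for
+ # the rest), then append the condition and demographic columns.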
1645
+ X_df=pd.DataFrame()
1646
+ if concat:
1647
+ dyna=dyn.copy()
1648
+ dyna.columns=dyna.columns.droplevel(0)
1649
+ dyna=dyna.to_numpy()
1650
+ dyna=np.nan_to_num(dyna, copy=False)
1651
+ dyna=dyna.reshape(1,-1)
1652
+ #dyn_df=pd.DataFrame(data=dyna,columns=concat_cols)
1653
+ dyn_df=pd.DataFrame(data=dyna)
1654
+ else:
1655
+ dyn_df=pd.DataFrame()
1656
+ for key in dyn.columns.levels[0]:
1657
+ dyn_temp=dyn[key]
1658
+ if ((key=="CHART") or (key=="MEDS")):
1659
+ agg=dyn_temp.aggregate("mean")
1660
+ agg=agg.reset_index()
1661
+ else:
1662
+ agg=dyn_temp.aggregate("max")
1663
+ agg=agg.reset_index()
1664
+
1665
+ if dyn_df.empty:
1666
+ dyn_df=agg
1667
+ else:
1668
+ dyn_df=pd.concat([dyn_df,agg],axis=0)
1669
+ dyn_df=dyn_df.T
1670
+ dyn_df.columns = dyn_df.iloc[0]
1671
+ dyn_df=dyn_df.iloc[1:,:]
1672
+
1673
+ X_df=pd.concat([dyn_df,stat],axis=1)
1674
+ X_df=pd.concat([X_df,demo],axis=1)
1675
+ return X_df
1676
+
1677
+
1678
+ def _generate_examples_encoded(self, filepath, concat):
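+ # Aggregated/concatenated generator ("mimic4-aggreg" mode): label-encode the
+ # demographics, flatten each stay with _generate_ml() and yield the label,
+ # the feature vector and the column names.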
1679
+ self.load_vocab()
1680
+
1681
+ gen_encoder,eth_encoder,ins_encoder = LabelEncoder(),LabelEncoder(),LabelEncoder()
1682
+
1683
+ gen_encoder.fit(self.full_gens)
1684
+ eth_encoder.fit(self.full_eths)
1685
+ ins_encoder.fit(self.full_ins)
1686
+ with open(filepath, 'rb') as fp:
1687
+ dico = pickle.load(fp)
1688
+ df = pd.DataFrame(dico)
1689
+
1690
+ for i, data in df.iterrows():
1691
+ concat_cols=[]
1692
+ dyn_df,cond_df,demo=self.concat_data(data)
1693
+ dyn=dyn_df.copy()
1694
+ dyn.columns=dyn.columns.droplevel(0)
1695
+ cols=dyn.columns
1696
+ time=dyn.shape[0]
1697
+ # for t in range(time):
1698
+ # cols_t = [str(x) + "_"+str(t) for x in cols]
1699
+ # concat_cols.extend(cols_t)
1700
+ demo['gender']=gen_encoder.transform(demo['gender'])
1701
+ demo['ethnicity']=eth_encoder.transform(demo['ethnicity'])
1702
+ demo['insurance']=ins_encoder.transform(demo['insurance'])
1703
+ label = data['label']
1704
+ demo = demo.drop(['label'],axis=1)
1705
+ X = self._generate_ml(dyn = dyn_df, stat = cond_df, demo = demo, concat_cols = concat_cols, concat = concat)
1706
+ columns = X.columns
1707
+ X = X.values.tolist()[0]
1708
+
1709
+ yield int(i), {
1710
+ "label": label,
1711
+ "features": X,
1712
+ "columns":columns
1713
+ }
1714
+
1715
+ def _generate_examples_text(self, filepath):
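+ # Naive-prompt generator: build a natural-language description of the stay
+ # (demographics, diagnoses, aggregated chart events, medications and
+ # procedures, depending on the mimic4_text_* flags) and yield it as text.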
1716
+ self.load_vocab()
1717
+
1718
+ with open(filepath, 'rb') as fp:
1719
+ dico = pickle.load(fp)
1720
+
1721
+ for i, data in enumerate(dico):
1722
+
1723
+
1724
+
1725
+
1726
+
1727
+ # add demographic information
1728
+ age = str(round(data['age']))
1729
+ gender = str(data['gender'])
1730
+ if gender == "M":
1731
+ gender = "male"
1732
+ elif gender == "F":
1733
+ gender = "female"
1734
+ ethnicity = str(data['ethnicity'])
1735
+ insurance = str(data['insurance'])
1736
+ X = ""
1737
+ if self.mimic4_text_demos or self.mimic4_text_cond:
1738
+ X = "The patient "
1739
+ if self.mimic4_text_demos:
1740
+ if self.mimic4_text_cond:
1741
+ X += "("+ethnicity+" "+gender+", "+age+" years old, covered by "+insurance+") "
1742
+ else:
1743
+ X += "is "+ethnicity+" "+gender+", "+age+" years old, covered by "+insurance+". "
1744
+
1745
+
1746
+ # add diagnoses
1747
+ if self.mimic4_text_cond:
1748
+ X += "was diagnosed with "
1749
+ cond = data['Cond']['fids']
1750
+ for idx,c in enumerate(cond):
1751
+ X += self.full_cond.loc[self.full_cond["COND"] == str(c)]["LONG"].values[0]+("; " if idx+1 < len(cond) else ". ")
1752
+
1753
+ # drop NaN chart values and aggregate by mean
1754
+
1755
+ if self.mimic4_text_charts:
1756
+ for x in data["Chart"]:
1757
+ data["Chart"][x] = [xi for xi in data["Chart"][x] if not (xi == "" or (isinstance(xi,float) and np.isnan(xi)))]
1758
+ data["Chart"] = codes_to_int(data["Chart"])
1759
+ chart = {x:round(np.mean([it for it in data['Chart'][x]]),3) for x in data["Chart"] if len(data["Chart"][x]) > 0}
1760
+
1761
+ # special handling for the Glasgow coma scale chart columns
1762
+ for col in ["Glascow coma scale eye opening","Glascow coma scale motor response","Glascow coma scale verbal response"]:
1763
+ if not col in chart:
1764
+ continue
1765
+ chart[col] = int(round(chart[col]))
1766
+ for dtem in discretizer[col]:
1767
+ if dtem[1] == chart[col]:
1768
+ chart[col] = dtem[0][-1]
1769
+ for col in ["Glascow coma scale total"]:
1770
+ if not col in chart:
1771
+ continue
1772
+ chart[col] = int(round(chart[col]))
1773
+
1774
+ X += "The chart events measured were : "
1775
+ for idx,c in enumerate(chart):
1776
+ X += str(chart[c]) + " for " + c + ("; " if (idx+1 < len(chart.keys())) else ". ")
1777
+
1778
+ #medications
1779
+ if self.mimic4_text_meds:
1780
+ meds = data['Med']
1781
+ if len(meds.keys()) != 0:
1782
+ X += "The mean amounts of medications administered during the episode were : "
1783
+ meds = {x:round(np.mean([it for it in meds[x]]),3) for x in meds if len(meds[x]) > 0}
1784
+ for idx,c in enumerate(meds):
1785
+ if meds[c] != 0:
1786
+ short = self.full_meds.loc[self.full_meds["MEDS"] == int(c)]["SHORT"].values[0]
1787
+ long = self.full_meds.loc[self.full_meds["MEDS"] == int(c)]["LONG"].values[0]
1788
+ name = long if (long != "nan" and not (isinstance(long,float) and np.isnan(long))) else short
1789
+ if (name != "nan" and not (isinstance(name,float) and np.isnan(name))):
1790
+ X += str(meds[c]) + " of " + name + ("; " if (idx+1 < len(meds.keys())) else ". ")
1791
+ else:
1792
+ X += "No medication was administered."
1793
+
1794
+ #procedures
1795
+ if self.mimic4_text_procs:
1796
+ proc = data['Proc']
1797
+ if len(proc.keys()) != 0:
1798
+ X += "The procedures performed were: "
1799
+ for idx,c in enumerate(proc):
1800
+ short = self.full_proc.loc[self.full_proc["PROC"] == int(c)]["SHORT"].values[0]
1801
+ long = self.full_proc.loc[self.full_proc["PROC"] == int(c)]["LONG"].values[0]
1802
+ name = long if (long != "nan" and not (isinstance(long,float) and np.isnan(long))) else short
1803
+ if (name != "nan" and not (isinstance(name,float) and np.isnan(name))):
1804
+ X += str(name) + ("; " if (idx+1 < len(meds.keys())) else ". ")
1805
+ else:
1806
+ X += "No procedure was performed."
1807
+ yield int(i), {
1808
+ "label": data['label'],
1809
+ "features": X,
1810
+ }
1811
+
1812
+
1813
+ #### EXAMPLE GENERATION ###############################################################
1814
+
1815
+ def _generate_examples(self, filepath, split):
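+ # Dispatch to the generator matching the configuration name and mode.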
1816
+ if "mimic4" in self.config.name:
1817
+ if self.mode == "mimic4-aggreg":
1818
+ yield from self._generate_examples_encoded(filepath,False)
1819
+ elif self.mode == "mimic4-tensor":
1820
+ yield from self._generate_examples_deep(filepath)
1821
+ elif self.mode == "mimic4-naive-prompt":
1822
+ yield from self._generate_examples_text(filepath)
1823
+ else:
1824
+ yield from self._generate_exemples_CHARTONLY(filepath)