import csv
import os
import datasets
import numpy as np
from datetime import datetime
import pandas as pd
from datasets import IterableDataset
from scipy.stats import skew
import sys
import pickle
from sklearn.preprocessing import LabelEncoder
DATASET_SAVE_PATH = os.path.join(os.path.expanduser('~'),"mimic3_dataset")
os.makedirs(DATASET_SAVE_PATH,exist_ok=True)
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(suppress=True)
###################################
# SOME UTILS #
###################################
def get_progression(current,total,length=20,filled_str="=",empty_str="-"):
nb = round(length*current/total)
return "["+(nb*filled_str)+((length-nb)*empty_str)+"]"
def is_empty_value(value,empty_value):
"""
Returns True if value is an empty value (for example np.nan when empty_value is np.nan)
value must not be a list
"""
return (isinstance(value,float) and np.isnan(empty_value) and np.isnan(value)) or ((type(value) != list) and (value == empty_value))
def is_empty_list(l,empty_value):
"""
Returns True if l contains only empty values (for example l == [np.nan, np.nan] when empty_value is np.nan)
l must be a list
"""
if isinstance(l,float) or isinstance(l,str) or isinstance(l,int):
return False
for elem in l:
if not is_empty_value(elem,empty_value):
return False
return True
def dtc(x):
"""
string to datetime
"""
return datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
def bic(x):
"""
string to int
"""
try:
return (-1 if x == "" else int(x))
except:
print("error",x)
return -1
def bfc(x):
"""
string to float
"""
try:
return (-1 if x == "" else float(x))
except:
print("error",x)
return -1
def id_to_string(id):
"""
id (string or float) to a normalized string
"""
if (isinstance(id,float) and np.isnan(id)) or not id or id == "":
return id
try:
return str(int(float(id)))
except:
return str(id)
################################################################################
################################################################################
## ##
## DATASET TO NUMPY ARRAY ##
## ##
################################################################################
################################################################################
###################################
# ABOUT DATA NORMALIZATION #
###################################
def calculate_normalization(iterator):
"""
calculates means and stds over every column of every episode yielded by iterator\n
"""
nb = 0
sum_x = None
sum_x_sq = None
#feeding data
for batch in iterator:
x = np.array(batch[0])
nb += x.shape[0]*x.shape[1]
if sum_x is None:
sum_x = np.sum(x, axis=(0,1))
sum_x_sq = np.sum(x**2, axis=(0,1))
else:
sum_x += np.sum(x, axis=(0,1))
sum_x_sq += np.sum(x**2, axis=(0,1))
#Computing mean
means = (1.0 / nb) * sum_x
eps = 1e-7
#Computing stds
stds = np.sqrt((1.0/(nb - 1)) * (sum_x_sq - (2.0 * sum_x * means) + (nb * means**2)))
stds[stds < eps] = eps
return means,stds
def normalize(X, means, stds, columns=[]):
"""
normalizes X with means and stds. columns is the list of column indexes to normalize; if no columns are given, everything is normalized\n
"""
ret = 1.0 * X
if len(columns) > 0:
for col in columns:
ret[:,:,col] = (X[:,:,col] - means[col]) / stds[col]
else:
for col in range(X.shape[2]):
ret[:,:,col] = (X[:,:,col] - means[col]) / stds[col]
return ret
def try_load_normalizer(path, nb_columns):
"""
Tries to load means and stds from a saved file.\n
If the file (path) doesn't exist, returns empty means and stds lists
nb_columns is the number of columns in the dataset (not the number of columns you load)
"""
means,stds = np.zeros(nb_columns),np.ones(nb_columns)
if not os.path.isfile(path):
return [],[]
with open(path, newline='') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',')
for row in spamreader:
means[int(row["column"])] = float(row["mean"])
stds[int(row["column"])] = float(row["std"])
return means,stds
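#Minimal usage sketch (illustrative only, not part of the pipeline): feeds a few
#synthetic (X, y) batches of shape (batch, time, features) to calculate_normalization,
#then normalizes one batch with the resulting means/stds. The helper name
#_example_normalization_sketch and the synthetic shapes are assumptions.
def _example_normalization_sketch():
    rng = np.random.default_rng(0)
    batches = [(rng.random((2, 48, 5)), np.zeros(2)) for _ in range(3)]
    means, stds = calculate_normalization(iter(batches))
    X_norm = normalize(batches[0][0], means, stds)  #no columns given -> every column is normalized
    return X_norm.shape  #(2, 48, 5)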
###################################
# THE DICTIONARIES / CONSTANTS #
###################################
#The default values for some columns
normal_values = {
"Capillary refill rate": 0.0,
"Diastolic blood pressure": 59.0,
"Fraction inspired oxygen": 0.21,
"Glascow coma scale eye opening": "4 Spontaneously",
"Glascow coma scale motor response": "6 Obeys Commands",
"Glascow coma scale total": "15.0",
"Glascow coma scale verbal response": "5 Oriented",
"Glucose": 128.0,
"Heart Rate": 86,
"Height": 170.0,
"Mean blood pressure": 77.0,
"Oxygen saturation": 98.0,
"Respiratory rate": 19,
"Systolic blood pressure": 118.0,
"Temperature": 36.6,
"Weight": 81.0,
"pH": 7.4
}
#Dictionary to transform some string values in columns to integers or indexes
discretizer = {
"Glascow coma scale eye opening": [
(["None"],0),
(["1 No Response"],1),
(["2 To pain","To Pain"],2),
(["3 To speech","To Speech"],3),
(["4 Spontaneously","Spontaneously"],4),
],
"Glascow coma scale motor response": [
(["1 No Response","No response"],1),
(["2 Abnorm extensn","Abnormal extension"],2),
(["3 Abnorm flexion","Abnormal Flexion"],3),
(["4 Flex-withdraws","Flex-withdraws"],4),
(["5 Localizes Pain","Localizes Pain"],5),
(["6 Obeys Commands","Obeys Commands"],6),
],
"Glascow coma scale total": [
(["3.0"],3),
(["4.0"],4),
(["5.0"],5),
(["6.0"],6),
(["7.0"],7),
(["8.0"],8),
(["9.0"],9),
(["10.0"],10),
(["11.0"],11),
(["12.0"],12),
(["13.0"],13),
(["14.0"],14),
(["15.0"],15),
],
"Glascow coma scale verbal response": [
(["1 No Response","No Response-ETT","1.0 ET/Trach","No Response"],1),
(["2 Incomp sounds","Incomprehensible sounds"],2),
(["3 Inapprop words","Inappropriate Words"],3),
(["4 Confused","Confused"],4),
(["5 Oriented","Oriented"],5),
]
}
#The loaded files dictionaries
itemiddict = {}
######################################################################
# NORMALIZATION TYPE "WINDOW" WITH AMOUNT/RATE PROBLEM #
######################################################################
def normalize_onehot_episodes_window(row, code_column="", value_column=False, period_length=48.0, window_size=1e-1):
"""
returns a dict whose keys are the items of code_column and whose values are lists binning period_length hours into windows of window_size hours
made for onehot encodings
"""
N_bins = int(period_length / window_size + 1.0 - 0.000001)
returned_rates = {}
for idx,starttime in enumerate(row["STARTTIME"]):
if not pd.isnull(row["ENDTIME"][idx]) and row["ENDTIME"][idx] != None and row["ENDTIME"][idx] != "":
endtime = row["ENDTIME"][idx]
isRate = True
else:
endtime = starttime
isRate = False
code = row[code_column][idx]
if code == "" or (isinstance(code,float) and np.isnan(code)) or pd.isnull(code):
continue
first_bin_id = int(starttime / window_size - 0.000001)
last_bin_id = min(N_bins-1,int(endtime / window_size - 0.000001))
val = 1
if value_column:
val = row["RATE"][idx]*60 if isRate else row["AMOUNT"][idx]*60
#If code not in dict we add an array of size N_bins containing zeros
if not code in returned_rates:
returned_rates[code] = [0]*N_bins
#We add the current value to the good timestamp in the rates array
for bin_id in range(first_bin_id,last_bin_id+1):
returned_rates[code][bin_id] += val
return returned_rates
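#Minimal sketch (assumption: a hand-built event dict mirroring the
#to_dict(orient='list') layout used later in do_listfile). Shows how one point
#event (no ENDTIME) and one multi-hour rate event are spread over 1-hour bins.
def _example_onehot_window_sketch():
    row = {
        "STARTTIME": [0.5, 2.0],
        "ENDTIME": [np.nan, 5.0],
        "ITEMID": ["225158", "221906"],
        "AMOUNT": [100.0, np.nan],
        "RATE": [np.nan, 0.2],
    }
    bins = normalize_onehot_episodes_window(row, code_column="ITEMID", value_column=True,
                                            period_length=48.0, window_size=1.0)
    #item "225158" receives 100*60 in bin 0; item "221906" receives 0.2*60 in bins 1..4
    return bins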
#######################################
# NORMALIZATION TYPE "WINDOW" #
#######################################
def normalize_episodes_window(row, period_length=48.0, window_size=1e-1):
"""
returns the episode re-binned over the first period_length hours, using bins of window_size hours
values in the dict "row" must not be lists
"""
#Getting types in every columns
types = {}
for e in row["episode"]:
if isinstance(row["episode"][e][0],float):
types[e] = float
else:
types[e] = str
episode = {}
#Number of rows
N_bins = int(period_length / window_size + 1.0 - 0.000001)
#Building every column with empty values
for e in row["episode"]:
if e != "Hours":
episode[e] = [np.nan]*N_bins
#Filling with available data from the episode
for idx,time in enumerate(row["episode"]["Hours"]):
#Calculating row of the current data
bin_id = int(time / window_size - 0.000001)
#Filling for every column
for col in episode:
v = row["episode"][col][idx]
#If data is not empty we add it
if v != "" and not (isinstance(v,float) and np.isnan(v)) and not v == None:
episode[col][bin_id] = v
return episode
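#Minimal sketch (assumption: a tiny hand-built episode). Shows how irregularly
#timed measurements are re-sampled onto fixed window_size-hour bins, with np.nan
#where no measurement falls inside a bin.
def _example_episode_window_sketch():
    row = {"episode": {
        "Hours": [0.2, 1.1, 3.7],
        "Heart Rate": [86.0, np.nan, 92.0],
        "Glascow coma scale total": ["15.0", "", "14.0"],
    }}
    ep = normalize_episodes_window(row, period_length=6.0, window_size=1.0)
    return ep["Heart Rate"]  #[86.0, nan, nan, 92.0, nan, nan]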
#######################################
# NORMALIZATION TYPE "STATISTICS" #
#######################################
def normalize_episodes_statistics(row, column_scale=True,windows = [(0,1),(0,0.10),(0,0.25),(0,0.50),(0.90,1),(0.75,1),(0.50,1)],functions = [(min,"min"), (max,"max"), (np.mean,"mean"), (np.std,"std"), (skew,"skew"), (len,"len")]):
"""
Computes statistics over the episode (row["episode"]) and returns them as a dict of lists\n
windows is an array containing all the periods to do statistics on (tuples of percentages, ex: (0.5,0.6) means "between 50% and 60% of the episode")\n
functions are the functions to apply to compute statistics\n
column_scale=True means we calculate the percentages between the first and last value of every column. False means we calculate the percentages between the first and last hours of the episode.
"""
episode = row["episode"]
returned_episode = {x:[] for _,x in functions}
#First and last hour (we will keep it if column_scale=False)
L = row["episode"]["Hours"][0]
R = row["episode"]["Hours"][-1]
length = R - L
#For every column in episode
for e in episode:
#If column_scale we find the first and last hours that have a value (!= np.nan)
if column_scale:
Li = 0
Ri = len(row["episode"]["Hours"])-1
while Li < len(row["episode"]["Hours"])-1 and (np.isnan(row["episode"][e][Li]) or row["episode"][e][Li] == ""):
Li += 1
while Ri >= 0 and (np.isnan(row["episode"][e][Ri]) or row["episode"][e][Ri] == ""):
Ri -= 1
if Ri < 0 or Li >= len(row["episode"]["Hours"]):
Li,Ri = 0,0
L = row["episode"]["Hours"][Li]
R = row["episode"]["Hours"][Ri]
length = R - L
#We ignore Hour column
if e == "Hours":
continue
#For every statistics windows
for window in windows:
#We calculate first and last hour for current column
start_index,end_index = window
start_index,end_index = L + start_index*length,L + end_index*length
onepiece = []
#For every value in the column, if it is inside the window we add it to the statistics
for i,x in enumerate(row["episode"][e]):
if not np.isnan(x) and end_index+1e-6 > row["episode"]["Hours"][i] > start_index-1e-6:
onepiece.append(x)
#If there are no values to do statistics on, we append np.nan for every function
if len(onepiece) == 0:
for function,fname in functions:
returned_episode[fname].append(np.nan)
#else we apply every function to the list
else:
for function,fname in functions:
returned_episode[fname].append(function(onepiece))
return returned_episode
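#Minimal sketch (assumption: a tiny numeric episode, i.e. already passed through
#codes_to_int). Each returned list holds one value per (column, window) pair,
#in column order then window order.
def _example_statistics_sketch():
    row = {"episode": {
        "Hours": [0.0, 12.0, 24.0, 47.0],
        "Heart Rate": [80.0, 90.0, np.nan, 100.0],
    }}
    stats = normalize_episodes_statistics(row, windows=[(0, 1), (0, 0.5)],
                                          functions=[(np.mean, "mean"), (len, "len")])
    return stats  #{'mean': [90.0, 85.0], 'len': [3, 2]}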
#######################################
# SINGLE VALUE TRANSFORMATION #
#######################################
def convert_CODE_to_onehot(itemid, d_path, field):
"""
returns a onehot encoding for the item itemid
the lookup tables are loaded from the files listed in d_path
field is the column of the lookup table that contains the itemids
"""
global itemiddict
#If itemiddict doesn't contain the field we load it
if not field in itemiddict:
itemiddict[field] = pd.DataFrame()
for e in d_path:
itemiddict[field] = pd.concat([itemiddict[field],pd.read_csv(e,converters={field:lambda x:str(x)})],ignore_index=True)
itemiddict[field] = itemiddict[field].sort_values(by=field,ignore_index=True).reset_index(drop=True)
#We build the onehot encoding whose size is the length of the field column
length = len(itemiddict[field].index)
one_hot = np.zeros((length))
#Filling the onehot encoding
if itemid != "" and itemid != 0:
idx = itemiddict[field][field].searchsorted(str(itemid))
if idx > 0:
one_hot[idx-1] = 1
return one_hot
def codes_to_onehot(episode):
"""
returns the episode with every non-float value replaced by a onehot encoding
"""
episode = episode.copy()
#For every column in the episode
for e in episode:
#If the column is in the local discretizer
if e in discretizer:
#Computing size of the onehot encoding
size = 0
for die in discretizer[e]:
size += len(die[0])
#for every value in the column
for i in range(len(episode[e])):
v = episode[e][i]
#If the value we are transforming means something
if (not isinstance(v,float) or not np.isnan(v)) and v != "" and v != 0:
#Transforming the value to onehot encoding
episode[e][i] = np.zeros(size,dtype=int)
index = 0
#Finding the index in the onehot encoding to put 1
for die in discretizer[e]:
for item in die[0]:
if str(v) == item:
episode[e][i][index] = 1
index += 1
#If the value is empty we fill with an array of np.nan
else:
episode[e][i] = np.full(size,fill_value=np.nan)
#Special column that may contain floats but must be converted to onehot encoding
elif e == "Capillary refill rate":
for i in range(len(episode[e])):
v = episode[e][i]
episode[e][i] = np.zeros(2,dtype=int)
if v != "" and float(v) == 1:
episode[e][i][1] = 1
elif v != "" and float(v) == 0:
episode[e][i][0] = 1
return episode
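#Minimal sketch (assumption: a single hand-built Glascow coma scale column).
#The onehot width is the total number of aliases listed for that column in the
#local discretizer (8 for eye opening); empty values become rows of np.nan.
def _example_codes_to_onehot_sketch():
    ep = {"Glascow coma scale eye opening": ["4 Spontaneously", np.nan]}
    out = codes_to_onehot(ep)
    #out[...][0] is an 8-long vector with a 1 at the "4 Spontaneously" position,
    #out[...][1] is an 8-long vector filled with np.nan
    return out["Glascow coma scale eye opening"]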
def convert_CODE_to_int(itemid, d_path, field):
"""
returns an int encoding (index) for the item itemid
the lookup tables are loaded from the files listed in d_path
field is the column of the lookup table that contains the itemids
"""
global itemiddict
#If the field is not available locally, we load it from d_path
if not field in itemiddict:
itemiddict[field] = pd.DataFrame()
for e in d_path:
itemiddict[field] = pd.concat([itemiddict[field],pd.read_csv(e,converters={field:lambda x:str(x)})],ignore_index=True)
itemiddict[field] = itemiddict[field].sort_values(by=field,ignore_index=True).reset_index(drop=True)
#If the itemid is available we return its index
if itemid != "" and itemid != 0:
idx = itemiddict[field][field].searchsorted(str(itemid))
if idx > 0:
return idx-1
return np.nan
def codes_to_int(episode):
"""
returns the episode with every non-float value replaced by an int encoding
"""
episode = episode.copy()
#For every column in episode
for e in episode:
#If the column is available in the local discretizer
if e in discretizer:
#For every value in the column
for i in range(len(episode[e])):
v = episode[e][i]
#If the current value is not None or NaN, we find the encoding
if not isinstance(v,float) or not np.isnan(v):
#If the value is not empty or 0 we look it up in the encoder
if v != "" and v != 0:
value = np.nan
for die in discretizer[e]:
if str(v) in die[0]:
value = die[1]
episode[e][i] = value
#Else we mark it as not found
else:
episode[e][i] = np.nan
return episode
#######################################
# FULL EPISODE TRANSFORM UTILS #
#######################################
def convert_to_numpy_arrays(episode, empty_value=np.nan):
"""
returns the episode as a numpy array of shape (row_number, features_width) plus the y_true vector; features are the keys of episode and can contain arrays, lists or scalar values
"""
#Computing features length
features_width = 0
row_number = 0
for e in episode["episode"]:
x = episode["episode"][e][0]
if isinstance(x,int) or isinstance(x,float) or x == "":
features_width += 1
else:
features_width += len(x)
row_number = len(episode["episode"][e])
#Computing y_true length
y_length = 0
for e in episode:
if e != "episode":
y_length += 1
#Computing y_true
y_true = np.empty(y_length)
index = 0
for e in episode:
if e != "episode":
y_true[index] = episode[e]
index+=1
#Computing features
features = np.empty((row_number,features_width))
index = 0
#For every column in episode
for e in episode["episode"]:
#For every row in the column
for line,x in enumerate(episode["episode"][e]):
#If the value is empty, we fill with empty_value
if (isinstance(x,float) and np.isnan(x)) or x == "":
features[line,index] = empty_value
#Else we fill the array with the numeric value
elif isinstance(x,int) or isinstance(x,float):
features[line,index] = x
#Else (is array or list)
else:
is_empty_array = True
#We check if the array contains only np.nan (is empty)
for elem in x:
if not is_empty_value(elem,np.nan):
is_empty_array = False
break
#If the array is not empty, we copy its values into the right place in the returned array
if not is_empty_array:
features[line,index:index+len(x)] = x
#Else we fill the part of the returned array with empty_value so user knows the data is missing here
else:
features[line,index:index+len(x)] = np.full(len(x),empty_value)
#Advancing the index by the number of elements we added to the returned array
column_exemple = episode["episode"][e][0]
if isinstance(column_exemple,int) or isinstance(column_exemple,float) or x == "":
index += 1
else:
index += len(x)
return features,y_true
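#Minimal sketch (assumption: a tiny already-discretized episode with one label).
#Scalar columns contribute one feature each, onehot columns contribute their
#length, so the result here is a (2 rows, 3 features) array plus the y_true vector.
def _example_to_numpy_sketch():
    row = {
        "y_true": 1,
        "episode": {
            "Heart Rate": [86.0, np.nan],
            "Capillary refill rate": [[1, 0], [0, 1]],
        },
    }
    features, y_true = convert_to_numpy_arrays(row, empty_value=np.nan)
    return features.shape, y_true  #(2, 3), array([1.])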
def filter_episode(row, episode_filter):
"""
row contains an episode and the y_trues.
Filters row["episode"], keeping only the rows for which episode_filter returns True
"""
episode = {col:[] for col in row["episode"]}
for i in range(len(row["episode"]["Hours"])):
#Calculating a row (dico) (= row["episode"][:][i])
dico = {header:row["episode"][header][i] for header in row["episode"]}
#If episode_filter returns true we add the row
if episode_filter(dico):
for col in episode:
episode[col].append(row["episode"][col][i])
#Building returned episode
returned = {}
for col in row:
if col != "episode":
returned[col] = row[col]
returned["episode"] = episode
return returned
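#Minimal sketch (assumption: keep only rows recorded within the first 48 hours).
#An episode_filter receives one row as a {column: value} dict and returns True
#for the rows that should be kept.
def _example_filter_sketch():
    row = {"y_true": 0, "episode": {"Hours": [1.0, 50.0], "Heart Rate": [80.0, 95.0]}}
    kept = filter_episode(row, lambda r: r["Hours"] <= 48.0)
    return kept["episode"]  #{'Hours': [1.0], 'Heart Rate': [80.0]}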
#######################################
# ABOUT IMPUTING VALUES #
#######################################
def input_values(features, empty_value=np.nan, strategy="previous"):
"""
Imputes values in features (replacing empty_value entries) according to strategy
strategy is one of ["previous", "previous-next"]
"""
features = features.copy()
#Imputing the previous value if it exists, the next one otherwise, and the normal value as a last resort
if strategy == "previous-next":
for col in features:
col_vals = features[col]
for i in range(len(col_vals)):
#If the current value is the empty value
if is_empty_list(col_vals[i],np.nan) or is_empty_value(col_vals[i], empty_value):
prev_index = i-1
#We find the previous value
while prev_index >= 0 and (is_empty_list(col_vals[prev_index],np.nan) or is_empty_value(col_vals[prev_index], empty_value)):
prev_index -= 1
#If found we input it
if prev_index >= 0:
features[col][i] = col_vals[prev_index]
#Else we check next value
else:
prev_index = i+1
while prev_index < len(col_vals) and (is_empty_list(col_vals[prev_index],np.nan) or is_empty_value(col_vals[prev_index], empty_value)):
prev_index += 1
if prev_index >= i+1 and prev_index < len(col_vals):
features[col][i] = col_vals[prev_index]
elif col in normal_values:
features[col][i] = normal_values[col]
elif strategy == "previous":
for col in features:
col_vals = features[col]
for i in range(len(col_vals)):
#If the current value is the empty value
if is_empty_list(col_vals[i],np.nan) or is_empty_value(col_vals[i], empty_value):
prev_index = i-1
#We find the previous value
while prev_index >= 0 and (is_empty_list(col_vals[prev_index],np.nan) or is_empty_value(col_vals[prev_index], empty_value)):
prev_index -= 1
#If found we input it
if prev_index >= 0:
features[col][i] = col_vals[prev_index]
#Else we impute the normal value if one is defined
elif col in normal_values:
features[col][i] = normal_values[col]
return features
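#Minimal sketch (assumption: a small episode with gaps). With strategy="previous",
#each missing value takes the last observed one, and leading gaps fall back to
#the column's entry in normal_values when it has one.
def _example_imputation_sketch():
    ep = {"Heart Rate": [np.nan, 88.0, np.nan], "Glucose": [np.nan, np.nan, 140.0]}
    out = input_values(ep, strategy="previous")
    return out  #{'Heart Rate': [86, 88.0, 88.0], 'Glucose': [128.0, 128.0, 140.0]}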
def add_mask(episode):
"""
Adds a mask feature for every column of the episode: an array with 1 for every non-null value and 0 elsewhere
Can be used before imputation to know where data were imputed
"""
keys = [key for key in episode.keys()]
for e in keys:
episode["mask_"+e] = []
for el in episode[e]:
if el == "" or (isinstance(el,float) and np.isnan(el)):
episode["mask_"+e].append(0)
else:
episode["mask_"+e].append(1)
return episode
#######################################
# DATASET TO READABLE DATA FOR ML #
#######################################
def preprocess_to_learn(
episode,
code_to_onehot=True,
episode_filter=None,
mode="full",
window_period_length=48.0,
window_size=0.7,
statistics_mode_column_scale=True,
empty_value=np.nan,
input_strategy=None,
add_mask_columns=False,
):
"""
Main function to transform dataset rows to numpy arrays\n
episode is the episode to transform\n
code_to_onehot is True if you want to transform non-float data to onehot, else it is converted to int\n
episode_filter is a filter function you want to apply to episodes to remove rows\n
mode is the mode of transformation. Available : statistics (for randomforest), window (for LSTM)\n\n
window_period_length is the length of episode to do windows in (for window mode)\n
window_size is the size of the window (for window mode)\n\n
statistics_mode_column_scale is the column mode for statistics mode (see normalize_episodes_statistics)\n
empty_value is the value to put where no data\n
input_strategy can be "previous", "previous-next" or None (see input_values)\n
add_mask_columns adds mask features before imputing missing data (see add_mask) \n
"""
#Filtering rows from the episode
if episode_filter == None:
discr_episode = episode
else:
discr_episode = filter_episode(episode, episode_filter)
#Discretization of data
if mode == "statistics":
discr_episode["episode"] = codes_to_int(discr_episode["episode"])
discr_episode["episode"] = normalize_episodes_statistics(discr_episode,column_scale=statistics_mode_column_scale)
elif mode == "window":
discr_episode["episode"] = normalize_episodes_window(discr_episode, window_period_length, window_size)
#Adding mask
if add_mask_columns:
discr_episode["episode"] = add_mask(discr_episode["episode"])
#Trying to impute some missing values
discr_episode["episode"] = input_values(discr_episode["episode"],empty_value=empty_value,strategy=input_strategy)
#Transforming text to integer (index of string in file) or onehot vector
if mode != "statistics":
if code_to_onehot:
discr_episode["episode"] = codes_to_onehot(discr_episode["episode"])
else:
discr_episode["episode"] = codes_to_int(discr_episode["episode"])
#Transforming to numpy array from dict
returned = convert_to_numpy_arrays(discr_episode, empty_value=empty_value)
return returned
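#Minimal end-to-end sketch (assumption: a tiny hand-built episode with a dummy
#y_true, not real benchmark data). Runs the "window" pipeline with onehot codes
#and previous-value imputation; the result is a (time_bins, features) array and
#the y_true vector, the same layout _generate_exemples_CHARTONLY builds per stay.
def _example_preprocess_sketch():
    row = {
        "y_true": 0,
        "episode": {
            "Hours": [0.1, 1.5, 2.8],
            "Heart Rate": [80.0, np.nan, 95.0],
            "Glascow coma scale eye opening": ["4 Spontaneously", "", "3 To speech"],
        },
    }
    X, y = preprocess_to_learn(row, mode="window", window_period_length=4.0,
                               window_size=1.0, input_strategy="previous")
    return X.shape, y  #(4, 9): 4 one-hour bins, 1 numeric + 8 onehot features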
#######################################
# ITERATOR FROM DATASET #
#######################################
def my_generator(dataset,transform):
iterator = iter(dataset)
for x in iterator:
yield transform(x)
def mapped_iterabledataset(dataset, function):
return IterableDataset.from_generator(my_generator, gen_kwargs={"dataset": dataset,"transform":function})
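#Minimal sketch (assumption: a plain Python list standing in for a loaded split,
#and a trivial transform). mapped_iterabledataset lazily applies the transform to
#every example through a Hugging Face IterableDataset, which avoids materializing
#large splits in memory.
def _example_iterable_sketch():
    fake_split = [{"y_true": 0.0}, {"y_true": 1.0}]
    ds = mapped_iterabledataset(fake_split, lambda ex: {"label": ex["y_true"]})
    return list(ds)  #expected: [{'label': 0.0}, {'label': 1.0}]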
################################################################################
################################################################################
## ##
## DATASET CREATION AND DOWNLOADING ##
## ##
################################################################################
################################################################################
def do_listfile(task,subfolder,mimic3_benchmark_data_folder,mimic3_benchmark_new_data_folder,stays,inputevents,procedurevents,diagnoses,insurances):
file = subfolder+"_listfile.csv"
print("working on",task+"/"+file)
listfile = pd.read_csv(os.path.join(mimic3_benchmark_data_folder,file),sep=',')
listfile = listfile.sort_values(by=["stay"]) if not "period_length" in listfile else listfile.sort_values(by=["stay","period_length"])
subfolder = "train"
if "test" in file:
subfolder = "test"
to_save = []
if task == "mimic4-in-hospital-mortality":
for idx,(_,x) in enumerate(listfile.iterrows()):
print(get_progression(idx,len(listfile.index),length=20),str(round(100*idx/len(listfile.index),2))+"%",file,end="\r")
current_dict = {}
#Getting episode/subject ids
fname = x["stay"].split("_")
subject_id = fname[0]
episode_number = int(fname[1][7:])
#Getting current episode start date
current_ep_desc = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"root",subfolder,subject_id,"episode"+str(episode_number)+".csv"))
icustay_id = current_ep_desc.at[current_ep_desc.index[0],"Icustay"]
deathtime = stays.loc[stays["ICUSTAY_ID"] == icustay_id]
dt = np.nan
bd = np.nan
#Basic demographics (age, ethnicity and gender)
for _,y in deathtime.iterrows():
if isinstance(y["DEATHTIME"], str) and y["DEATHTIME"] != "":
dt = dtc(y["DEATHTIME"])
bd = dtc(y["INTIME"])
current_dict["age"] = y["AGE"]
current_dict["ethnicity"] = y["ETHNICITY"]
current_dict["gender"] = y["GENDER"]
current_dict["insurance"] = insurances.loc[insurances["HADM_ID"] == y["HADM_ID"]]["INSURANCE"].iloc[0]
#Checking whether the patient died, and whether the data is valid
valid = True
if isinstance(dt, datetime):
sec = (dt - bd).total_seconds() >= 54*3600
if sec:
current_dict["label"] = 1
else:
valid = False
else:
current_dict["label"] = 0
if not valid:
continue
#Building diagnoses
current_diags = diagnoses[diagnoses["ICUSTAY_ID"] == icustay_id]
ICD9_list = []
for _,icd_code in current_diags.iterrows():
ICD9_list.append(icd_code["ICD9_CODE"])
current_dict["Cond"] = {"fids":ICD9_list}
def map_date(date):
if isinstance(date,datetime):
return (date - bd).total_seconds()/3600.0
else:
return date
#Building procedurevents
pde = procedurevents[procedurevents["ICUSTAY_ID"] == icustay_id].applymap(map_date,na_action="ignore")
current_dict["Proc"] = normalize_onehot_episodes_window(pde.to_dict(orient='list'), value_column=False, code_column="ITEMID", period_length=48.0, window_size=1)
#Building inputevents
ie = inputevents[inputevents["ICUSTAY_ID"] == icustay_id].applymap(map_date,na_action="ignore")
current_dict["Med"] = normalize_onehot_episodes_window(ie.to_dict(orient='list'), value_column=True, code_column="ITEMID", period_length=48.0, window_size=1)
#Building chartevents
current_ep_charts = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"in-hospital-mortality",subfolder,x["stay"])).to_dict(orient='list')
current_dict["Chart"] = normalize_episodes_window({"episode":current_ep_charts})
#The output events are in the chartevents
current_dict["Out"] = {}
to_save.append(current_dict)
else:
for idx,(_,x) in enumerate(listfile.iterrows()):
print(get_progression(idx,len(listfile.index),length=20),str(round(100*idx/len(listfile.index),2))+"%",file,end="\r")
to_save.append(x)
os.makedirs(mimic3_benchmark_new_data_folder,exist_ok=True)
with open(os.path.join(mimic3_benchmark_new_data_folder,file[:-3]+"pkl"), "wb+") as fp:
pickle.dump(to_save,fp,pickle.HIGHEST_PROTOCOL)
def generate_dics(diagnoses, inputevents, procedurevents, insurances, stays, mimic3_path):
#Diagnoses dictionary
if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"icd_dict.csv")):
print("creating icd indexes")
#Loading Diagnoses
used_col = ["ICD9_CODE","SHORT_TITLE","LONG_TITLE"]
dtype = {"ICD9_CODE":str,"SHORT_TITLE":str,"LONG_TITLE":str}
dcsv = pd.read_csv(mimic3_path+"/D_ICD_DIAGNOSES.csv",sep=',',usecols=used_col,dtype=dtype)
print("icd resources loaded")
dic = {}
for _,row in diagnoses.iterrows():
if not row["ICD9_CODE"] in dic:
fif = dcsv.loc[dcsv["ICD9_CODE"] == row["ICD9_CODE"]]
dic[row["ICD9_CODE"]] = {"SHORT_TITLE":fif["SHORT_TITLE"].values[0],"LONG_TITLE":fif["LONG_TITLE"].values[0]}
with open(os.path.join(DATASET_SAVE_PATH,'icd_dict.csv'), 'w') as f:
f.write("ICD9_CODE,SHORT_TITLE,LONG_TITLE\n")
for key in dic.keys():
f.write("%s,\"%s\",\"%s\"\n"%(key,dic[key]["SHORT_TITLE"],dic[key]["LONG_TITLE"]))
#itemids dictionary
if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"ie_itemid_dict.csv")):
print("creating itemid indexes")
#Loading itemids
used_col = ["ITEMID","LABEL","ABBREVIATION"]
dtype = {"ITEMID":int,"LABEL":str,"ABBREVIATION":str}
itemidcsv = pd.read_csv(mimic3_path+"/D_ITEMS.csv",sep=',',usecols=used_col,dtype=dtype)
print("itemid resources loaded")
dic = {}
for _,row in inputevents.iterrows():
if not row["ITEMID"] in dic:
fif = itemidcsv.loc[itemidcsv["ITEMID"] == row["ITEMID"]]
dic[row["ITEMID"]] = {"LABEL":fif["LABEL"].values[0],"ABBREVIATION":fif["ABBREVIATION"].values[0]}
with open(os.path.join(DATASET_SAVE_PATH,'ie_itemid_dict.csv'), 'w') as f:
f.write("ITEMID,LABEL,ABBREVIATION\n")
for key in dic.keys():
f.write("%s,\"%s\",\"%s\"\n"%(key,dic[key]["ABBREVIATION"],dic[key]["LABEL"]))
dic = {}
for _,row in procedurevents.iterrows():
if not row["ITEMID"] in dic:
fif = itemidcsv.loc[itemidcsv["ITEMID"] == row["ITEMID"]]
dic[row["ITEMID"]] = {"LABEL":fif["LABEL"].values[0],"ABBREVIATION":fif["ABBREVIATION"].values[0]}
with open(os.path.join(DATASET_SAVE_PATH,'pe_itemid_dict.csv'), 'w') as f:
f.write("ITEMID,LABEL,ABBREVIATION\n")
for key in dic.keys():
f.write("%s,\"%s\",\"%s\"\n"%(key,dic[key]["ABBREVIATION"],dic[key]["LABEL"]))
#insurances dictionary
if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"insurances_dict.csv")):
print("creating insurances indexes")
dic = {}
index = 0
for _,row in insurances.iterrows():
if not row["INSURANCE"] in dic:
dic[row["INSURANCE"]] = index
index += 1
with open(os.path.join(DATASET_SAVE_PATH,'insurances_dict.csv'), 'w') as f:
f.write("INSURANCE,INDEX\n")
for key in dic.keys():
f.write("\"%s\",%s\n"%(key,dic[key]))
#gender dictionary
if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"genders_dict.csv")):
print("creating genders indexes")
dic = {}
index = 0
for _,row in stays.iterrows():
if not row["GENDER"] in dic:
dic[row["GENDER"]] = index
index += 1
with open(os.path.join(DATASET_SAVE_PATH,'genders_dict.csv'), 'w') as f:
f.write("GENDER,INDEX\n")
for key in dic.keys():
f.write("\"%s\",%s\n"%(key,dic[key]))
#age dictionary
if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"ages_dict.csv")):
print("creating ages indexes")
dic = {}
index = 0
for _,row in stays.iterrows():
if not round(row["AGE"]) in dic:
dic[round(row["AGE"])] = index
index += 1
with open(os.path.join(DATASET_SAVE_PATH,'ages_dict.csv'), 'w') as f:
f.write("AGE,INDEX\n")
for key in dic.keys():
f.write("%s,%s\n"%(key,dic[key]))
#ethnicity dictionary
if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,"ethnicities_dict.csv")):
print("creating ethnicities indexes")
dic = {}
index = 0
for _,row in stays.iterrows():
if not row["ETHNICITY"] in dic:
dic[row["ETHNICITY"]] = index
index += 1
with open(os.path.join(DATASET_SAVE_PATH,'ethnicities_dict.csv'), 'w') as f:
f.write("ETHNICITY,INDEX\n")
for key in dic.keys():
f.write("\"%s\",%s\n"%(key,dic[key]))
def clean_units(df):
df.loc[df["AMOUNTUOM"].isin(["grams","L"]),"AMOUNT"] = df.loc[df["AMOUNTUOM"].isin(["grams","L"]),"AMOUNT"].apply((lambda x:x*1000))
df.loc[df["AMOUNTUOM"].isin(["ounces"]),"AMOUNT"] = df.loc[df["AMOUNTUOM"].isin(["ounces"]),"AMOUNT"].apply((lambda x:x*28.3495*1000))
df.loc[df["AMOUNTUOM"].isin(["uL"]),"AMOUNT"] = df.loc[df["AMOUNTUOM"].isin(["uL"]),"AMOUNT"].apply((lambda x:x/1000))
df.loc[df["AMOUNTUOM"].isin(["mlhr","Hours"]),"AMOUNT"] = df.loc[df["AMOUNTUOM"].isin(["mlhr","Hours"]),"AMOUNT"].apply((lambda x:x/60))
df.loc[df["RATEUOM"].isin(["mLhour","unitshour","mcghour","mcgkghour","mgkghour","mLkghour","mEq.hour"]),"RATE"] = df.loc[df["RATEUOM"].isin(["mLhour","unitshour","mcghour","mcgkghour","mgkghour","mLkghour","mEq.hour"]),"RATE"].apply((lambda x:x/60))
df.loc[df["RATEUOM"].isin(["gramshour"]),"RATE"] = df.loc[df["RATEUOM"].isin(["gramshour"]),"RATE"].apply((lambda x:x*1000/60))
df.loc[df["RATEUOM"].isin(["gramsmin","gramskgmin"]),"RATE"] = df.loc[df["RATEUOM"].isin(["gramsmin","gramskgmin"]),"RATE"].apply((lambda x:x*1000))
def load_mimic3_files(mimic3_dir):
#Loading inputevents
used_col = ["SUBJECT_ID","ICUSTAY_ID","CHARTTIME","ITEMID","AMOUNT","AMOUNTUOM","RATE","RATEUOM"]
dtype = {"AMOUNTUOM":str,"RATEUOM":str}
converters={"SUBJECT_ID":bic,"ICUSTAY_ID":bic,"CHARTTIME":dtc,"ITEMID":bic,"AMOUNT":bfc,"RATE":bfc}
inputevents = pd.read_csv(mimic3_dir+"/INPUTEVENTS_CV.csv",sep=',',usecols=used_col,dtype=dtype,converters=converters)
inputevents.rename(columns={"CHARTTIME": "STARTTIME"}, inplace=True)
print("inputevents 1/2 loaded")
used_col = ["SUBJECT_ID","ICUSTAY_ID","STARTTIME","ENDTIME","ITEMID","AMOUNT","AMOUNTUOM","RATE","RATEUOM"]
dtype = {"AMOUNTUOM":str,"RATEUOM":str}
converters={"SUBJECT_ID":bic,"ICUSTAY_ID":bic,"STARTTIME":dtc,"ENDTIME":dtc,"ITEMID":bic,"AMOUNT":bfc,"RATE":bfc}
inputevents_2 = pd.read_csv(mimic3_dir+"/INPUTEVENTS_MV.csv",sep=',',usecols=used_col,dtype=dtype,converters=converters)
inputevents = pd.concat([inputevents,inputevents_2])
inputevents.drop(inputevents[(inputevents["SUBJECT_ID"] == -1) | (inputevents["ICUSTAY_ID"] == -1)].index, inplace=True)
clean_units(inputevents)
print("inputevents 2/2 loaded")
#Loading procedurevents
used_col = ["SUBJECT_ID","ICUSTAY_ID","STARTTIME","ENDTIME","ITEMID"]
converters={"SUBJECT_ID":bic,"ICUSTAY_ID":bic,"STARTTIME":dtc,"ENDTIME":dtc,"ITEMID":bic}
procedurevents = pd.read_csv(mimic3_dir+"/PROCEDUREEVENTS_MV.csv",sep=',',usecols=used_col,converters=converters)
procedurevents.drop(procedurevents[(procedurevents["SUBJECT_ID"] == -1) | (procedurevents["ICUSTAY_ID"] == -1)].index, inplace=True)
print("procedurevents loaded")
#Loading Diagnoses
used_col = ["SUBJECT_ID","SEQ_NUM","ICD9_CODE","ICUSTAY_ID"]
dtype = {"ICD9_CODE":str}
converters={"SUBJECT_ID":bic,"SEQ_NUM":bic,"ICUSTAY_ID":bic}
diagnoses = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"root","all_diagnoses.csv"),sep=',',usecols=used_col,dtype=dtype,converters=converters)
print("diagnoses loaded")
#Loading stays
used_col = ["SUBJECT_ID","HADM_ID","ICUSTAY_ID","INTIME","DEATHTIME","ETHNICITY","GENDER","AGE"]
dtype = {"INTIME":str,"DEATHTIME":str,"ETHNICITY":str,"GENDER":str}
converters={"SUBJECT_ID":bic,"HADM_ID":bic,"ICUSTAY_ID":bic,"AGE":bfc}
stays = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"root","all_stays.csv"),sep=',',usecols=used_col,dtype=dtype,converters=converters)
print("stays loaded")
#Loading insurances
used_col = ["SUBJECT_ID","HADM_ID","INSURANCE"]
dtype = {"INSURANCE":str}
converters={"SUBJECT_ID":bic,"HADM_ID":bic}
insurances = pd.read_csv(mimic3_dir+"/ADMISSIONS.csv",sep=',',usecols=used_col,dtype=dtype,converters=converters)
print("insurances loaded")
generate_dics(diagnoses, inputevents, procedurevents, insurances, stays, mimic3_dir)
diagnoses.drop(diagnoses[(diagnoses["SUBJECT_ID"] == -1) | (diagnoses["ICUSTAY_ID"] == -1)].index, inplace=True)
#ICD9_CODE is read as str, so the excluded codes must be compared as strings
diagnoses.drop(diagnoses[(diagnoses["ICD9_CODE"] == "7981") | (diagnoses["ICD9_CODE"] == "7982") | (diagnoses["ICD9_CODE"] == "7989")].index, inplace=True)
diagnoses["Hours"] = 0
diagnoses = diagnoses.sort_values(by="SEQ_NUM")
return stays,inputevents,procedurevents,diagnoses,insurances
def do_directory_cleaning(current_file):
if "ICD9_CODE" in current_file:
current_file["ICD9_CODE"] = current_file["ICD9_CODE"].apply(id_to_string)
#Cleaning
current_file.loc[current_file["AMOUNT"] == -1, "AMOUNT"] = np.nan
current_file.loc[current_file["RATE"] == -1, "RATE"] = np.nan
current_file["ITEMID"] = current_file["ITEMID"].astype(pd.Int64Dtype())
if "SEQ_NUM" in current_file:
current_file["SEQ_NUM"] = current_file["SEQ_NUM"].astype(pd.Int64Dtype())
clean_units(current_file)
current_file = current_file.drop(["AMOUNTUOM","RATEUOM"], axis=1)
return current_file
def load_mimic3_benchmark(mimic3_path):
mimic3_path = os.path.join(os.getcwd(),mimic3_path)
starting_dir = os.getcwd()
os.chdir(DATASET_SAVE_PATH)
print("Starting preprocessing of raw mimic3 data...")
if not os.path.isdir("mimic3-benchmarks"):
print("MIMIC3-BENCHMARK Data not found... Loading mimic3-benchmark github...")
os.system('git clone https://github.com/YerevaNN/mimic3-benchmarks.git')
if not os.path.isdir("mimic3-benchmarks"):
print("Could not load the github... Exiting...")
exit(1)
os.chdir("mimic3-benchmarks")
print("Preprocessing of data... This step may take hours.")
print("Extracting subjects...")
os.system("python -m mimic3benchmark.scripts.extract_subjects "+mimic3_path+" ../root/")
print("Fixing issues...")
os.system("python -m mimic3benchmark.scripts.validate_events ../root/")
print("Extracting episodes...")
os.system("python -m mimic3benchmark.scripts.extract_episodes_from_subjects ../root/")
print("Splitting train and test...")
os.system("python -m mimic3benchmark.scripts.split_train_and_test ../root/")
print("Creating specific tasks")
os.system("python -m mimic3benchmark.scripts.create_in_hospital_mortality ../root/ ../in-hospital-mortality/")
os.system("python -m mimic3benchmark.scripts.create_decompensation ../root/ ../decompensation/")
os.system("python -m mimic3benchmark.scripts.create_length_of_stay ../root/ ../length-of-stay/")
os.system("python -m mimic3benchmark.scripts.create_phenotyping ../root/ ../phenotyping/")
os.system("python -m mimic3benchmark.scripts.create_multitask ../root/ ../multitask/")
print("Splitting validation...")
os.system("python -m mimic3models.split_train_val ../in-hospital-mortality/")
os.system("python -m mimic3models.split_train_val ../decompensation/")
os.system("python -m mimic3models.split_train_val ../length-of-stay/")
os.system("python -m mimic3models.split_train_val ../phenotyping/")
os.system("python -m mimic3models.split_train_val ../multitask/")
os.chdir(starting_dir)
def preprocess(task,mimic3_dir=None):
origin_task = task
if "mimic4-" in task:
origin_task = task[7:]
original_task_path = os.path.join(DATASET_SAVE_PATH,origin_task)
print("needs",original_task_path,"to generate the new task...")
if not os.path.isdir(original_task_path):
if mimic3_dir == None:
mimic3_dir = input("Preprocessing has to be done, please enter mimic3's path : ")
if not os.path.isdir(mimic3_dir):
print("Could not load mimic3 files...")
exit(1)
load_mimic3_benchmark(mimic3_dir)
loaded,inputevents,procedurevents,diagnoses = False,None,None,None
mimic3_benchmark_data_folder,mimic3_benchmark_new_data_folder = None,None
if "mimic4-" in task:
print("the requested task is a mimic4-benchmark task...")
#Data folder
mimic3_benchmark_data_folder = os.path.join(DATASET_SAVE_PATH,task[7:])
#New data folder
mimic3_benchmark_new_data_folder = os.path.join(DATASET_SAVE_PATH,task)
for subfolder in ["train","test","val"]:
print("checking subfolder",subfolder)
#Loading the mimic3 files for modification
if not os.path.isfile(os.path.join(DATASET_SAVE_PATH,task,subfolder+"_listfile.pkl")):
if not loaded:
if mimic3_dir == None:
mimic3_dir = input("preprocessing has to be done, please enter mimic3's path : ")
if not os.path.isdir(mimic3_dir):
print("Could not load mimic3 files...")
exit(1)
print("this task does not exist yet... loading required files to create the task. this may take 20 minutes")
stays,inputevents,procedurevents,diagnoses,insurances = load_mimic3_files(mimic3_dir)
loaded = True
print("creating the subfolder",subfolder,"| estimated time : 1h")
do_listfile(task, subfolder, mimic3_benchmark_data_folder, mimic3_benchmark_new_data_folder, stays, inputevents, procedurevents, diagnoses, insurances)
if not os.path.isfile("icd_dict.csv"):
if mimic3_dir == None:
mimic3_dir = input("preprocessing has to be done, please enter mimic3's path : ")
if not os.path.isdir(mimic3_dir):
print("Could not load mimic3 files...")
exit(1)
print("loading data and creating dicts...")
load_mimic3_files(mimic3_dir)
################################################################################
################################################################################
## ##
## HUGGING FACE DATASET ##
## ##
################################################################################
################################################################################
class Mimic3DatasetConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
super().__init__(**kwargs)
class Mimic3Benchmark_Dataset(datasets.GeneratorBasedBuilder):
def __init__(self, **kwargs):
self.code_to_onehot=kwargs.pop("code_to_onehot",True)
self.episode_filter=kwargs.pop("episode_filter",None)
self.mode=kwargs.pop("mode","statistics")
self.window_period_length=kwargs.pop("window_period_length",48.0)
self.window_size=kwargs.pop("window_size",0.7)
self.empty_value=kwargs.pop("empty_value",np.nan)
self.input_strategy=kwargs.pop("input_strategy",None)
self.add_mask_columns=kwargs.pop("add_mask_columns",False)
self.statistics_mode_column_scale=kwargs.pop("statistics_mode_column_scale",True)
self.mimic3_path=kwargs.pop("mimic3_path",None)
self.mimic4_text_demos = kwargs.pop("mimic4_text_demos",True)
self.mimic4_text_charts = kwargs.pop("mimic4_text_charts",True)
self.mimic4_text_meds = kwargs.pop("mimic4_text_meds",True)
self.mimic4_text_cond = kwargs.pop("mimic4_text_cond",True)
self.mimic4_text_procs = kwargs.pop("mimic4_text_procs",True)
self.full_meds_loaded = False
self.full_proc_loaded = False
self.full_cond_loaded = False
self.full_gens_loaded = False
self.full_ages_loaded = False
self.full_eths_loaded = False
self.full_ins_loaded = False
super().__init__(**kwargs)
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
Mimic3DatasetConfig(name="in-hospital-mortality", version=VERSION, description="This dataset covers the in-hospital-mortality benchmark of mimiciii-benchmark"),
Mimic3DatasetConfig(name="decompensation", version=VERSION, description="This dataset covers the decompensation benchmark of mimiciii-benchmark"),
Mimic3DatasetConfig(name="length-of-stay", version=VERSION, description="This dataset covers the length-of-stay benchmark of mimiciii-benchmark"),
Mimic3DatasetConfig(name="multitask", version=VERSION, description="This dataset covers the multitask benchmark of mimiciii-benchmark"),
Mimic3DatasetConfig(name="phenotyping", version=VERSION, description="This dataset covers the phenotyping benchmark of mimiciii-benchmark"),
Mimic3DatasetConfig(name="mimic4-in-hospital-mortality", version=VERSION, description="This dataset covers the mimic4-in-hospital-mortality benchmark of mimiciii-benchmark"),
]
def _info(self):
if self.config.name in ["in-hospital-mortality", "decompensation", "phenotyping", "mimic4-in-hospital-mortality", "length-of-stay"]:
if self.config.name == "phenotyping":
return datasets.DatasetInfo(
description="Dataset "+self.config.name,
features=datasets.Features(
{
"Acute and unspecified renal failure": datasets.Value("float"),
"Acute cerebrovascular disease": datasets.Value("float"),
"Acute myocardial infarction": datasets.Value("float"),
"Cardiac dysrhythmias": datasets.Value("float"),
"Chronic kidney disease": datasets.Value("float"),
"Chronic obstructive pulmonary disease and bronchiectasis": datasets.Value("float"),
"Complications of surgical procedures or medical care": datasets.Value("float"),
"Conduction disorders": datasets.Value("float"),
"Congestive heart failure; nonhypertensive": datasets.Value("float"),
"Coronary atherosclerosis and other heart disease": datasets.Value("float"),
"Diabetes mellitus with complications": datasets.Value("float"),
"Diabetes mellitus without complication": datasets.Value("float"),
"Disorders of lipid metabolism": datasets.Value("float"),
"Essential hypertension": datasets.Value("float"),
"Fluid and electrolyte disorders": datasets.Value("float"),
"Gastrointestinal hemorrhage": datasets.Value("float"),
"Hypertension with complications and secondary hypertension": datasets.Value("float"),
"Other liver diseases": datasets.Value("float"),
"Other lower respiratory disease": datasets.Value("float"),
"Other upper respiratory disease": datasets.Value("float"),
"Pleurisy; pneumothorax; pulmonary collapse": datasets.Value("float"),
"Pneumonia (except that caused by tuberculosis or sexually transmitted disease)": datasets.Value("float"),
"Respiratory failure; insufficiency; arrest (adult)": datasets.Value("float"),
"Septicemia (except in labor)": datasets.Value("float"),
"Shock": datasets.Value("float"),
"episode": datasets.Array2D(shape=(None,None), dtype=float)
}),
homepage="",
license="",
citation="",
)
elif self.config.name == "mimic4-in-hospital-mortality" and self.mode in ["mimic4-aggreg"]:
return datasets.DatasetInfo(
description="Dataset "+self.config.name,
features = datasets.Features(
{
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
"features" : datasets.Sequence(datasets.Value("float32")),
"columns": datasets.Sequence(datasets.Value("string"))
}
),
homepage="",
license="",
citation="",)
elif self.config.name == "mimic4-in-hospital-mortality" and self.mode == "mimic4-naive-prompt":
return datasets.DatasetInfo(
description="Dataset "+self.config.name,
features = datasets.Features(
{
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
"features" : datasets.Value(dtype='string', id=None),
}
),
homepage="",
license="",
citation="",)
elif self.config.name == "mimic4-in-hospital-mortality" and self.mode == "mimic4-tensor":
return datasets.DatasetInfo(
description="Dataset "+self.config.name,
features = datasets.Features(
{
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
"DEMO": datasets.Sequence(datasets.Value("int64")),
"COND" : datasets.Sequence(datasets.Value("int64")),
"MEDS" : datasets.Array2D(shape=(None, None), dtype='int64') ,
"PROC" : datasets.Array2D(shape=(None, None), dtype='int64') ,
"CHART/LAB" : datasets.Array2D(shape=(None, None), dtype='int64')
}
),
homepage="",
license="",
citation="",)
return datasets.DatasetInfo(
description="Dataset "+self.config.name,
features=datasets.Features(
{
"y_true": datasets.Value("float"),
"episode": datasets.Array2D(shape=(None,None), dtype=float)
}),
homepage="",
license="",
citation="",
)
def _split_generators(self, dl_manager):
self.path = os.path.join(DATASET_SAVE_PATH,self.config.name)
preprocess(self.config.name,self.mimic3_path)
if "mimic4" in self.config.name:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath":os.path.join(self.path,"train_listfile.pkl"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath":os.path.join(self.path,"val_listfile.pkl"),
"split": "validation",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath":os.path.join(self.path,"test_listfile.pkl"),
"split": "test"
},
),
]
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath":os.path.join(self.path,"train_listfile.csv"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath":os.path.join(self.path,"val_listfile.csv"),
"split": "validation",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath":os.path.join(self.path,"test_listfile.csv"),
"split": "test"
},
),
]
def _generate_exemples_CHARTONLY(self, filepath):
key = 0
with open(filepath, encoding="utf-8") as f:
reader1 = csv.DictReader(f)
for data in reader1:
y_trues = {}
for e in data:
if e != "period_length" and e != "stay":
y_trues[e] = data[e]
if "period_length" in data:
period_length = float(data["period_length"])
else:
period_length = self.window_period_length
stay = data["stay"]
if os.path.isfile(os.path.join(self.path,"test",stay)):
stay = os.path.join(self.path,"test",stay)
else:
stay = os.path.join(self.path,"train",stay)
# stay = self.path+"/train/30820_episode1_timeseries.csv"
# period_length = 42.0
episode = {
"Hours": [],
"Capillary refill rate": [],
"Diastolic blood pressure": [],
"Fraction inspired oxygen": [],
"Glascow coma scale eye opening": [],
"Glascow coma scale motor response": [],
"Glascow coma scale total": [],
"Glascow coma scale verbal response": [],
"Glucose": [],
"Heart Rate": [],
"Height": [],
"Mean blood pressure": [],
"Oxygen saturation": [],
"Respiratory rate": [],
"Systolic blood pressure": [],
"Temperature": [],
"Weight": [],
"pH": [],
}
with open(stay, encoding="utf-8") as f2:
reader2 = csv.DictReader(f2)
for data2 in reader2:
if self.config.name in ["length-of-stay","decompensation"] and float(data2["Hours"]) > period_length + 1e-6:
break
episode["Hours"].append(float(data2["Hours"]) if data2["Hours"] else 0.0)
episode["Capillary refill rate"].append(float(data2["Capillary refill rate"]) if data2["Capillary refill rate"] else np.nan)
episode["Diastolic blood pressure"].append(float(data2["Diastolic blood pressure"]) if data2["Diastolic blood pressure"] else np.nan)
episode["Fraction inspired oxygen"].append(float(data2["Fraction inspired oxygen"]) if data2["Fraction inspired oxygen"] else np.nan)
episode["Glascow coma scale eye opening"].append(data2["Glascow coma scale eye opening"])
episode["Glascow coma scale motor response"].append(data2["Glascow coma scale motor response"])
episode["Glascow coma scale total"].append(float(data2["Glascow coma scale total"]) if data2["Glascow coma scale total"] else np.nan)
episode["Glascow coma scale verbal response"].append(data2["Glascow coma scale verbal response"])
episode["Glucose"].append(float(data2["Glucose"]) if data2["Glucose"] else np.nan)
episode["Heart Rate"].append(float(data2["Heart Rate"]) if data2["Heart Rate"] else np.nan)
episode["Height"].append(float(data2["Height"]) if data2["Height"] else np.nan)
episode["Mean blood pressure"].append(float(data2["Mean blood pressure"]) if data2["Mean blood pressure"] else np.nan)
episode["Oxygen saturation"].append(float(data2["Oxygen saturation"]) if data2["Oxygen saturation"] else np.nan)
episode["Respiratory rate"].append(float(data2["Respiratory rate"]) if data2["Respiratory rate"] else np.nan)
episode["Systolic blood pressure"].append(float(data2["Systolic blood pressure"]) if data2["Systolic blood pressure"] else np.nan)
episode["Temperature"].append(float(data2["Temperature"]) if data2["Temperature"] else np.nan)
episode["Weight"].append(float(data2["Weight"]) if data2["Weight"] else np.nan)
episode["pH"].append(float(data2["pH"]) if data2["pH"] else np.nan)
X,Y = preprocess_to_learn(
{
"episode":episode
},
code_to_onehot=self.code_to_onehot,
episode_filter=self.episode_filter,
mode=self.mode,
window_size=self.window_size,
empty_value=self.empty_value,
input_strategy=self.input_strategy,
add_mask_columns=self.add_mask_columns,
statistics_mode_column_scale=self.statistics_mode_column_scale,
window_period_length=period_length
)
# print(np.around(X.flatten(),4).tolist())
# exit(0)
y_trues["episode"] = X
yield key, y_trues
key += 1
##################################################################################################################################################
#### FULL EXAMPLE GENERATION, TENSOR MODE (CHARTS + INPUTEVENTS + DIAGNOSES) ##### FROM THOURIA ##############################################
##################################################################################################################################################
def load_vocab(self):
if self.full_gens_loaded == False:
self.full_gens = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"genders_dict.csv"))["GENDER"].tolist()
self.full_gens_loaded = True
self.full_gens_len = len(self.full_gens)
self.full_gens_reverse = {k: v for v, k in enumerate(self.full_gens)}
if self.full_eths_loaded == False:
self.full_eths = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"ethnicities_dict.csv"))["ETHNICITY"].tolist()
self.full_eths_loaded = True
self.full_eths_len = len(self.full_eths)
self.full_eths_reverse = {k: v for v, k in enumerate(self.full_eths)}
if self.full_ins_loaded == False:
self.full_ins = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"insurances_dict.csv"))["INSURANCE"].tolist()
self.full_ins_loaded = True
self.full_ins_len = len(self.full_ins)
self.full_ins_reverse = {k: v for v, k in enumerate(self.full_ins)}
if self.full_cond_loaded == False:
self.full_cond = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"icd_dict.csv"),names=["COND","SHORT","LONG"],skiprows=1)
self.full_cond_loaded = True
self.full_cond_len = len(self.full_cond)
if self.full_proc_loaded == False:
self.full_proc = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"pe_itemid_dict.csv"),names=["PROC","SHORT","LONG"],skiprows=1)
self.full_proc_loaded = True
self.full_proc_len = len(self.full_proc["PROC"])
if self.full_meds_loaded == False:
self.full_meds = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"ie_itemid_dict.csv"),names=["MEDS","LONG","SHORT"],skiprows=1)
self.full_meds_loaded = True
self.full_meds_len = len(self.full_meds["MEDS"])
if self.full_ages_loaded == False:
self.full_ages = pd.read_csv(os.path.join(DATASET_SAVE_PATH,"ages_dict.csv"),names=["AGE","INDEX"],skiprows=1)["AGE"]
self.full_ages_loaded = True
self.full_ages_len = len(self.full_ages)
self.full_ages_reverse = {k: v for v, k in enumerate(self.full_ages)}
self.chartDic = pd.DataFrame({"CHART":["Capillary refill rate","Diastolic blood pressure","Fraction inspired oxygen","Glascow coma scale eye opening","Glascow coma scale motor response","Glascow coma scale total","Glascow coma scale verbal response","Glucose","Heart Rate","Height","Mean blood pressure","Oxygen saturation","Respiratory rate","Systolic blood pressure","Temperature","Weight","pH"]})
def generate_deep(self,data):
dyn,cond_df,demo=self.concat_data(data)
charts = dyn['CHART'].values
meds = dyn['MEDS'].values
proc = dyn['PROC'].values
stat = cond_df.values[0]
y = int(demo['label'])
demo["gender"].replace(self.full_gens_reverse, inplace=True)
demo["ethnicity"].replace(self.full_eths_reverse, inplace=True)
demo["insurance"].replace(self.full_ins_reverse, inplace=True)
demo["Age"] = demo["Age"].round()
demo["Age"].replace(self.full_ages_reverse, inplace=True)
demo = demo[["gender","ethnicity","insurance","Age"]].values[0]
return stat, demo, meds, charts, proc, y
def _generate_examples_deep(self, filepath):
self.load_vocab()
with open(filepath, 'rb') as fp:
dico = pickle.load(fp)
for key, data in enumerate(dico):
stat, demo, meds, chart, proc, y = self.generate_deep(data)
yielded = {
'label': y,
'DEMO': demo,
'COND': stat,
'MEDS': meds,
'PROC': proc,
'CHART/LAB': chart,
}
yield int(key), yielded
##################################################################################################################################################
#### FULL EXAMPLE GENERATION, CONCAT/AGGREG MODE (CHARTS + INPUTEVENTS + DIAGNOSES) ##### FROM THOURIA #######################################
##################################################################################################################################################
def concat_data(self,data):
meds = data['Med']
proc = data['Proc']
chart = codes_to_int(input_values(data['Chart']))
cond = data['Cond']['fids']
cond_df,proc_df,chart_df,meds_df=pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),pd.DataFrame()
#demographic
demo=pd.DataFrame(columns=['Age','gender','ethnicity','label','insurance'])
new_row = {'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'], 'label': data['label'], 'insurance': data['insurance']}
demo = pd.concat([demo, pd.DataFrame([new_row])], ignore_index=True) #DataFrame.append was removed in pandas 2.x
##########COND#########
#get all conds
features=pd.DataFrame(np.zeros([1,len(self.full_cond)]),columns=self.full_cond['COND'])
#onehot encode
cond_df = pd.DataFrame(cond,columns=['COND'])
cond_df['val'] = 1
cond_df = (cond_df.drop_duplicates()).pivot(columns='COND',values='val').reset_index(drop=True)
cond_df = cond_df.fillna(0)
oneh = cond_df.sum().to_frame().T
combined_df = pd.concat([features,oneh],ignore_index=True).fillna(0)
combined_oneh = combined_df.sum().to_frame().T
cond_df = combined_oneh
for c in cond_df.columns :
if c not in features:
cond_df = cond_df.drop(columns=[c])
##########PROC#########
feat=proc.keys()
proc_val=[proc[key] for key in feat]
procedures=pd.DataFrame(self.full_proc["PROC"],columns=['PROC'])
features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC'])
features.columns=pd.MultiIndex.from_product([["PROC"], features.columns])
procs=pd.DataFrame(columns=feat)
for p,v in zip(feat,proc_val):
procs[p]=v
procs.columns=pd.MultiIndex.from_product([["PROC"], procs.columns])
proc_df = pd.concat([features,procs],ignore_index=True).fillna(0)
##########CHART#########
feat=chart.keys()
chart_val=[chart[key] for key in feat]
charts=pd.DataFrame(self.chartDic,columns=['CHART'])
features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART'])
features.columns=pd.MultiIndex.from_product([["CHART"], features.columns])
chart=pd.DataFrame(columns=feat)
for c,v in zip(feat,chart_val):
chart[c]=v
chart.columns=pd.MultiIndex.from_product([["CHART"], chart.columns])
chart_df = pd.concat([features,chart],ignore_index=True).fillna(0)
###MEDS
feat=[str(x) for x in meds.keys()]
med_val=[meds[int(key)] for key in feat]
meds=[str(x) for x in self.full_meds["MEDS"]]
features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds)
features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns])
med=pd.DataFrame(columns=feat)
for m,v in zip(feat,med_val):
med[m]=v
med.columns=pd.MultiIndex.from_product([["MEDS"], med.columns])
meds_df = pd.concat([features,med],ignore_index=True).fillna(0)
dyn_df = pd.concat([meds_df,proc_df,chart_df], axis=1)
return dyn_df,cond_df,demo
def _generate_ml(self,dyn,stat,demo,concat_cols,concat):
X_df=pd.DataFrame()
if concat:
dyna=dyn.copy()
dyna.columns=dyna.columns.droplevel(0)
dyna=dyna.to_numpy()
dyna=np.nan_to_num(dyna, copy=False)
dyna=dyna.reshape(1,-1)
#dyn_df=pd.DataFrame(data=dyna,columns=concat_cols)
dyn_df=pd.DataFrame(data=dyna)
else:
dyn_df=pd.DataFrame()
for key in dyn.columns.levels[0]:
dyn_temp=dyn[key]
if ((key=="CHART") or (key=="MEDS")):
agg=dyn_temp.aggregate("mean")
agg=agg.reset_index()
else:
agg=dyn_temp.aggregate("max")
agg=agg.reset_index()
if dyn_df.empty:
dyn_df=agg
else:
dyn_df=pd.concat([dyn_df,agg],axis=0)
dyn_df=dyn_df.T
dyn_df.columns = dyn_df.iloc[0]
dyn_df=dyn_df.iloc[1:,:]
X_df=pd.concat([dyn_df,stat],axis=1)
X_df=pd.concat([X_df,demo],axis=1)
return X_df
def _generate_examples_encoded(self, filepath, concat):
self.load_vocab()
gen_encoder,eth_encoder,ins_encoder = LabelEncoder(),LabelEncoder(),LabelEncoder()
gen_encoder.fit(self.full_gens)
eth_encoder.fit(self.full_eths)
ins_encoder.fit(self.full_ins)
with open(filepath, 'rb') as fp:
dico = pickle.load(fp)
df = pd.DataFrame(dico)
for i, data in df.iterrows():
concat_cols=[]
dyn_df,cond_df,demo=self.concat_data(data)
dyn=dyn_df.copy()
dyn.columns=dyn.columns.droplevel(0)
cols=dyn.columns
time=dyn.shape[0]
# for t in range(time):
# cols_t = [str(x) + "_"+str(t) for x in cols]
# concat_cols.extend(cols_t)
demo['gender']=gen_encoder.transform(demo['gender'])
demo['ethnicity']=eth_encoder.transform(demo['ethnicity'])
demo['insurance']=ins_encoder.transform(demo['insurance'])
label = data['label']
demo = demo.drop(['label'],axis=1)
X = self._generate_ml(dyn = dyn_df, stat = cond_df, demo = demo, concat_cols = concat_cols, concat = concat)
columns = X.columns
X = X.values.tolist()[0]
yield int(i), {
"label": label,
"features": X,
"columns":columns
}
def _generate_examples_text(self, filepath):
self.load_vocab()
with open(filepath, 'rb') as fp:
dico = pickle.load(fp)
for i, data in enumerate(dico):
#adding demographic information
age = str(round(data['age']))
gender = str(data['gender'])
if gender == "M":
gender = "male"
elif gender == "F":
gender = "female"
ethnicity = str(data['ethnicity'])
insurance = str(data['insurance'])
X = ""
if self.mimic4_text_demos or self.mimic4_text_cond:
X = "The patient "
if self.mimic4_text_demos:
if self.mimic4_text_cond:
X += "("+ethnicity+" "+gender+", "+age+" years old, covered by "+insurance+") "
else:
X += "is "+ethnicity+" "+gender+", "+age+" years old, covered by "+insurance+". "
#adding diagnosis
if self.mimic4_text_cond:
X += "was diagnosed with "
cond = data['Cond']['fids']
for idx,c in enumerate(cond):
X += self.full_cond.loc[self.full_cond["COND"] == str(c)]["LONG"].values[0]+("; " if idx+1 < len(cond) else ". ")
#removing nan chart values and aggregating
if self.mimic4_text_charts:
for x in data["Chart"]:
data["Chart"][x] = [xi for xi in data["Chart"][x] if not (xi == "" or (isinstance(xi,float) and np.isnan(xi)))]
data["Chart"] = codes_to_int(data["Chart"])
chart = {x:round(np.mean([it for it in data['Chart'][x]]),3) for x in data["Chart"] if len(data["Chart"][x]) > 0}
#special columns for chartevents
for col in ["Glascow coma scale eye opening","Glascow coma scale motor response","Glascow coma scale verbal response"]:
if not col in chart:
continue
chart[col] = int(round(chart[col]))
for dtem in discretizer[col]:
if dtem[1] == chart[col]:
chart[col] = dtem[0][-1]
for col in ["Glascow coma scale total"]:
if not col in chart:
continue
chart[col] = int(round(chart[col]))
X += "The chart events measured were : "
for idx,c in enumerate(chart):
X += str(chart[c]) + " for " + c + ("; " if (idx+1 < len(chart.keys())) else ". ")
#medications
if self.mimic4_text_meds:
meds = data['Med']
if len(meds.keys()) != 0:
X += "The mean amounts of medications administered during the episode were : "
meds = {x:round(np.mean([it for it in meds[x]]),3) for x in meds if len(meds[x]) > 0}
for idx,c in enumerate(meds):
if meds[c] != 0:
short = self.full_meds.loc[self.full_meds["MEDS"] == int(c)]["SHORT"].values[0]
long = self.full_meds.loc[self.full_meds["MEDS"] == int(c)]["LONG"].values[0]
name = long if (long != "nan" and not (isinstance(long,float) and np.isnan(long))) else short
if (name != "nan" and not (isinstance(name,float) and np.isnan(name))):
X += str(meds[c]) + " of " + name + ("; " if (idx+1 < len(meds.keys())) else ". ")
else:
X += "No medication was administered."
#procedures
if self.mimic4_text_procs:
proc = data['Proc']
if len(proc.keys()) != 0:
X += "The procedures performed were: "
for idx,c in enumerate(proc):
short = self.full_proc.loc[self.full_proc["PROC"] == int(c)]["SHORT"].values[0]
long = self.full_proc.loc[self.full_proc["PROC"] == int(c)]["LONG"].values[0]
name = long if (long != "nan" and not (isinstance(long,float) and np.isnan(long))) else short
if (name != "nan" and not (isinstance(name,float) and np.isnan(name))):
X += str(name) + ("; " if (idx+1 < len(proc.keys())) else ". ")
else:
X += "No procedure was performed."
yield int(i), {
"label": data['label'],
"features": X,
}
#### EXAMPLE GENERATION ###############################################################
def _generate_examples(self, filepath, split):
if "mimic4" in self.config.name:
if self.mode == "mimic4-aggreg":
yield from self._generate_examples_encoded(filepath,False)
elif self.mode == "mimic4-tensor":
yield from self._generate_examples_deep(filepath)
elif self.mode == "mimic4-naive-prompt":
yield from self._generate_examples_text(filepath)
else:
yield from self._generate_exemples_CHARTONLY(filepath)
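#Minimal usage sketch (assumptions: this script saved locally as
#"mimic3-benchmarks-irit.py", raw MIMIC-III CSVs under the given placeholder path,
#and a datasets release that still supports loading local builder scripts).
#Extra kwargs such as mode and mimic3_path are popped by
#Mimic3Benchmark_Dataset.__init__ above before the base builder is initialized.
def _example_load_dataset_sketch():
    dataset = datasets.load_dataset(
        "mimic3-benchmarks-irit.py",         #local path to this builder script
        "in-hospital-mortality",             #one of the BUILDER_CONFIGS names
        mode="statistics",                   #"statistics" or "window"
        input_strategy="previous",
        mimic3_path="/path/to/mimic3/csvs",  #hypothetical location of the raw CSVs
    )
    return dataset["train"]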