|
import os |
|
import pandas as pd |
|
import datasets |
|
import sys |
|
import pickle |
|
import subprocess |
|
import shutil |
|
from urllib.request import urlretrieve |
|
from sklearn.model_selection import train_test_split |
|
from sklearn.preprocessing import LabelEncoder |
|
import numpy as np |
|
from tqdm import tqdm |
|
import yaml |
|
import time |
|
|
|
|
|
_DESCRIPTION = """\ |
|
Dataset for mimic4 data, by default for the Mortality task. |
|
Available tasks are: Mortality, Length of Stay, Readmission, Phenotype, Mortality Custom, Length of Stay Custom, Readmission Custom, Phenotype Custom. |
|
The data is extracted from the mimic4 database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main' |
|
mimic path should have this form : "path/to/mimic4data/from/username/mimiciv/2.2" |
|
If you choose a Custom task provide a configuration file for the Time series. |
|
""" |
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset" |
|
_CITATION = "https://proceedings.mlr.press/v193/gupta22a.html" |
|
_URL = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline" |
|
_DATA_GEN = 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/data_generation_icu_modify.py' |
|
_DAY_INT= 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/day_intervals_cohort_v22.py' |
|
_CONFIG_URLS = {'los' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/los.config', |
|
'mortality' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/mortality.config', |
|
'phenotype' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/phenotype.config', |
|
'readmission' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/readmission.config' |
|
} |
|
|
|
|
|
|
|
def check_config(task,config_file): |
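    """Validate the task configuration loaded from `config_file`.

    Checks flag and threshold values with assertions and derives the
    task-specific settings; returns (label, time, disease_label, predW).
    """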
|
with open(config_file) as f: |
|
config = yaml.safe_load(f) |
|
|
|
if task=='Phenotype': |
|
disease_label = config['disease_label'] |
|
else : |
|
disease_label = "" |
|
time = config['timePrediction'] |
|
label = task |
|
timeW = config['timeWindow'] |
|
include=int(timeW.split()[1]) |
|
bucket = config['timebucket'] |
|
radimp = config['radimp'] |
|
predW = config['predW'] |
|
disease_filter = config['disease_filter'] |
|
icu_no_icu = config['icu_no_icu'] |
|
groupingICD = config['groupingICD'] |
|
|
|
chart_flag = config['chart'] |
|
output_flag = config['output'] |
|
diag_flag= config['diagnosis'] |
|
proc_flag = config['proc'] |
|
meds_flag = config['meds'] |
|
|
|
select_diag= config['select_diag'] |
|
select_med= config['select_med'] |
|
select_proc= config['select_proc'] |
|
select_out = config['select_out'] |
|
select_chart = config['select_chart'] |
|
|
|
outlier_removal=config['outlier_removal'] |
|
thresh=config['outlier'] |
|
left_thresh=config['left_outlier'] |
|
|
|
    assert isinstance(select_diag,bool) and isinstance(select_med,bool) and isinstance(select_proc,bool) and isinstance(select_out,bool) and isinstance(select_chart,bool), "select_diag, select_chart, select_med, select_proc, select_out should be boolean"

    assert isinstance(chart_flag,bool) and isinstance(output_flag,bool) and isinstance(diag_flag,bool) and isinstance(proc_flag,bool) and isinstance(meds_flag,bool), "chart_flag, output_flag, diag_flag, proc_flag, meds_flag should be boolean"
|
if task=='Phenotype': |
|
if disease_label=='Heart Failure': |
|
label='Readmission' |
|
time=30 |
|
disease_label='I50' |
|
elif disease_label=='CAD': |
|
label='Readmission' |
|
time=30 |
|
disease_label='I25' |
|
elif disease_label=='CKD': |
|
label='Readmission' |
|
time=30 |
|
disease_label='N18' |
|
elif disease_label=='COPD': |
|
label='Readmission' |
|
time=30 |
|
disease_label='J44' |
|
else : |
|
raise ValueError('Disease label not correct provide one in the list: Heart Failure, CAD, CKD, COPD') |
|
predW=0 |
|
        assert timeW.split()[0]=='Last' and 24<=include<=72, "Time window should be between Last 24 and Last 72"
|
|
|
elif task=='Mortality': |
|
time=0 |
|
label= 'Mortality' |
|
        assert 2<=predW<=8, "Prediction window should be between 2 and 8"

        assert timeW.split()[0]=='First' and 24<=include<=72, "Time window should be between First 24 and First 72"
|
|
|
elif task=='Length of Stay': |
|
label= 'Length of Stay' |
|
        assert timeW.split()[0]=='First' and 24<=include<=72, "Time window should be between First 24 and First 72"

        assert 1<=time<=10, "Length of stay should be between 1 and 10"
|
predW=0 |
|
|
|
elif task=='Readmission': |
|
label= 'Readmission' |
|
        assert timeW.split()[0]=='Last' and 24<=include<=72, "Time window should be between Last 24 and Last 72"

        assert 10<=time<=150 and time%10==0, "Readmission window should be between 10 and 150 with a step of 10"
|
predW=0 |
|
|
|
else: |
|
raise ValueError('Task not correct') |
|
|
|
    assert disease_filter in ['Heart Failure','COPD','CKD','CAD',""], "Disease filter should be one of the following: Heart Failure, COPD, CKD, CAD or empty"

    assert icu_no_icu in ['ICU'], "Dataset currently only supports ICU data"

    assert groupingICD in ['Convert ICD-9 to ICD-10 and group ICD-10 codes','Keep both ICD-9 and ICD-10 codes','Convert ICD-9 to ICD-10 codes'], "Grouping ICD should be one of the following: Convert ICD-9 to ICD-10 and group ICD-10 codes, Keep both ICD-9 and ICD-10 codes, Convert ICD-9 to ICD-10 codes"

    assert isinstance(bucket, int) and 1<=bucket<=6, "Time bucket should be an integer between 1 and 6"

    assert radimp in ['No Imputation', 'forward fill and mean','forward fill and median'], "imputation should be one of the following: No Imputation, forward fill and mean, forward fill and median"

    if chart_flag:

        assert isinstance(left_thresh, int) and 0<=left_thresh<=10, "Left outlier threshold should be an integer between 0 and 10"

        assert isinstance(thresh, int) and 90<=thresh<=99, "Outlier threshold should be an integer between 90 and 99"

        assert outlier_removal in ['No outlier detection','Impute Outlier (default:98)','Remove outliers (default:98)'], "Outlier removal should be one of the following: No outlier detection, Impute Outlier (default:98), Remove outliers (default:98)"
|
|
|
return label, time, disease_label, predW |
|
|
|
def create_vocab(file,task): |
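    """Load a pickled vocabulary list and map each code to a 1-based id (0 is kept for padding)."""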
|
with open ('./data/dict/'+task+'/'+file, 'rb') as fp: |
|
condVocab = pickle.load(fp) |
|
condVocabDict={} |
|
condVocabDict[0]=0 |
|
for val in range(len(condVocab)): |
|
condVocabDict[condVocab[val]]= val+1 |
|
|
|
return condVocabDict |
|
|
|
def gender_vocab(): |
|
genderVocabDict={} |
|
genderVocabDict['<PAD>']=0 |
|
genderVocabDict['M']=1 |
|
genderVocabDict['F']=2 |
|
|
|
return genderVocabDict |
|
|
|
def vocab(task,diag_flag,proc_flag,out_flag,chart_flag,med_flag,lab_flag): |
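    """Build or load the per-task vocabularies; only the demographic ones (ethnicity, gender, age, insurance) are returned."""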
|
condVocabDict={} |
|
procVocabDict={} |
|
medVocabDict={} |
|
outVocabDict={} |
|
chartVocabDict={} |
|
labVocabDict={} |
|
ethVocabDict={} |
|
ageVocabDict={} |
|
genderVocabDict={} |
|
insVocabDict={} |
|
|
|
ethVocabDict=create_vocab('ethVocab',task) |
|
with open('./data/dict/'+task+'/ethVocabDict', 'wb') as fp: |
|
pickle.dump(ethVocabDict, fp) |
|
|
|
ageVocabDict=create_vocab('ageVocab',task) |
|
with open('./data/dict/'+task+'/ageVocabDict', 'wb') as fp: |
|
pickle.dump(ageVocabDict, fp) |
|
|
|
genderVocabDict=gender_vocab() |
|
with open('./data/dict/'+task+'/genderVocabDict', 'wb') as fp: |
|
pickle.dump(genderVocabDict, fp) |
|
|
|
insVocabDict=create_vocab('insVocab',task) |
|
with open('./data/dict/'+task+'/insVocabDict', 'wb') as fp: |
|
pickle.dump(insVocabDict, fp) |
|
|
|
if diag_flag: |
|
file='condVocab' |
|
with open ('./data/dict/'+task+'/'+file, 'rb') as fp: |
|
condVocabDict = pickle.load(fp) |
|
if proc_flag: |
|
file='procVocab' |
|
with open ('./data/dict/'+task+'/'+file, 'rb') as fp: |
|
procVocabDict = pickle.load(fp) |
|
if med_flag: |
|
file='medVocab' |
|
with open ('./data/dict/'+task+'/'+file, 'rb') as fp: |
|
medVocabDict = pickle.load(fp) |
|
if out_flag: |
|
file='outVocab' |
|
with open ('./data/dict/'+task+'/'+file, 'rb') as fp: |
|
outVocabDict = pickle.load(fp) |
|
if chart_flag: |
|
file='chartVocab' |
|
with open ('./data/dict/'+task+'/'+file, 'rb') as fp: |
|
chartVocabDict = pickle.load(fp) |
|
if lab_flag: |
|
file='labsVocab' |
|
with open ('./data/dict/'+task+'/'+file, 'rb') as fp: |
|
labVocabDict = pickle.load(fp) |
|
|
|
return ethVocabDict,genderVocabDict,ageVocabDict,insVocabDict |
|
|
|
|
|
def concat_data(data,task,feat_cond,feat_proc,feat_out,feat_chart,feat_meds): |
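    """Assemble one sample into dynamic (meds/proc/out/chart) and static (conditions, demographics) frames."""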
|
meds=data['Med'] |
|
proc = data['Proc'] |
|
out = data['Out'] |
|
chart = data['Chart'] |
|
cond= data['Cond']['fids'] |
|
|
|
cond_df=pd.DataFrame() |
|
proc_df=pd.DataFrame() |
|
out_df=pd.DataFrame() |
|
chart_df=pd.DataFrame() |
|
meds_df=pd.DataFrame() |
|
|
|
|
|
demo=pd.DataFrame(columns=['Age','gender','ethnicity','label','insurance']) |
|
new_row = {'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'], 'label': data['label'], 'insurance': data['insurance']} |
|
    # DataFrame.append was removed in pandas 2.0; build the row via pd.concat
    demo = pd.concat([demo, pd.DataFrame([new_row])], ignore_index=True)
|
|
|
|
|
if (feat_cond): |
|
|
|
with open("./data/dict/"+task+"/condVocab", 'rb') as fp: |
|
conDict = pickle.load(fp) |
|
conds=pd.DataFrame(conDict,columns=['COND']) |
|
features=pd.DataFrame(np.zeros([1,len(conds)]),columns=conds['COND']) |
|
|
|
|
|
        if (cond == []):

            # no recorded diagnoses: keep the all-zero vocabulary row
            # (features already holds one zero row per vocabulary code;
            # the original indexed features['COND'], which does not exist here)
            cond_df = features.fillna(0)
|
else: |
|
cond_df=pd.DataFrame(cond,columns=['COND']) |
|
cond_df['val']=1 |
|
cond_df=(cond_df.drop_duplicates()).pivot(columns='COND',values='val').reset_index(drop=True) |
|
cond_df=cond_df.fillna(0) |
|
oneh = cond_df.sum().to_frame().T |
|
combined_df = pd.concat([features,oneh],ignore_index=True).fillna(0) |
|
combined_oneh=combined_df.sum().to_frame().T |
|
cond_df=combined_oneh |
|
|
|
|
|
if (feat_proc): |
|
with open("./data/dict/"+task+"/procVocab", 'rb') as fp: |
|
procDic = pickle.load(fp) |
|
|
|
if proc : |
|
feat=proc.keys() |
|
proc_val=[proc[key] for key in feat] |
|
procedures=pd.DataFrame(procDic,columns=['PROC']) |
|
features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC']) |
|
features.columns=pd.MultiIndex.from_product([["PROC"], features.columns]) |
|
procs=pd.DataFrame(columns=feat) |
|
for p,v in zip(feat,proc_val): |
|
procs[p]=v |
|
procs.columns=pd.MultiIndex.from_product([["PROC"], procs.columns]) |
|
proc_df = pd.concat([features,procs],ignore_index=True).fillna(0) |
|
else: |
|
procedures=pd.DataFrame(procDic,columns=['PROC']) |
|
features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC']) |
|
features.columns=pd.MultiIndex.from_product([["PROC"], features.columns]) |
|
proc_df=features.fillna(0) |
|
|
|
|
|
if (feat_out): |
|
with open("./data/dict/"+task+"/outVocab", 'rb') as fp: |
|
outDic = pickle.load(fp) |
|
|
|
if out : |
|
feat=out.keys() |
|
out_val=[out[key] for key in feat] |
|
outputs=pd.DataFrame(outDic,columns=['OUT']) |
|
features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT']) |
|
features.columns=pd.MultiIndex.from_product([["OUT"], features.columns]) |
|
outs=pd.DataFrame(columns=feat) |
|
for o,v in zip(feat,out_val): |
|
outs[o]=v |
|
outs.columns=pd.MultiIndex.from_product([["OUT"], outs.columns]) |
|
out_df = pd.concat([features,outs],ignore_index=True).fillna(0) |
|
else: |
|
outputs=pd.DataFrame(outDic,columns=['OUT']) |
|
features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT']) |
|
features.columns=pd.MultiIndex.from_product([["OUT"], features.columns]) |
|
out_df=features.fillna(0) |
|
|
|
|
|
if (feat_chart): |
|
with open("./data/dict/"+task+"/chartVocab", 'rb') as fp: |
|
chartDic = pickle.load(fp) |
|
|
|
if chart: |
|
charts=chart['val'] |
|
feat=charts.keys() |
|
chart_val=[charts[key] for key in feat] |
|
charts=pd.DataFrame(chartDic,columns=['CHART']) |
|
features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART']) |
|
features.columns=pd.MultiIndex.from_product([["CHART"], features.columns]) |
|
|
|
chart=pd.DataFrame(columns=feat) |
|
for c,v in zip(feat,chart_val): |
|
chart[c]=v |
|
chart.columns=pd.MultiIndex.from_product([["CHART"], chart.columns]) |
|
chart_df = pd.concat([features,chart],ignore_index=True).fillna(0) |
|
else: |
|
charts=pd.DataFrame(chartDic,columns=['CHART']) |
|
features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART']) |
|
features.columns=pd.MultiIndex.from_product([["CHART"], features.columns]) |
|
chart_df=features.fillna(0) |
|
|
|
|
|
if (feat_meds): |
|
with open("./data/dict/"+task+"/medVocab", 'rb') as fp: |
|
medDic = pickle.load(fp) |
|
|
|
if meds: |
|
feat=meds['signal'].keys() |
|
med_val=[meds['amount'][key] for key in feat] |
|
meds=pd.DataFrame(medDic,columns=['MEDS']) |
|
features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS']) |
|
features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns]) |
|
|
|
med=pd.DataFrame(columns=feat) |
|
for m,v in zip(feat,med_val): |
|
med[m]=v |
|
med.columns=pd.MultiIndex.from_product([["MEDS"], med.columns]) |
|
meds_df = pd.concat([features,med],ignore_index=True).fillna(0) |
|
else: |
|
meds=pd.DataFrame(medDic,columns=['MEDS']) |
|
features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS']) |
|
features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns]) |
|
meds_df=features.fillna(0) |
|
|
|
dyn_df = pd.concat([meds_df,proc_df,out_df,chart_df], axis=1) |
|
return dyn_df,cond_df,demo |
|
|
|
def getXY_deep(data,task,feat_cond,feat_proc,feat_out,feat_chart,feat_meds): |
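    """Convert one raw sample into the nested lists consumed by deep models, plus encoded demographics and the label."""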
|
meds, chart, out, proc, lab =[],[],[],[],[] |
|
eth_vocab,gender_vocab,age_vocab,ins_vocab=vocab(task.replace(" ","_"),feat_cond,feat_proc,feat_out,feat_chart,feat_meds,False) |
|
dyn_df,cond_df,demo=concat_data(data,task.replace(" ","_"),feat_cond,feat_proc,feat_out,feat_chart,feat_meds) |
|
    keys = dyn_df.columns.levels[0]
    dyn = {}

    # convert each top-level feature group (MEDS, CHART, OUT, PROC, LAB)
    # to a nested list, keyed by the group name
    for key in keys:
        dyn_temp = dyn_df[key].to_numpy()
        dyn_temp = np.nan_to_num(dyn_temp, copy=False)
        dyn[key] = dyn_temp.tolist()

    meds = dyn.get('MEDS', meds)
    chart = dyn.get('CHART', chart)
    out = dyn.get('OUT', out)
    proc = dyn.get('PROC', proc)
    lab = dyn.get('LAB', lab)
|
|
|
stat=cond_df |
|
stat=stat.to_numpy() |
|
stat=stat.tolist() |
|
|
|
    # select the scalar explicitly; int() on a one-element Series is deprecated
    y = int(demo['label'].iloc[0])
|
|
|
demo["gender"].replace(gender_vocab, inplace=True) |
|
demo["ethnicity"].replace(eth_vocab, inplace=True) |
|
demo["insurance"].replace(ins_vocab, inplace=True) |
|
demo["Age"].replace(age_vocab, inplace=True) |
|
demo=demo[["gender","ethnicity","insurance","Age"]] |
|
demo=demo.values.tolist() |
|
|
|
return stat, demo, meds, chart, out, proc, lab, y |
|
|
|
|
|
def getXY(dyn,stat,demo,concat_cols,concat): |
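    """Flatten one sample into a single feature row: the full concatenated time series if `concat`, else per-feature aggregates."""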
|
X_df=pd.DataFrame() |
|
if concat: |
|
dyna=dyn.copy() |
|
dyna.columns=dyna.columns.droplevel(0) |
|
dyna=dyna.to_numpy() |
|
dyna=np.nan_to_num(dyna, copy=False) |
|
        # keep dyna as an ndarray: a Python list has no reshape method
        dyna = dyna.reshape(1, -1)
|
dyn_df=pd.DataFrame(data=dyna,columns=concat_cols) |
|
else: |
|
dyn_df=pd.DataFrame() |
|
for key in dyn.columns.levels[0]: |
|
dyn_temp=dyn[key] |
|
if ((key=="CHART") or (key=="MEDS")): |
|
agg=dyn_temp.aggregate("mean") |
|
agg=agg.reset_index() |
|
else: |
|
agg=dyn_temp.aggregate("max") |
|
agg=agg.reset_index() |
|
|
|
if dyn_df.empty: |
|
dyn_df=agg |
|
else: |
|
dyn_df=pd.concat([dyn_df,agg],axis=0) |
|
dyn_df=dyn_df.T |
|
dyn_df.columns = dyn_df.iloc[0] |
|
dyn_df=dyn_df.iloc[1:,:] |
|
|
|
X_df=pd.concat([dyn_df,stat],axis=1) |
|
X_df=pd.concat([X_df,demo],axis=1) |
|
return X_df |
|
|
|
def encoding(X_data): |
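    """Label-encode the categorical demographic columns; note the encoders are fit on the given split only."""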
|
gen_encoder = LabelEncoder() |
|
eth_encoder = LabelEncoder() |
|
ins_encoder = LabelEncoder() |
|
gen_encoder.fit(X_data['gender']) |
|
eth_encoder.fit(X_data['ethnicity']) |
|
ins_encoder.fit(X_data['insurance']) |
|
X_data['gender']=gen_encoder.transform(X_data['gender']) |
|
X_data['ethnicity']=eth_encoder.transform(X_data['ethnicity']) |
|
X_data['insurance']=ins_encoder.transform(X_data['insurance']) |
|
return X_data |
|
|
|
def generate_split(path,task,concat,feat_cond,feat_chart,feat_proc, feat_meds, feat_out): |
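    """Load a pickled split and encode every stay into a flat feature row, returning the stacked DataFrame."""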
|
with open(path, 'rb') as fp: |
|
dico = pickle.load(fp) |
|
df = pd.DataFrame.from_dict(dico, orient='index') |
|
X_df=pd.DataFrame() |
|
taskf=task.replace(" ","_") |
|
for _, data in tqdm(df.iterrows(),desc='Encoding Splits Data for '+task+' task'): |
|
concat_cols=[] |
|
sample=data |
|
        # pass the flags in concat_data's parameter order (cond, proc, out, chart, meds)
        dyn_df,cond_df,demo=concat_data(sample,taskf,feat_cond,feat_proc,feat_out,feat_chart,feat_meds)
|
dyn=dyn_df.copy() |
|
dyn.columns=dyn.columns.droplevel(0) |
|
cols=dyn.columns |
|
time=dyn.shape[0] |
|
for t in range(time): |
|
cols_t = [str(x) + "_"+str(t) for x in cols] |
|
concat_cols.extend(cols_t) |
|
|
|
X= getXY(dyn_df,cond_df,demo,concat_cols,concat) |
|
        if X_df.empty:
            X_df = X
        else:
            X_df = pd.concat([X_df, X], axis=0)
|
X_df=X_df.fillna(0) |
|
X_df = encoding(X_df) |
|
return X_df |
|
|
|
def generate_split_deep(path,task,feat_cond,feat_chart,feat_proc, feat_meds, feat_out): |
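    """Load a pickled split and convert every stay into the nested dict format used by deep models."""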
|
with open(path, 'rb') as fp: |
|
dico = pickle.load(fp) |
|
X = pd.DataFrame.from_dict(dico, orient='index') |
|
X_dict = {} |
|
taskf=task.replace(" ","_") |
|
for hid, data in tqdm(X.iterrows(),desc='Encoding Splits Data for '+task+' task'): |
|
stat, demo, meds, chart, out, proc, lab, y = getXY_deep(data, taskf, feat_cond, feat_proc, feat_out, feat_chart,feat_meds) |
|
X_dict[hid] = {'stat': stat, 'demo': demo, 'meds': meds, 'chart': chart, 'out': out, 'proc': proc, 'lab': lab, 'label': y} |
|
|
|
return X_dict |
|
|
|
|
|
def task_cohort(task, mimic_path, config_path): |
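    """Run the MIMIC-IV-Data-Pipeline steps (cohort extraction, feature selection, preprocessing, data generation) for a task."""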
|
sys.path.append('./preprocessing/day_intervals_preproc') |
|
sys.path.append('./utils') |
|
sys.path.append('./preprocessing/hosp_module_preproc') |
|
sys.path.append('./model') |
|
import day_intervals_cohort_v22 |
|
import day_intervals_cohort |
|
import feature_selection_icu |
|
import data_generation_icu_modify |
|
|
|
root_dir = os.path.dirname(os.path.abspath('UserInterface.ipynb')) |
|
config_path='./config/'+config_path |
|
with open(config_path) as f: |
|
config = yaml.safe_load(f) |
|
version_path = mimic_path+'/' |
|
version = mimic_path.split('/')[-1][0] |
|
start = time.time() |
|
|
|
label, tim, disease_label, predW = check_config(task,config_path) |
|
|
|
timeW = config['timeWindow'] |
|
include=int(timeW.split()[1]) |
|
bucket = config['timebucket'] |
|
radimp = config['radimp'] |
|
diag_flag = config['diagnosis'] |
|
out_flag = config['output'] |
|
chart_flag = config['chart'] |
|
proc_flag= config['proc'] |
|
med_flag = config['meds'] |
|
disease_filter = config['disease_filter'] |
|
print("disease_label: ", label) |
|
icu_no_icu = config['icu_no_icu'] |
|
groupingICD = config['groupingICD'] |
|
|
|
select_diag= config['select_diag'] |
|
select_med= config['select_med'] |
|
select_proc= config['select_proc'] |
|
|
|
select_out= config['select_out'] |
|
select_chart= config['select_chart'] |
|
|
|
|
|
|
|
data_icu=icu_no_icu=="ICU" |
|
data_mort=label=="Mortality" |
|
data_admn=label=='Readmission' |
|
data_los=label=='Length of Stay' |
|
|
|
if (disease_filter=="Heart Failure"): |
|
icd_code='I50' |
|
elif (disease_filter=="CKD"): |
|
icd_code='N18' |
|
elif (disease_filter=="COPD"): |
|
icd_code='J44' |
|
elif (disease_filter=="CAD"): |
|
icd_code='I25' |
|
else: |
|
icd_code='No Disease Filter' |
|
|
|
|
|
if version == '2': |
|
cohort_output = day_intervals_cohort_v22.extract_data(icu_no_icu,label,tim,icd_code, root_dir,version_path,disease_label) |
|
|
|
elif version == '1': |
|
cohort_output = day_intervals_cohort.extract_data(icu_no_icu,label,tim,icd_code, root_dir,version_path,disease_label) |
|
|
|
print(data_icu) |
|
if data_icu : |
|
feature_selection_icu.feature_icu(cohort_output, version_path,diag_flag,out_flag,chart_flag,proc_flag,med_flag) |
|
|
|
if data_icu: |
|
if diag_flag: |
|
group_diag=groupingICD |
|
feature_selection_icu.preprocess_features_icu(cohort_output, diag_flag, group_diag,False,False,False,0,0) |
|
|
|
if data_icu: |
|
feature_selection_icu.generate_summary_icu(diag_flag,proc_flag,med_flag,out_flag,chart_flag) |
|
|
|
|
|
feature_selection_icu.features_selection_icu(cohort_output, diag_flag,proc_flag,med_flag,out_flag, chart_flag,select_diag,select_med,select_proc,select_out,select_chart) |
|
|
|
thresh=0 |
|
if data_icu: |
|
if chart_flag: |
|
outlier_removal=config['outlier_removal'] |
|
clean_chart=outlier_removal!='No outlier detection' |
|
impute_outlier_chart=outlier_removal=='Impute Outlier (default:98)' |
|
thresh=config['outlier'] |
|
left_thresh=config['left_outlier'] |
|
feature_selection_icu.preprocess_features_icu(cohort_output, False, False,chart_flag,clean_chart,impute_outlier_chart,thresh,left_thresh) |
|
|
|
if radimp == 'forward fill and mean' : |
|
impute='Mean' |
|
elif radimp =='forward fill and median': |
|
impute = 'Median' |
|
else : |
|
impute = False |
|
|
|
if data_icu: |
|
gen=data_generation_icu_modify.Generator(task,cohort_output,data_mort,data_admn,data_los,diag_flag,proc_flag,out_flag,chart_flag,med_flag,impute,include,bucket,predW) |
|
end = time.time() |
|
print("Time elapsed : ", round((end - start)/60,2),"mins") |
|
print("[============TASK COHORT SUCCESSFULLY CREATED============]") |
|
|
|
|
|
|
|
class Mimic4DatasetConfig(datasets.BuilderConfig): |
|
"""BuilderConfig for Mimic4Dataset.""" |
|
|
|
def __init__( |
|
self, |
|
**kwargs, |
|
): |
|
super().__init__(**kwargs) |
|
|
|
class Mimic4Dataset(datasets.GeneratorBasedBuilder): |
|
VERSION = datasets.Version("1.0.0") |
|
|
|
def __init__(self, **kwargs): |
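        # custom kwargs are consumed here before delegating to the datasets builder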
|
self.mimic_path = kwargs.pop("mimic_path", None) |
|
self.encoding = kwargs.pop("encoding",'raw') |
|
self.config_path = kwargs.pop("config_path",None) |
|
self.test_size = kwargs.pop("test_size",0.2) |
|
self.val_size = kwargs.pop("val_size",0.1) |
|
|
|
super().__init__(**kwargs) |
|
|
|
|
|
BUILDER_CONFIGS = [ |
|
Mimic4DatasetConfig( |
|
name="Phenotype", |
|
version=VERSION, |
|
description="Dataset for mimic4 Phenotype task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Readmission", |
|
version=VERSION, |
|
description="Dataset for mimic4 Readmission task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Length of Stay", |
|
version=VERSION, |
|
description="Dataset for mimic4 Length of Stay task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Mortality", |
|
version=VERSION, |
|
description="Dataset for mimic4 Mortality task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Phenotype Custom", |
|
version=VERSION, |
|
description="Dataset for mimic4 Custom Phenotype task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Readmission Custom", |
|
version=VERSION, |
|
description="Dataset for mimic4 Custom Readmission task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Length of Stay Custom", |
|
version=VERSION, |
|
description="Dataset for mimic4 Custom Length of Stay task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Mortality Custom", |
|
version=VERSION, |
|
description="Dataset for mimic4 Custom Mortality task" |
|
), |
|
] |
|
|
|
DEFAULT_CONFIG_NAME = "Mortality" |
|
|
|
def map_dtype(self,dtype): |
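        """Map a pandas dtype to the matching datasets.Value feature type."""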
|
if pd.api.types.is_integer_dtype(dtype): |
|
return datasets.Value('int64') |
|
elif pd.api.types.is_float_dtype(dtype): |
|
return datasets.Value('float64') |
|
elif pd.api.types.is_string_dtype(dtype): |
|
return datasets.Value('string') |
|
else: |
|
raise ValueError(f"Unsupported dtype: {dtype}") |
|
|
|
def create_cohort(self): |
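        """Fetch the pipeline repo and task config, build the cohort, and write the train/val/test pickles."""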
|
if self.config.name == 'Phenotype' : self.config_path = _CONFIG_URLS['phenotype'] |
|
if self.config.name == 'Readmission' : self.config_path = _CONFIG_URLS['readmission'] |
|
if self.config.name == 'Length of Stay' : self.config_path = _CONFIG_URLS['los'] |
|
if self.config.name == 'Mortality' : self.config_path = _CONFIG_URLS['mortality'] |
|
        if self.config.name in ['Phenotype Custom','Readmission Custom','Length of Stay Custom','Mortality Custom'] and self.config_path is None:
|
raise ValueError('Please provide a config file') |
|
|
|
version = self.mimic_path.split('/')[-1] |
|
mimic_folder= self.mimic_path.split('/')[-2] |
|
mimic_complete_path='/'+mimic_folder+'/'+version |
|
|
|
current_directory = os.getcwd() |
|
if os.path.exists(os.path.dirname(current_directory)+'/MIMIC-IV-Data-Pipeline-main'): |
|
dir =os.path.dirname(current_directory) |
|
os.chdir(dir) |
|
else: |
|
|
|
            dir = self.mimic_path.replace(mimic_complete_path,'')
            # test for the empty string first: dir[-1] raises IndexError on ''
            if dir=='':
                dir="./"
            elif dir[-1]!='/':
                dir=dir+'/'
            parent_dir = os.path.dirname(self.mimic_path)
            os.chdir(parent_dir)
|
|
|
|
|
repo_url='https://github.com/healthylaife/MIMIC-IV-Data-Pipeline' |
|
        path_bench = './MIMIC-IV-Data-Pipeline-main'
        if not os.path.exists(path_bench):
            subprocess.run(["git", "clone", repo_url, path_bench])
            os.makedirs(path_bench+'/mimic-iv')
            shutil.move(version,path_bench+'/mimic-iv')
|
|
|
os.chdir(path_bench) |
|
self.mimic_path = './mimic-iv/'+version |
|
|
|
|
|
|
|
if self.config_path[0:4] == 'http': |
|
c = self.config_path.split('/')[-1] |
|
file_path, head = urlretrieve(self.config_path,c) |
|
else : |
|
file_path = self.config_path |
|
|
|
if not os.path.exists('./config'): |
|
os.makedirs('config') |
|
|
|
conf='./config/'+file_path.split('/')[-1] |
|
if not os.path.exists(conf): |
|
shutil.move(file_path,'./config') |
|
with open(conf) as f: |
|
config = yaml.safe_load(f) |
|
feat_cond, feat_chart, feat_proc, feat_meds, feat_out = config['diagnosis'], config['chart'], config['proc'], config['meds'], config['output'] |
|
|
|
|
|
|
|
if not os.path.exists('./model/data_generation_icu_modify.py'): |
|
file_path, head = urlretrieve(_DATA_GEN, "data_generation_icu_modify.py") |
|
shutil.move(file_path, './model') |
|
|
|
if not os.path.exists('./preprocessing/day_intervals_preproc/day_intervals_cohort_v22.py'): |
|
file_path, head = urlretrieve(_DAY_INT, "day_intervals_cohort_v22.py") |
|
shutil.move(file_path, './preprocessing/day_intervals_preproc') |
|
|
|
data_dir = "./data/dict/"+self.config.name.replace(" ","_")+"/dataDic" |
|
sys.path.append(path_bench) |
|
config = self.config_path.split('/')[-1] |
|
|
|
|
|
task_cohort(self.config.name.replace(" ","_"),self.mimic_path,config) |
|
|
|
|
|
with open(data_dir, 'rb') as fp: |
|
dataDic = pickle.load(fp) |
|
data = pd.DataFrame.from_dict(dataDic) |
|
|
|
data=data.T |
|
train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42) |
|
train_data, val_data = train_test_split(train_data, test_size=self.val_size, random_state=42) |
|
|
|
dict_dir = "./data/dict/"+self.config.name.replace(" ","_") |
|
train_dic = train_data.to_dict('index') |
|
test_dic = test_data.to_dict('index') |
|
val_dic = val_data.to_dict('index') |
|
|
|
train_path = dict_dir+'/train_data.pkl' |
|
test_path = dict_dir+'/test_data.pkl' |
|
val_path = dict_dir+'/val_data.pkl' |
|
|
|
with open(train_path, 'wb') as f: |
|
pickle.dump(train_dic, f) |
|
with open(val_path, 'wb') as f: |
|
pickle.dump(val_dic, f) |
|
with open(test_path, 'wb') as f: |
|
pickle.dump(test_dic, f) |
|
|
|
return feat_cond, feat_chart, feat_proc, feat_meds, feat_out, dict_dir |
|
|
|
|
|
|
|
def _info_raw(self): |
|
features = datasets.Features( |
|
{ |
|
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]), |
|
"gender": datasets.Value("string"), |
|
"ethnicity": datasets.Value("string"), |
|
"insurance": datasets.Value("string"), |
|
"age": datasets.Value("int32"), |
|
"COND": datasets.Sequence(datasets.Value("string")), |
|
"MEDS": { |
|
"signal": |
|
{ |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
} |
|
, |
|
"rate": |
|
{ |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
} |
|
, |
|
"amount": |
|
{ |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
} |
|
|
|
}, |
|
"PROC": { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
"CHART": |
|
{ |
|
"signal" : { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
"val" : { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
}, |
|
"OUT": { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
|
|
} |
|
) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
citation=_CITATION, |
|
) |
|
|
|
def __split_generators_raw(self): |
|
|
|
csv_dir = "./data/dict/"+self.config.name.replace(" ","_") |
|
|
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.pkl'}), |
|
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data.pkl'}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.pkl'}), |
|
] |
|
|
|
def _generate_examples_raw(self, filepath): |
|
with open(filepath, 'rb') as fp: |
|
dataDic = pickle.load(fp) |
|
for hid, data in dataDic.items(): |
|
proc_features = data['Proc'] |
|
chart_features = data['Chart'] |
|
meds_features = data['Med'] |
|
out_features = data['Out'] |
|
cond_features = data['Cond']['fids'] |
|
eth= data['ethnicity'] |
|
age = data['age'] |
|
gender = data['gender'] |
|
label = data['label'] |
|
insurance=data['insurance'] |
|
|
|
items = list(proc_features.keys()) |
|
values =[proc_features[i] for i in items ] |
|
procs = {"id" : items, |
|
"value": values} |
|
|
|
items_outs = list(out_features.keys()) |
|
values_outs =[out_features[i] for i in items_outs ] |
|
outs = {"id" : items_outs, |
|
"value": values_outs} |
|
|
|
|
|
if ('signal' in chart_features): |
|
items_chart_sig = list(chart_features['signal'].keys()) |
|
values_chart_sig =[chart_features['signal'][i] for i in items_chart_sig ] |
|
chart_sig = {"id" : items_chart_sig, |
|
"value": values_chart_sig} |
|
else: |
|
chart_sig = {"id" : [], |
|
"value": []} |
|
|
|
if ('val' in chart_features): |
|
items_chart_val = list(chart_features['val'].keys()) |
|
values_chart_val =[chart_features['val'][i] for i in items_chart_val ] |
|
chart_val = {"id" : items_chart_val, |
|
"value": values_chart_val} |
|
else: |
|
chart_val = {"id" : [], |
|
"value": []} |
|
|
|
charts = {"signal" : chart_sig, |
|
"val" : chart_val} |
|
|
|
|
|
if ('signal' in meds_features): |
|
items_meds_sig = list(meds_features['signal'].keys()) |
|
values_meds_sig =[meds_features['signal'][i] for i in items_meds_sig ] |
|
meds_sig = {"id" : items_meds_sig, |
|
"value": values_meds_sig} |
|
else: |
|
meds_sig = {"id" : [], |
|
"value": []} |
|
|
|
if ('rate' in meds_features): |
|
items_meds_rate = list(meds_features['rate'].keys()) |
|
values_meds_rate =[meds_features['rate'][i] for i in items_meds_rate ] |
|
meds_rate = {"id" : items_meds_rate, |
|
"value": values_meds_rate} |
|
else: |
|
meds_rate = {"id" : [], |
|
"value": []} |
|
|
|
if ('amount' in meds_features): |
|
items_meds_amount = list(meds_features['amount'].keys()) |
|
values_meds_amount =[meds_features['amount'][i] for i in items_meds_amount ] |
|
meds_amount = {"id" : items_meds_amount, |
|
"value": values_meds_amount} |
|
else: |
|
meds_amount = {"id" : [], |
|
"value": []} |
|
|
|
meds = {"signal" : meds_sig, |
|
"rate" : meds_rate, |
|
"amount" : meds_amount} |
|
|
|
yield int(hid), { |
|
"label" : label, |
|
"gender" : gender, |
|
"ethnicity" : eth, |
|
"insurance" : insurance, |
|
"age" : age, |
|
"COND" : cond_features, |
|
"PROC" : procs, |
|
"CHART" : charts, |
|
"OUT" : outs, |
|
"MEDS" : meds |
|
} |
|
|
|
|
|
|
|
|
|
def _info_encoded(self): |
|
X_train_encoded=generate_split(self.path+'/train_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out) |
|
X_test_encoded=generate_split(self.path+'/test_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out) |
|
X_val_encoded=generate_split(self.path+'/val_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out) |
|
|
|
X_train_encoded.to_csv(self.path+"/X_train_encoded.csv", index=False) |
|
X_test_encoded.to_csv(self.path+"/X_test_encoded.csv", index=False) |
|
X_val_encoded.to_csv(self.path+"/X_val_encoded.csv", index=False) |
|
columns = {col: self.map_dtype(X_train_encoded[col].dtype) for col in X_train_encoded.columns} |
|
features = datasets.Features(columns) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
citation=_CITATION, |
|
) |
|
|
|
def __split_generators_encoded(self): |
|
data_dir = "./data/dict/"+self.config.name.replace(" ","_") |
|
|
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/X_train_encoded.csv'}), |
|
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/X_val_encoded.csv'}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/X_test_encoded.csv'}), |
|
] |
|
|
|
def _generate_examples_encoded(self, filepath): |
|
df = pd.read_csv(filepath, header=0) |
|
for i, row in df.iterrows(): |
|
yield i, row.to_dict() |
|
|
|
def _info_deep(self): |
|
X_train_deep = generate_split_deep(self.path+'/train_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out) |
|
X_test_deep = generate_split_deep(self.path+'/test_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out) |
|
X_val_deep = generate_split_deep(self.path+'/val_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out) |
|
|
|
with open(self.path+"/X_train_deep.pkl", 'wb') as f: |
|
pickle.dump(X_train_deep, f) |
|
with open(self.path+"/X_test_deep.pkl", 'wb') as f: |
|
pickle.dump(X_test_deep, f) |
|
with open(self.path+"/X_val_deep.pkl", 'wb') as f: |
|
pickle.dump(X_val_deep, f) |
|
features = datasets.Features( |
|
{ |
|
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]), |
|
"DEMO": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))), |
|
"COND" : datasets.Sequence(datasets.Sequence(datasets.Value("float64"))) , |
|
"MEDS" : datasets.Sequence(datasets.Sequence(datasets.Value("float64"))) , |
|
"PROC" : datasets.Sequence(datasets.Sequence(datasets.Value("float64"))) , |
|
"CHART" : datasets.Sequence(datasets.Sequence(datasets.Value("float64"))) , |
|
"OUT" : datasets.Sequence(datasets.Sequence(datasets.Value("float64"))) , |
|
"LAB" : datasets.Sequence(datasets.Sequence(datasets.Value("float64"))) , |
|
|
|
} |
|
) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
citation=_CITATION, |
|
) |
|
|
|
def __split_generators_deep(self): |
|
data_dir = "./data/dict/"+self.config.name.replace(" ","_") |
|
|
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/X_train_deep.pkl'}), |
|
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/X_val_deep.pkl'}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/X_test_deep.pkl'}), |
|
] |
|
|
|
def _generate_examples_deep(self, filepath): |
|
with open(filepath, 'rb') as fp: |
|
dico = pickle.load(fp) |
|
|
|
task=self.config.name.replace(" ","_") |
|
if 'Custom' in task: |
|
task = task.rsplit('_', 1)[0] |
|
for key, data in tqdm(dico.items(),desc='Encoding Splits Data for '+task+' task'): |
|
stat, demo, meds, chart, out, proc, lab, y = getXY_deep(data, task, self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds) |
|
yield int(key), { |
|
'label': y, |
|
'DEMO': demo, |
|
'COND': stat, |
|
'MEDS': meds, |
|
'PROC': proc, |
|
'CHART': chart, |
|
'OUT': out, |
|
'LAB': lab, |
|
} |
|
|
|
|
|
def _info(self): |
|
self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out,self.path = self.create_cohort() |
|
if self.encoding == 'onehot' : |
|
return self._info_encoded() |
|
|
|
elif self.encoding == 'deep' : |
|
return self._info_deep() |
|
|
|
else: |
|
return self._info_raw() |
|
|
|
def _split_generators(self, dl_manager): |
|
        if self.encoding == 'onehot' :
            return self.__split_generators_encoded()

        else:
            # both 'deep' and 'raw' read the raw train/val/test pickles;
            # the deep conversion happens on the fly in _generate_examples_deep
            return self.__split_generators_raw()
|
|
|
def _generate_examples(self, filepath): |
|
|
|
if self.encoding == 'onehot' : |
|
yield from self._generate_examples_encoded(filepath) |
|
|
|
elif self.encoding == 'deep' : |
|
yield from self._generate_examples_deep(filepath) |
|
else : |
|
yield from self._generate_examples_raw(filepath) |
|
|
|
|