import csv
import json
import os
import pandas as pd
import datasets
import sys
import pickle
import subprocess
import shutil
from urllib.request import urlretrieve
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
from tqdm import tqdm
import yaml
import torch

_DESCRIPTION = """\
Dataset for mimic4 data, by default for the Mortality task.
Available tasks are: Mortality, Length of Stay, Readmission, Phenotype, Mortality Custom, Length of Stay Custom, Readmission Custom, Phenotype Custom.
The data is extracted from the mimic4 database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
mimic path should have this form : "path/to/mimic4data/from/username/mimiciv/2.2"
If you choose a Custom task provide a configuration file for the Time series.
"""

_HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
_CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
_URL = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline"
_DATA_GEN = 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/data_generation_icu_modify.py'
_DAY_INT = 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/day_intervals_cohort_v22.py'
_COHORT = 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/cohort.py'

_CONFIG_URLS = {
    'los': 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/los.config',
    'mortality': 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/mortality.config',
    'phenotype': 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/phenotype.config',
    'readmission': 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/readmission.config',
}


def create_vocab(file, task):
    """Load the pickled vocabulary list ``./data/dict/<task>/<file>`` and
    return a token -> index dict.

    Index 0 is reserved (the dict maps 0 -> 0); real tokens get 1-based
    indices in the order they appear in the pickled list.
    """
    with open('./data/dict/' + task + '/' + file, 'rb') as fp:
        vocab_list = pickle.load(fp)
    vocab_dict = {0: 0}
    for idx, token in enumerate(vocab_list):
        vocab_dict[token] = idx + 1
    return vocab_dict


def gender_vocab():
    """Fixed gender encoding: '' (missing) -> 0, 'M' -> 1, 'F' -> 2."""
    return {'': 0, 'M': 1, 'F': 2}


def vocab(task, diag_flag, proc_flag, out_flag, chart_flag, med_flag, lab_flag):
    """Build and persist the demographic vocabularies for ``task`` and load
    the feature vocabularies selected by the boolean flags.

    Side effects: writes ethVocabDict / ageVocabDict / genderVocabDict /
    insVocabDict pickles under ``./data/dict/<task>/``.

    Returns a 10-tuple:
    (len(cond), len(proc), len(med), len(out), len(chart), len(lab),
     ethVocabDict, genderVocabDict, ageVocabDict, insVocabDict).
    Unloaded feature vocabs contribute length 0.
    """
    condVocabDict = {}
    procVocabDict = {}
    medVocabDict = {}
    outVocabDict = {}
    chartVocabDict = {}
    labVocabDict = {}

    def _dump(obj, name):
        # Persist a built vocab dict next to its source files.
        with open('./data/dict/' + task + '/' + name, 'wb') as fp:
            pickle.dump(obj, fp)

    def _load(name):
        # Load a previously pickled feature vocab.
        with open('./data/dict/' + task + '/' + name, 'rb') as fp:
            return pickle.load(fp)

    ethVocabDict = create_vocab('ethVocab', task)
    _dump(ethVocabDict, 'ethVocabDict')
    ageVocabDict = create_vocab('ageVocab', task)
    _dump(ageVocabDict, 'ageVocabDict')
    genderVocabDict = gender_vocab()
    _dump(genderVocabDict, 'genderVocabDict')
    insVocabDict = create_vocab('insVocab', task)
    _dump(insVocabDict, 'insVocabDict')

    if diag_flag:
        condVocabDict = _load('condVocab')
    if proc_flag:
        procVocabDict = _load('procVocab')
    if med_flag:
        medVocabDict = _load('medVocab')
    if out_flag:
        outVocabDict = _load('outVocab')
    if chart_flag:
        chartVocabDict = _load('chartVocab')
    if lab_flag:
        labVocabDict = _load('labsVocab')

    return (len(condVocabDict), len(procVocabDict), len(medVocabDict),
            len(outVocabDict), len(chartVocabDict), len(labVocabDict),
            ethVocabDict, genderVocabDict, ageVocabDict, insVocabDict)


def concat_data(data, task, feat_cond=False, feat_proc=False, feat_out=False, feat_chart=False, feat_meds=False):
    # NOTE: the body of this function continues on the following lines of the file.
    meds = data['Med']
    proc = data['Proc']
out = data['Out'] chart = data['Chart'] cond= data['Cond']['fids'] cond_df=pd.DataFrame() proc_df=pd.DataFrame() out_df=pd.DataFrame() chart_df=pd.DataFrame() meds_df=pd.DataFrame() #demographic demo=pd.DataFrame(columns=['Age','gender','ethnicity','label','insurance']) new_row = {'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'], 'label': data['label'], 'insurance': data['insurance']} demo = demo.append(new_row, ignore_index=True) ##########COND######### if (feat_cond): #get all conds with open("./data/dict/"+task+"/condVocab", 'rb') as fp: conDict = pickle.load(fp) conds=pd.DataFrame(conDict,columns=['COND']) features=pd.DataFrame(np.zeros([1,len(conds)]),columns=conds['COND']) #onehot encode if(cond ==[]): cond_df=pd.DataFrame(np.zeros([1,len(features)]),columns=features['COND']) cond_df=cond_df.fillna(0) else: cond_df=pd.DataFrame(cond,columns=['COND']) cond_df['val']=1 cond_df=(cond_df.drop_duplicates()).pivot(columns='COND',values='val').reset_index(drop=True) cond_df=cond_df.fillna(0) oneh = cond_df.sum().to_frame().T combined_df = pd.concat([features,oneh],ignore_index=True).fillna(0) combined_oneh=combined_df.sum().to_frame().T cond_df=combined_oneh ##########PROC######### if (feat_proc): with open("./data/dict/"+task+"/procVocab", 'rb') as fp: procDic = pickle.load(fp) if proc : feat=proc.keys() proc_val=[proc[key] for key in feat] procedures=pd.DataFrame(procDic,columns=['PROC']) features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC']) features.columns=pd.MultiIndex.from_product([["PROC"], features.columns]) procs=pd.DataFrame(columns=feat) for p,v in zip(feat,proc_val): procs[p]=v procs.columns=pd.MultiIndex.from_product([["PROC"], procs.columns]) proc_df = pd.concat([features,procs],ignore_index=True).fillna(0) else: procedures=pd.DataFrame(procDic,columns=['PROC']) features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC']) features.columns=pd.MultiIndex.from_product([["PROC"], 
features.columns]) proc_df=features.fillna(0) ##########OUT######### if (feat_out): with open("./data/dict/"+task+"/outVocab", 'rb') as fp: outDic = pickle.load(fp) if out : feat=out.keys() out_val=[out[key] for key in feat] outputs=pd.DataFrame(outDic,columns=['OUT']) features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT']) features.columns=pd.MultiIndex.from_product([["OUT"], features.columns]) outs=pd.DataFrame(columns=feat) for o,v in zip(feat,out_val): outs[o]=v outs.columns=pd.MultiIndex.from_product([["OUT"], outs.columns]) out_df = pd.concat([features,outs],ignore_index=True).fillna(0) else: outputs=pd.DataFrame(outDic,columns=['OUT']) features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT']) features.columns=pd.MultiIndex.from_product([["OUT"], features.columns]) out_df=features.fillna(0) ##########CHART######### if (feat_chart): with open("./data/dict/"+task+"/chartVocab", 'rb') as fp: chartDic = pickle.load(fp) if chart: charts=chart['val'] feat=charts.keys() chart_val=[charts[key] for key in feat] charts=pd.DataFrame(chartDic,columns=['CHART']) features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART']) features.columns=pd.MultiIndex.from_product([["CHART"], features.columns]) chart=pd.DataFrame(columns=feat) for c,v in zip(feat,chart_val): chart[c]=v chart.columns=pd.MultiIndex.from_product([["CHART"], chart.columns]) chart_df = pd.concat([features,chart],ignore_index=True).fillna(0) else: charts=pd.DataFrame(chartDic,columns=['CHART']) features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART']) features.columns=pd.MultiIndex.from_product([["CHART"], features.columns]) chart_df=features.fillna(0) ###MEDS if (feat_meds): with open("./data/dict/"+task+"/medVocab", 'rb') as fp: medDic = pickle.load(fp) if meds: feat=meds['signal'].keys() med_val=[meds['amount'][key] for key in feat] meds=pd.DataFrame(medDic,columns=['MEDS']) features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS']) 
            # --- continuation of concat_data: finish the MEDS group ---
            features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns])
            med=pd.DataFrame(columns=feat)
            for m,v in zip(feat,med_val):
                med[m]=v
            med.columns=pd.MultiIndex.from_product([["MEDS"], med.columns])
            meds_df = pd.concat([features,med],ignore_index=True).fillna(0)
        else:
            # no meds recorded: all-zero row over the full med vocabulary
            meds=pd.DataFrame(medDic,columns=['MEDS'])
            features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS'])
            features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns])
            meds_df=features.fillna(0)
    # Time-varying features side by side under their MultiIndex group headers.
    dyn_df = pd.concat([meds_df,proc_df,out_df,chart_df], axis=1)
    # Returns (dynamic features, static one-hot conditions, demographics row).
    return dyn_df,cond_df,demo

def getXY_deep(X_df,task,feat_cond=True,feat_chart=True,feat_proc=True, feat_meds=True, feat_out=False):
    """Encode each row of X_df into per-group tensors for the deep models.

    Iterates the split, calls concat_data per sample, and accumulates one
    tensor per dynamic feature group plus the label list.
    """
    # NOTE(review): vocab() returns a 10-tuple (six lengths + four dicts) but is
    # unpacked into 4 names here — this would raise ValueError at runtime; confirm
    # whether a different vocab() variant is intended.
    eth_vocab,gender_vocab,age_vocab,ins_vocab=vocab(task.replace(" ","_"),feat_cond,feat_proc,feat_out,feat_chart,feat_meds,False)
    dyn_df=[]
    # Empty placeholder tensors; presumably grown as samples are encoded — the
    # rest of this function is outside this chunk, so cannot confirm here.
    meds=torch.zeros(size=(0,0))
    chart=torch.zeros(size=(0,0))
    proc=torch.zeros(size=(0,0))
    out=torch.zeros(size=(0,0))
    lab=torch.zeros(size=(0,0))
    stat_df=torch.zeros(size=(1,0))
    demo_df=torch.zeros(size=(1,0))
    y_df=[]
    for index,sample in tqdm(X_df.iterrows(),desc='Encoding Splits Data for '+task+' task'):
        # NOTE(review): concat_data's signature is (data, task, feat_cond, feat_proc,
        # feat_out, feat_chart, feat_meds) but the flags are passed here in the order
        # cond, chart, proc, meds, out — positional mismatch; verify intended mapping.
        dyn,stat,demo=concat_data(sample,task,feat_cond,feat_chart,feat_proc, feat_meds, feat_out)
        dyn_k=dyn.copy()
        # Top-level MultiIndex labels: the feature groups (MEDS/PROC/OUT/CHART).
        keys=dyn_k.columns.levels[0]
        if index==0:
            # Lazily initialise one accumulator tensor per feature group.
            for i in range(len(keys)):
                dyn_df.append(torch.zeros(size=(1,0)))
        y=demo['label']
        y_df.append(int(y))
        for key in range(len(keys)):
            dyn_temp=dyn[keys[key]]
            dyn_temp=dyn_temp.to_numpy()
            dyn_temp=torch.tensor(dyn_temp)
            dyn_temp=dyn_temp.unsqueeze(0)
            dyn_temp=torch.tensor(dyn_temp)
            dyn_temp=dyn_temp.type(torch.LongTensor)
            # NOTE: source is truncated here in this chunk; the remainder of
            # getXY_deep is outside the visible range.
            if key