Update Mimic4Dataset.py
Browse files- Mimic4Dataset.py +25 -7
Mimic4Dataset.py
CHANGED
@@ -110,7 +110,7 @@ def vocab(task,diag_flag,proc_flag,out_flag,chart_flag,med_flag,lab_flag):
|
|
110 |
return len(condVocabDict),len(procVocabDict),len(medVocabDict),len(outVocabDict),len(chartVocabDict),len(labVocabDict),ethVocabDict,genderVocabDict,ageVocabDict,insVocabDict
|
111 |
|
112 |
|
113 |
-
def
|
114 |
meds=data['Med']
|
115 |
proc = data['Proc']
|
116 |
out = data['Out']
|
@@ -257,7 +257,7 @@ def getXY_deep(X_df,task,feat_cond=True,feat_chart=True,feat_proc=True, feat_med
|
|
257 |
y_df=[]
|
258 |
|
259 |
for index,sample in tqdm(X_df.iterrows(),desc='Encoding Splits Data for '+task+' task'):
|
260 |
-
dyn,stat,demo=
|
261 |
dyn_k=dyn.copy()
|
262 |
keys=dyn_k.columns.levels[0]
|
263 |
if index==0:
|
@@ -368,7 +368,7 @@ def encoding(X_data):
|
|
368 |
X_data['insurance']=ins_encoder.transform(X_data['insurance'])
|
369 |
return X_data
|
370 |
|
371 |
-
def generate_split(path,task,concat,feat_cond
|
372 |
with open(path, 'rb') as fp:
|
373 |
dico = pickle.load(fp)
|
374 |
df = pd.DataFrame.from_dict(dico, orient='index')
|
@@ -377,7 +377,7 @@ def generate_split(path,task,concat,feat_cond=True,feat_chart=True,feat_proc=Tru
|
|
377 |
for _, data in tqdm(df.iterrows(),desc='Encoding Splits Data for '+task+' task'):
|
378 |
concat_cols=[]
|
379 |
sample=data
|
380 |
-
dyn_df,cond_df,demo=
|
381 |
dyn=dyn_df.copy()
|
382 |
dyn.columns=dyn.columns.droplevel(0)
|
383 |
cols=dyn.columns
|
@@ -395,6 +395,24 @@ def generate_split(path,task,concat,feat_cond=True,feat_chart=True,feat_proc=Tru
|
|
395 |
X_df = encoding(X_df)
|
396 |
return X_df
|
397 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
398 |
|
399 |
class Mimic4DatasetConfig(datasets.BuilderConfig):
|
400 |
"""BuilderConfig for Mimic4Dataset."""
|
@@ -810,9 +828,9 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
|
|
810 |
X_val_encoded.to_csv(self.path+"/X_val_encoded.csv", index=False)
|
811 |
return self._info_encoded(X_train_encoded)
|
812 |
elif self.encoding == 'deep' :
|
813 |
-
X_train_deep =
|
814 |
-
X_test_deep =
|
815 |
-
X_val_deep =
|
816 |
|
817 |
X_train_deep.to_csv(self.path+"/X_train_deep.csv", index=False)
|
818 |
X_test_deep.to_csv(self.path+"/X_test_deep.csv", index=False)
|
|
|
110 |
return len(condVocabDict),len(procVocabDict),len(medVocabDict),len(outVocabDict),len(chartVocabDict),len(labVocabDict),ethVocabDict,genderVocabDict,ageVocabDict,insVocabDict
|
111 |
|
112 |
|
113 |
+
def concat_data(data,task,feat_cond=False,feat_proc=False,feat_out=False,feat_chart=False,feat_meds=False):
|
114 |
meds=data['Med']
|
115 |
proc = data['Proc']
|
116 |
out = data['Out']
|
|
|
257 |
y_df=[]
|
258 |
|
259 |
for index,sample in tqdm(X_df.iterrows(),desc='Encoding Splits Data for '+task+' task'):
|
260 |
+
dyn,stat,demo=concat_data(sample,task,feat_cond,feat_chart,feat_proc, feat_meds, feat_out)
|
261 |
dyn_k=dyn.copy()
|
262 |
keys=dyn_k.columns.levels[0]
|
263 |
if index==0:
|
|
|
368 |
X_data['insurance']=ins_encoder.transform(X_data['insurance'])
|
369 |
return X_data
|
370 |
|
371 |
+
def generate_split(path,task,concat,feat_cond,feat_chart,feat_proc, feat_meds, feat_out):
|
372 |
with open(path, 'rb') as fp:
|
373 |
dico = pickle.load(fp)
|
374 |
df = pd.DataFrame.from_dict(dico, orient='index')
|
|
|
377 |
for _, data in tqdm(df.iterrows(),desc='Encoding Splits Data for '+task+' task'):
|
378 |
concat_cols=[]
|
379 |
sample=data
|
380 |
+
dyn_df,cond_df,demo=concat_data(sample,taskf,feat_cond,feat_chart,feat_proc, feat_meds, feat_out)
|
381 |
dyn=dyn_df.copy()
|
382 |
dyn.columns=dyn.columns.droplevel(0)
|
383 |
cols=dyn.columns
|
|
|
395 |
X_df = encoding(X_df)
|
396 |
return X_df
|
397 |
|
398 |
def generate_split_deep(path, task, concat=True, feat_cond=True, feat_chart=True,
                        feat_proc=True, feat_meds=True, feat_out=True):
    """Load a pickled data split and return its 'deep'-encoded feature DataFrame.

    Parameters
    ----------
    path : str
        Path to the pickled dict of samples (e.g. ``.../train_data.pkl``).
    task : str
        Task name; spaces are replaced with underscores before being passed
        to ``getXY_deep`` (matches the dataset config-name convention).
    concat : bool, optional
        Accepted for signature parity with ``generate_split`` and with the
        call sites (which pass ``True`` positionally in this slot); it is
        not used by the deep encoding itself.
    feat_cond, feat_chart, feat_proc, feat_meds, feat_out : bool, optional
        Feature-group flags forwarded unchanged to ``getXY_deep``.

    Returns
    -------
    pandas.DataFrame
        Column-wise concatenation of medication, chart, output, procedure,
        lab, static, demographic and label frames, with NaNs filled with 0
        and categorical columns label-encoded via ``encoding``.
    """
    with open(path, 'rb') as fp:
        # NOTE(review): pickle.load executes arbitrary code on untrusted
        # input — these split files must come from the trusted pipeline.
        dico = pickle.load(fp)
    df = pd.DataFrame.from_dict(dico, orient='index')
    taskf = task.replace(" ", "_")
    meds, chart, out, proc, lab, stat_df, demo_df, y_df = getXY_deep(
        df, taskf, feat_cond, feat_chart, feat_proc, feat_meds, feat_out)
    # One concat over the list instead of eight incremental concats onto an
    # empty frame: same columns/order, fewer intermediate copies.
    X_df = pd.concat([meds, chart, out, proc, lab, stat_df, demo_df, y_df],
                     axis=1)
    X_df = X_df.fillna(0)
    X_df = encoding(X_df)
    return X_df
416 |
|
417 |
class Mimic4DatasetConfig(datasets.BuilderConfig):
|
418 |
"""BuilderConfig for Mimic4Dataset."""
|
|
|
828 |
X_val_encoded.to_csv(self.path+"/X_val_encoded.csv", index=False)
|
829 |
return self._info_encoded(X_train_encoded)
|
830 |
elif self.encoding == 'deep' :
|
831 |
+
X_train_deep = generate_split_deep(self.path+'/train_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
|
832 |
+
X_test_deep = generate_split_deep(self.path+'/test_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
|
833 |
+
X_val_deep = generate_split_deep(self.path+'/val_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
|
834 |
|
835 |
X_train_deep.to_csv(self.path+"/X_train_deep.csv", index=False)
|
836 |
X_test_deep.to_csv(self.path+"/X_test_deep.csv", index=False)
|