Update Mimic4Dataset.py

Mimic4Dataset.py (+51 -23)
@@ -887,37 +887,65 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
 
 
     ###########################################################ENCODED##################################################################
-
-    def _info_encoded(self):
-        X_train_encoded=generate_split(self.path+'/train_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
-        X_test_encoded=generate_split(self.path+'/test_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
-        X_val_encoded=generate_split(self.path+'/val_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
 
-
-
-
-
+    def _info_encoded(self):
+        features = datasets.Features(
+            {
+                "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
+                "onehot" : datasets.Array2D(shape=(None,None), dtype="float32"),
+            }
+        )
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=features,
             homepage=_HOMEPAGE,
             citation=_CITATION,
         )
-
-    def __split_generators_encoded(self):
-        data_dir = "./data/dict/"+self.config.name.replace(" ","_")
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/X_train_encoded.csv'}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/X_val_encoded.csv'}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/X_test_encoded.csv'}),
-        ]
 
     def _generate_examples_encoded(self, filepath):
-
-
-
+        path= './data/dict/'+self.config.name.replace(" ","_")+'/ethVocab'
+        with open(path, 'rb') as fp:
+            ethVocab = pickle.load(fp)
+
+        path= './data/dict/'+self.config.name.replace(" ","_")+'/insVocab'
+        with open(path, 'rb') as fp:
+            insVocab = pickle.load(fp)
+
+        genVocab = ['<PAD>', 'M', 'F']
+        gen_encoder = LabelEncoder()
+        eth_encoder = LabelEncoder()
+        ins_encoder = LabelEncoder()
+        gen_encoder.fit(genVocab)
+        eth_encoder.fit(ethVocab)
+        ins_encoder.fit(insVocab)
+        with open(filepath, 'rb') as fp:
+            dico = pickle.load(fp)
+
+        df = pd.DataFrame.from_dict(dico, orient='index')
+        task=self.config.name.replace(" ","_")
+        if 'Custom' in task:
+            task = task.rsplit('_', 1)[0]
+
+        for i, data in df.iterrows():
+            concat_cols=[]
+            dyn_df,cond_df,demo=concat_data(data,task,self.feat_cond,self.feat_chart,self.feat_proc, self.feat_meds, self.feat_out)
+            dyn=dyn_df.copy()
+            dyn.columns=dyn.columns.droplevel(0)
+            cols=dyn.columns
+            time=dyn.shape[0]
+            for t in range(time):
+                cols_t = [str(x) + "_"+str(t) for x in cols]
+                concat_cols.extend(cols_t)
+            demo['gender']=gen_encoder.transform(demo['gender'])
+            demo['ethnicity']=eth_encoder.transform(demo['ethnicity'])
+            demo['insurance']=ins_encoder.transform(demo['insurance'])
+            label = data['label']
+            demo=demo.drop(['label'],axis=1)
+            X= getXY(dyn_df,cond_df,demo,concat_cols,True)
+            yield int(i), {
+                "label": label,
+                "onehot": X,
+            }
     ######################################################DEEP###############################################################
     def _info_deep(self):
         features = datasets.Features(
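Note on the new `_generate_examples_encoded`: the demographic columns are integer-encoded with scikit-learn's `LabelEncoder`, fitted on the vocabularies pickled in `ethVocab`/`insVocab` plus a fixed gender vocabulary, and each example is yielded as a `label`/`onehot` pair matching the features declared in `_info_encoded`. Below is a minimal, self-contained sketch of that encoding pattern; the vocabulary values and the `demo` frame are placeholders, not the contents of the real pickled files.

```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Placeholder vocabularies; the script loads the real ones from the pickled
# ethVocab / insVocab files under ./data/dict/<config>/.
genVocab = ['<PAD>', 'M', 'F']
ethVocab = ['WHITE', 'BLACK/AFRICAN AMERICAN', 'HISPANIC/LATINO']
insVocab = ['Medicare', 'Medicaid', 'Other']

gen_encoder, eth_encoder, ins_encoder = LabelEncoder(), LabelEncoder(), LabelEncoder()
gen_encoder.fit(genVocab)
eth_encoder.fit(ethVocab)
ins_encoder.fit(insVocab)

# Toy demographics frame standing in for the `demo` block returned by concat_data().
demo = pd.DataFrame({
    "gender": ["M", "F"],
    "ethnicity": ["WHITE", "HISPANIC/LATINO"],
    "insurance": ["Medicare", "Other"],
})

# Same transform calls as in the diff: each categorical column becomes integer codes.
demo["gender"] = gen_encoder.transform(demo["gender"])
demo["ethnicity"] = eth_encoder.transform(demo["ethnicity"])
demo["insurance"] = ins_encoder.transform(demo["insurance"])
print(demo.dtypes)  # all three columns are now integer-typed
```

Fitting the encoders once on the full vocabulary files, rather than per split, keeps the integer codes consistent across the train, validation, and test generators.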
@@ -974,7 +1002,7 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         return self._info_raw()
 
 
-    def _split_generators(self):
+    def _split_generators(self, dl_manager):
         csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
 
         return [
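Note on the `_split_generators` signature change: `datasets.GeneratorBasedBuilder` invokes `self._split_generators(dl_manager)` during `download_and_prepare()`, so the method must accept the `dl_manager` argument even when, as in this script, the splits point at locally generated files and the download manager is never used. A minimal sketch of a conforming builder is shown below; the class name, paths, and feature schema are illustrative, not the ones in this repository.

```python
import datasets


class ExampleBuilder(datasets.GeneratorBasedBuilder):
    """Toy builder showing the required _split_generators(self, dl_manager) signature."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {"label": datasets.ClassLabel(num_classes=2, names=["0", "1"])}
            ),
        )

    def _split_generators(self, dl_manager):
        # dl_manager is required by the datasets API; it is unused here because the
        # split files are produced locally ahead of time.
        data_dir = "./data/dict/" + self.config.name.replace(" ", "_")  # mirrors the diff's path pattern
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir + "/train_data.pkl"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir + "/val_data.pkl"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir + "/test_data.pkl"}),
        ]

    def _generate_examples(self, filepath):
        # Stub: a real implementation reads `filepath` and yields (key, example) pairs.
        yield 0, {"label": 0}
```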