Update Mimic4Dataset.py

Mimic4Dataset.py  CHANGED  +9 -26
@@ -793,16 +793,6 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    def __split_generators_raw(self):
-
-        csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.pkl'}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data.pkl'}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.pkl'}),
-        ]
-
     def _generate_examples_raw(self, filepath):
         with open(filepath, 'rb') as fp:
             dataDic = pickle.load(fp)
@@ -950,14 +940,6 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    def __split_generators_deep(self):
-        data_dir = "./data/dict/"+self.config.name.replace(" ","_")
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/X_train_deep.pkl'}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/X_val_deep.pkl'}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/X_test_deep.pkl'}),
-        ]
 
     def _generate_examples_deep(self, filepath):
         with open(filepath, 'rb') as fp:
@@ -965,7 +947,7 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         task=self.config.name.replace(" ","_")
         if 'Custom' in task:
             task = task.rsplit('_', 1)[0]
-        for key, data in
+        for key, data in dico.items():
             stat, demo, meds, chart, out, proc, lab, y = getXY_deep(data, task, self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds)
             yield int(key), {
                 'label': y,
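The one-line change above completes the truncated `for key, data in` loop header so that `_generate_examples_deep` iterates the unpickled dictionary. A minimal standalone sketch of the resulting pattern, under the assumption that `dico` is a {key: record} dict loaded from `filepath` and that `getXY_deep` and the five `feat_*` flags behave as referenced in the diff (the full feature dict yielded by the real method is abridged here):

    import pickle

    def generate_examples_deep(filepath, task, getXY_deep, feats):
        # Standalone sketch of the fixed generator; the real code is a method
        # on Mimic4Dataset and yields more fields than just 'label'.
        with open(filepath, 'rb') as fp:
            dico = pickle.load(fp)          # assumed {key: per-stay record} dict
        if 'Custom' in task:
            task = task.rsplit('_', 1)[0]
        for key, data in dico.items():      # the corrected loop header
            stat, demo, meds, chart, out, proc, lab, y = getXY_deep(data, task, *feats)
            yield int(key), {'label': y}    # remaining fields omitted in this sketch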
@@ -991,14 +973,15 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         else:
             return self._info_raw()
 
-    def _split_generators(self, dl_manager):
-        if self.encoding == 'onehot' :
-            return self.__split_generators_encoded()
 
-
-
-
-
+    def _split_generators(self,):
+        csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.pkl'}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data.pkl'}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.pkl'}),
+        ]
 
     def _generate_examples(self, filepath):
 
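With this change a single `_split_generators` always points the three standard splits at the pre-built pickles under ./data/dict/<config_name>/, replacing the per-encoding variants removed above. A minimal usage sketch, assuming the pickles have already been generated and using a placeholder config name (the datasets library ordinarily passes a `dl_manager` argument to `_split_generators`, so the exact call path for the new zero-argument signature is not shown in this diff):

    from datasets import load_dataset

    # Hypothetical usage; "Mortality" stands in for whatever config names the
    # builder actually defines, and ./data/dict/<config_name>/ must already
    # contain train_data.pkl, val_data.pkl and test_data.pkl.
    ds = load_dataset("./Mimic4Dataset.py", "Mortality")
    print(ds)                          # DatasetDict with train / validation / test
    print(ds["train"][0]["label"])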