Update Mimic4Dataset.py
Mimic4Dataset.py (changed, +2 -2)
@@ -136,7 +136,7 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         if self.config.name == 'Readmission' and self.config_path is None : self.config_path = _CONFIG_URLS['readmission']
         if self.config.name == 'Length of Stay' and self.config_path is None : self.config_path = _CONFIG_URLS['los']
         if self.config.name == 'Mortality' and self.config_path is None : self.config_path = _CONFIG_URLS['mortality']
-
+
         repo_url='https://github.com/healthylaife/MIMIC-IV-Data-Pipeline'
         if os.path.exists(os.path.dirname(os.path.abspath('MIMIC-IV-Data-Pipeline-main'))):
             path_bench = os.path.dirname(os.path.abspath('MIMIC-IV-Data-Pipeline-main'))+'/MIMIC-IV-Data-Pipeline-main'
@@ -161,7 +161,7 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             shutil.move(file_path, path_bench+'/model')

         data_dir = path_bench + "/data/dataDic"
-        task_cohort(self.name,self.mimic_path, path_bench, self.config_path)
+        task_cohort(self.config.name,self.mimic_path, path_bench, self.config_path)
         return [
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir}),
         ]
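Note on the second hunk: on a datasets.GeneratorBasedBuilder, self.name is the builder (dataset) name, while the configuration the user selected is exposed as self.config, whose .name carries the task string ('Mortality', 'Readmission', ...). Passing self.config.name to task_cohort therefore dispatches on the chosen task rather than on the dataset name. Below is a minimal sketch, not the repository's actual code, using a hypothetical ToyTaskDataset builder to show where that attribute comes from:

# Minimal sketch (hypothetical builder, assumed names) illustrating
# self.config.name on a GeneratorBasedBuilder.
import datasets


class ToyTaskDataset(datasets.GeneratorBasedBuilder):
    # One BuilderConfig per task; the user picks one by name at load time.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Mortality"),
        datasets.BuilderConfig(name="Readmission"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"label": datasets.Value("int64")})
        )

    def _split_generators(self, dl_manager):
        # self.config.name resolves to the selected config (e.g. "Mortality"),
        # which is the value a per-task helper like task_cohort() would expect;
        # self.name would instead be the dataset/builder name.
        task = self.config.name
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"task": task}
            )
        ]

    def _generate_examples(self, task):
        # Dummy example; a real builder would read files for the given task.
        yield 0, {"label": 0}

Loading with datasets.load_dataset(<path>, name="Mortality") selects the matching BuilderConfig, and self.config.name then reflects that choice inside _split_generators.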