thbndi committed on
Commit
ce38201
·
1 Parent(s): c819f7e

Update Mimic4Dataset.py

Browse files
Files changed (1) hide show
  1. Mimic4Dataset.py +29 -21
Mimic4Dataset.py CHANGED
@@ -200,11 +200,11 @@ def encoding(X_data):
200
  X_data['insurance']=ins_encoder.transform(X_data['insurance'])
201
  return X_data
202
 
203
- def generate_split(df,task,concat,feat_cond=True,feat_chart=True,feat_proc=True, feat_meds=True, feat_out=False):
 
204
  task=task.replace(" ","_")
205
  X_df=pd.DataFrame()
206
  #y_df=pd.DataFrame(df['label'],columns=['label'])
207
-
208
  for hid, data in tqdm(df.iterrows()):
209
  concat_cols=[]
210
  sample=data
@@ -222,7 +222,6 @@ def generate_split(df,task,concat,feat_cond=True,feat_chart=True,feat_proc=True,
222
  X_df=pd.concat([X_df,X],axis=1)
223
  else:
224
  X_df = pd.concat([X_df, X], axis=0)
225
-
226
  X_df=X_df.fillna(0)
227
  X_df = encoding(X_df)
228
  #X_df=X_df.drop(['label'], axis=1)
@@ -250,8 +249,15 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
250
 
251
 
252
  super().__init__(**kwargs)
253
- self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out = self.create_cohort()
254
-
 
 
 
 
 
 
 
255
 
256
  BUILDER_CONFIGS = [
257
  Mimic4DatasetConfig(
@@ -399,25 +405,24 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
399
  train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42)
400
  train_data, val_data = train_test_split(test_data, test_size=self.val_size, random_state=42)
401
 
402
- csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
403
-
404
- train_data.to_csv(csv_dir+'/train_data.csv',index=False)
405
- val_data.to_csv(csv_dir+'/val_data.csv',index=False)
406
- test_data.to_csv(csv_dir+'/test_data.csv',index=False)
407
-
408
  train_dic = train_data.to_dict('index')
409
  test_dic = test_data.to_dict('index')
410
  val_dic = val_data.to_dict('index')
411
 
412
- with open(csv_dir+'/train_data.pkl', 'wb') as f:
 
 
 
 
413
  pickle.dump(train_dic, f)
414
- with open(csv_dir+'/val_data.pkl', 'wb') as f:
415
  pickle.dump(val_dic, f)
416
- with open(csv_dir+'/test_data.pkl', 'wb') as f:
417
  pickle.dump(test_dic, f)
418
 
419
 
420
- return feat_cond, feat_chart, feat_proc, feat_meds, feat_out
421
 
422
  ###########################################################RAW##################################################################
423
 
@@ -585,7 +590,8 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
585
  ###########################################################ENCODED##################################################################
586
 
587
  def _info_encoded(self):
588
- X_df = pd.read_csv("./data/dict/"+self.config.name.replace(" ","_")+'/train_data_encoded.csv', header=0)
 
589
  columns = {col: self.map_dtype(X_df[col].dtype) for col in X_df.columns}
590
  features = datasets.Features(columns)
591
  return datasets.DatasetInfo(
@@ -596,17 +602,19 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
596
  )
597
 
598
  def __split_generators_encoded(self):
599
- csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
600
 
601
  return [
602
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data_encoded.csv'}),
603
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data_encoded.csv'}),
604
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data_encoded.csv'}),
605
  ]
606
 
607
  def _generate_examples_encoded(self, filepath):
608
  df = pd.read_csv(filepath, header=0)
609
- X_df=generate_split(df,self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
 
 
610
 
611
  #############################################################################################################################
612
  def _info(self):
 
200
  X_data['insurance']=ins_encoder.transform(X_data['insurance'])
201
  return X_data
202
 
203
+ def generate_split(path,task,concat,feat_cond=True,feat_chart=True,feat_proc=True, feat_meds=True, feat_out=False):
204
+ df = pd.DataFrame.from_dict(path)
205
  task=task.replace(" ","_")
206
  X_df=pd.DataFrame()
207
  #y_df=pd.DataFrame(df['label'],columns=['label'])
 
208
  for hid, data in tqdm(df.iterrows()):
209
  concat_cols=[]
210
  sample=data
 
222
  X_df=pd.concat([X_df,X],axis=1)
223
  else:
224
  X_df = pd.concat([X_df, X], axis=0)
 
225
  X_df=X_df.fillna(0)
226
  X_df = encoding(X_df)
227
  #X_df=X_df.drop(['label'], axis=1)
 
249
 
250
 
251
  super().__init__(**kwargs)
252
+ self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out,path = self.create_cohort()
253
+
254
+ if self.encoding:
255
+ X_train_encoded=generate_split(path+'/train_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
256
+ X_test_encoded=generate_split(path+'/test_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
257
+ X_val_encoded=generate_split(path+'/val_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
258
+ X_train_encoded.to_csv(path+"/X_train_encoded.csv", index=False)
259
+ X_test_encoded.to_csv(path+"/X_test_encoded.csv", index=False)
260
+ X_val_encoded.to_csv(path+"/X_val_encoded.csv", index=False)
261
 
262
  BUILDER_CONFIGS = [
263
  Mimic4DatasetConfig(
 
405
  train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42)
406
  train_data, val_data = train_test_split(test_data, test_size=self.val_size, random_state=42)
407
 
408
+ dict_dir = "./data/dict/"+self.config.name.replace(" ","_")
 
 
 
 
 
409
  train_dic = train_data.to_dict('index')
410
  test_dic = test_data.to_dict('index')
411
  val_dic = val_data.to_dict('index')
412
 
413
+ train_path = dict_dir+'/train_data.pkl'
414
+ test_path = dict_dir+'/test_data.pkl'
415
+ val_path = dict_dir+'/val_data.pkl'
416
+
417
+ with open(train_path, 'wb') as f:
418
  pickle.dump(train_dic, f)
419
+ with open(val_path, 'wb') as f:
420
  pickle.dump(val_dic, f)
421
+ with open(test_path, 'wb') as f:
422
  pickle.dump(test_dic, f)
423
 
424
 
425
+ return feat_cond, feat_chart, feat_proc, feat_meds, feat_out, dict_dir
426
 
427
  ###########################################################RAW##################################################################
428
 
 
590
  ###########################################################ENCODED##################################################################
591
 
592
  def _info_encoded(self):
593
+ #X_df = pd.read_csv("./data/dict/"+self.config.name.replace(" ","_")+'/train_data_encoded.csv', header=0)
594
+ X_df = pd.read_csv("./data/dict/"+self.config.name.replace(" ","_")+'/X_train_encoded.csv', header=0)
595
  columns = {col: self.map_dtype(X_df[col].dtype) for col in X_df.columns}
596
  features = datasets.Features(columns)
597
  return datasets.DatasetInfo(
 
602
  )
603
 
604
  def __split_generators_encoded(self):
605
+ data_dir = "./data/dict/"+self.config.name.replace(" ","_")
606
 
607
  return [
608
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/X_train_encoded.csv'}),
609
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/X_val_encoded.csv'}),
610
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/X_test_encoded.csv'}),
611
  ]
612
 
613
  def _generate_examples_encoded(self, filepath):
614
  df = pd.read_csv(filepath, header=0)
615
+ for i, row in df.iterrows():
616
+ yield i, row.to_dict('index')
617
+
618
 
619
  #############################################################################################################################
620
  def _info(self):