thbndi committed
Commit 560aed3
Parent(s): 49fd409

Update Mimic4Dataset.py

Files changed (1):
  1. Mimic4Dataset.py (+359 -72)

Mimic4Dataset.py CHANGED
@@ -8,6 +8,11 @@ import pickle
 import subprocess
 import shutil
 from urllib.request import urlretrieve
+from sklearn.model_selection import train_test_split
+from sklearn.preprocessing import LabelEncoder
+import numpy as np
+from tqdm import tqdm
+import yaml
 
 
 _DESCRIPTION = """\
@@ -29,6 +34,201 @@ _CONFIG_URLS = {'los' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/los.config',
 'phenotype' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/phenotype.config',
 'readmission' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/readmission.config'
 }
+
+
+
+def onehot(data,task,feat_cond=False,feat_proc=False,feat_out=False,feat_chart=False,feat_meds=False):
+    meds=data['MEDS']
+    proc = data['PROC']
+    out = data['OUT']
+    chart = data['CHART']
+    cond= data['COND']
+
+    cond_df=pd.DataFrame()
+    proc_df=pd.DataFrame()
+    out_df=pd.DataFrame()
+    chart_df=pd.DataFrame()
+    meds_df=pd.DataFrame()
+
+    #demographic
+    demo=pd.DataFrame(columns=['Age','gender','ethnicity','label','insurance'])
+    new_row = {'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'], 'label': data['label'], 'insurance': data['insurance']}
+    demo = pd.concat([demo, pd.DataFrame([new_row])], ignore_index=True)
+
+    ##########COND#########
+    if (feat_cond):
+        #get all conds
+        with open("./data/dict/"+task+"/condVocab", 'rb') as fp:
+            conDict = pickle.load(fp)
+        conds=pd.DataFrame(conDict,columns=['COND'])
+        features=pd.DataFrame(np.zeros([1,len(conds)]),columns=conds['COND'])
+
+        #onehot encode
+        if(cond ==[]):
+            cond_df=pd.DataFrame(np.zeros([1,len(features.columns)]),columns=features.columns)
+            cond_df=cond_df.fillna(0)
+        else:
+            cond_df=pd.DataFrame(cond,columns=['COND'])
+            cond_df['val']=1
+            cond_df=(cond_df.drop_duplicates()).pivot(columns='COND',values='val').reset_index(drop=True)
+            cond_df=cond_df.fillna(0)
+            oneh = cond_df.sum().to_frame().T
+            combined_df = pd.concat([features,oneh],ignore_index=True).fillna(0)
+            combined_oneh=combined_df.sum().to_frame().T
+            cond_df=combined_oneh
+
+    ##########PROC#########
+    if (feat_proc):
+        with open("./data/dict/"+task+"/procVocab", 'rb') as fp:
+            procDic = pickle.load(fp)
+
+        feat=proc['id']
+        proc_val=proc['value']
+        procedures=pd.DataFrame(procDic,columns=['PROC'])
+        features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC'])
+        features.columns=pd.MultiIndex.from_product([["PROC"], features.columns])
+        if feat==[]:
+            proc_df=features.fillna(0)
+        else:
+            procs=pd.DataFrame(columns=feat)
+            for p,v in zip(feat,proc_val):
+                procs[p]=v
+            procs.columns=pd.MultiIndex.from_product([["PROC"], procs.columns])
+            proc_df = pd.concat([features,procs],ignore_index=True).fillna(0)
+
+    ##########OUT#########
+    if (feat_out):
+        with open("./data/dict/"+task+"/outVocab", 'rb') as fp:
+            outDic = pickle.load(fp)
+
+        feat=out['id']
+        out_val=out['value']
+        outputs=pd.DataFrame(outDic,columns=['OUT'])
+        features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT'])
+        features.columns=pd.MultiIndex.from_product([["OUT"], features.columns])
+        if feat==[]:
+            out_df=features.fillna(0)
+        else:
+            outs=pd.DataFrame(columns=feat)
+            for o,v in zip(feat,out_val):
+                outs[o]=v
+            outs.columns=pd.MultiIndex.from_product([["OUT"], outs.columns])
+            out_df = pd.concat([features,outs],ignore_index=True).fillna(0)
+
+    ##########CHART#########
+    if (feat_chart):
+        with open("./data/dict/"+task+"/chartVocab", 'rb') as fp:
+            chartDic = pickle.load(fp)
+
+        charts=chart['val']
+        feat=charts['id']
+        chart_val=charts['value']
+        charts=pd.DataFrame(chartDic,columns=['CHART'])
+        features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART'])
+        features.columns=pd.MultiIndex.from_product([["CHART"], features.columns])
+        if feat==[]:
+            chart_df=features.fillna(0)
+        else:
+            chart=pd.DataFrame(columns=feat)
+            for c,v in zip(feat,chart_val):
+                chart[c]=v
+            chart.columns=pd.MultiIndex.from_product([["CHART"], chart.columns])
+            chart_df = pd.concat([features,chart],ignore_index=True).fillna(0)
+
+    ###MEDS
+    if (feat_meds):
+        with open("./data/dict/"+task+"/medVocab", 'rb') as fp:
+            medDic = pickle.load(fp)
+
+        feat=meds['signal']['id']
+        med_val=meds['amount']['value']
+        meds=pd.DataFrame(medDic,columns=['MEDS'])
+        features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS'])
+        features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns])
+        if feat==[]:
+            meds_df=features.fillna(0)
+        else:
+            med=pd.DataFrame(columns=feat)
+            for m,v in zip(feat,med_val):
+                med[m]=v
+            med.columns=pd.MultiIndex.from_product([["MEDS"], med.columns])
+            meds_df = pd.concat([features,med],ignore_index=True).fillna(0)
+
+    dyn_df = pd.concat([meds_df,proc_df,out_df,chart_df], axis=1)
+    return dyn_df,cond_df,demo
+
+def getXY(dyn,stat,demo,concat_cols,concat):
+    X_df=pd.DataFrame()
+    if concat:
+        dyna=dyn.copy()
+        dyna.columns=dyna.columns.droplevel(0)
+        dyna=dyna.to_numpy()
+        dyna=dyna.reshape(1,-1)
+        dyn_df=pd.DataFrame(data=dyna,columns=concat_cols)
+    else:
+        dyn_df=pd.DataFrame()
+        for key in dyn.columns.levels[0]:
+            dyn_temp=dyn[key]
+            if ((key=="CHART") or (key=="MEDS")):
+                agg=dyn_temp.aggregate("mean")
+                agg=agg.reset_index()
+            else:
+                agg=dyn_temp.aggregate("max")
+                agg=agg.reset_index()
+
+            if dyn_df.empty:
+                dyn_df=agg
+            else:
+                dyn_df=pd.concat([dyn_df,agg],axis=0)
+        dyn_df=dyn_df.T
+        dyn_df.columns = dyn_df.iloc[0]
+        dyn_df=dyn_df.iloc[1:,:]
+
+    X_df=pd.concat([dyn_df,stat],axis=1)
+    X_df=pd.concat([X_df,demo],axis=1)
+    return X_df
+
+def encoding(X_data):
+    gen_encoder = LabelEncoder()
+    eth_encoder = LabelEncoder()
+    ins_encoder = LabelEncoder()
+    gen_encoder.fit(X_data['gender'])
+    eth_encoder.fit(X_data['ethnicity'])
+    ins_encoder.fit(X_data['insurance'])
+    X_data['gender']=gen_encoder.transform(X_data['gender'])
+    X_data['ethnicity']=eth_encoder.transform(X_data['ethnicity'])
+    X_data['insurance']=ins_encoder.transform(X_data['insurance'])
+    return X_data
+
+def generate_split(df,task,concat,feat_cond=True,feat_chart=True,feat_proc=True, feat_meds=True, feat_out=False):
+    task=task.replace(" ","_")
+    X_df=pd.DataFrame()
+    #y_df=pd.DataFrame(df['label'],columns=['label'])
+
+    for hid, data in tqdm(df.iterrows()):
+        concat_cols=[]
+        sample=data
+        dyn_df,cond_df,demo=onehot(sample,task,feat_cond=feat_cond,feat_chart=feat_chart,feat_proc=feat_proc,feat_meds=feat_meds,feat_out=feat_out)
+        dyn=dyn_df.copy()
+        dyn.columns=dyn.columns.droplevel(0)
+        cols=dyn.columns
+        time=dyn.shape[0]
+        for t in range(time):
+            cols_t = [str(x) + "_"+str(t) for x in cols]
+            concat_cols.extend(cols_t)
+
+        X= getXY(dyn_df,cond_df,demo,concat_cols,concat)
+        if X_df.empty:
+            X_df=pd.concat([X_df,X],axis=1)
+        else:
+            X_df = pd.concat([X_df, X], axis=0)
+
+    X_df=X_df.fillna(0)
+    X_df = encoding(X_df)
+    #X_df=X_df.drop(['label'], axis=1)
+    return X_df
+
+
 class Mimic4DatasetConfig(datasets.BuilderConfig):
     """BuilderConfig for Mimic4Dataset."""
 
@@ -43,9 +243,12 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
 
     def __init__(self, **kwargs):
         self.mimic_path = kwargs.pop("mimic_path", None)
-
-
+        self.encoding = kwargs.pop("encoding",True)
         self.config_path = kwargs.pop("config_path",None)
+        self.test_size = kwargs.pop("test_size",0.2)
+        self.val_size = kwargs.pop("val_size",0.1)
+
+        self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out = self.create_cohort()
         super().__init__(**kwargs)
 
 
@@ -94,72 +297,23 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
 
     DEFAULT_CONFIG_NAME = "Mortality"
 
-    def _info(self):
-
-        features = datasets.Features(
-            {
-                "label": datasets.ClassLabel(names=["0", "1"]),
-                "gender": datasets.Value("string"),
-                "ethnicity": datasets.Value("string"),
-                "insurance": datasets.Value("string"),
-                "age": datasets.Value("int32"),
-                "COND": datasets.Sequence(datasets.Value("string")),
-                "MEDS": {
-                    "signal":
-                    {
-                        "id": datasets.Sequence(datasets.Value("int32")),
-                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                    }
-                    ,
-                    "rate":
-                    {
-                        "id": datasets.Sequence(datasets.Value("int32")),
-                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                    }
-                    ,
-                    "amount":
-                    {
-                        "id": datasets.Sequence(datasets.Value("int32")),
-                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                    }
-
-                },
-                "PROC": {
-                    "id": datasets.Sequence(datasets.Value("int32")),
-                    "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                },
-                "CHART":
-                {
-                    "signal" : {
-                        "id": datasets.Sequence(datasets.Value("int32")),
-                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                    },
-                    "val" : {
-                        "id": datasets.Sequence(datasets.Value("int32")),
-                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                    },
-                },
-                "OUT": {
-                    "id": datasets.Sequence(datasets.Value("int32")),
-                    "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                },
-
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager()):
+    def map_dtype(self,dtype):
+        if pd.api.types.is_integer_dtype(dtype):
+            return datasets.Value('int64')
+        elif pd.api.types.is_float_dtype(dtype):
+            return datasets.Value('float64')
+        elif pd.api.types.is_string_dtype(dtype):
+            return datasets.Value('string')
+        else:
+            raise ValueError(f"Unsupported dtype: {dtype}")
+
+    def create_cohort(self):
         if self.config.name == 'Phenotype' : self.config_path = _CONFIG_URLS['phenotype']
         if self.config.name == 'Readmission' : self.config_path = _CONFIG_URLS['readmission']
         if self.config.name == 'Length of Stay' : self.config_path = _CONFIG_URLS['los']
         if self.config.name == 'Mortality' : self.config_path = _CONFIG_URLS['mortality']
         if self.config.name in ['Phenotype Custom','Readmission Custom','Length of Stay Custom','Mortality Custom'] and self.config.name==None:
-            raise ValueError('Please select a config file')
+            raise ValueError('Please provide a config file')
 
         version = self.mimic_path.split('/')[-1]
         m = self.mimic_path.split('/')[-2]
@@ -226,18 +380,104 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         config = self.config_path.split('/')[-1]
 
         script = 'python cohort.py '+ self.config.name.replace(" ","_") +" "+ self.mimic_path+ " "+path_bench+ " "+config
-        print(script)
+
+        #####################################CHANGE##########
         if not os.path.exists(data_dir) :
-            os.system(script)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir}),
-        ]
-
+            os.system(script)
+        #####################################CHANGE##########
+        config_path='./config/'+config
+        with open(config_path) as f:
+            config = yaml.safe_load(f)
+        feat_cond, feat_chart, feat_proc, feat_meds, feat_out = config['diagnosis'], config['chart'], config['proc'], config['meds'], config['output']
 
-    def _generate_examples(self, filepath):
-        with open(filepath, 'rb') as fp:
+        with open(data_dir, 'rb') as fp:
             dataDic = pickle.load(fp)
-        for hid, data in dataDic.items():
+        data = pd.DataFrame.from_dict(dataDic)
+
+        data=data.T
+        train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42)
+        train_data, val_data = train_test_split(train_data, test_size=self.val_size, random_state=42)
+        csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
+
+        train_data.to_csv(csv_dir+'/train_data.csv',index=False)
+        val_data.to_csv(csv_dir+'/val_data.csv',index=False)
+        test_data.to_csv(csv_dir+'/test_data.csv',index=False)
+        return feat_cond, feat_chart, feat_proc, feat_meds, feat_out
+
+    ###########################################################RAW##################################################################
+
+    def _info_raw(self):
+        features = datasets.Features(
+            {
+                "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
+                "gender": datasets.Value("string"),
+                "ethnicity": datasets.Value("string"),
+                "insurance": datasets.Value("string"),
+                "age": datasets.Value("int32"),
+                "COND": datasets.Sequence(datasets.Value("string")),
+                "MEDS": {
+                    "signal":
+                    {
+                        "id": datasets.Sequence(datasets.Value("int32")),
+                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
+                    }
+                    ,
+                    "rate":
+                    {
+                        "id": datasets.Sequence(datasets.Value("int32")),
+                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
+                    }
+                    ,
+                    "amount":
+                    {
+                        "id": datasets.Sequence(datasets.Value("int32")),
+                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
+                    }
+
+                },
+                "PROC": {
+                    "id": datasets.Sequence(datasets.Value("int32")),
+                    "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
+                },
+                "CHART":
+                {
+                    "signal" : {
+                        "id": datasets.Sequence(datasets.Value("int32")),
+                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
+                    },
+                    "val" : {
+                        "id": datasets.Sequence(datasets.Value("int32")),
+                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
+                    },
+                },
+                "OUT": {
+                    "id": datasets.Sequence(datasets.Value("int32")),
+                    "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
+                },
+
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+        )
+
+    def _split_generators_raw(self):
+
+        csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.csv'}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data.csv'}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.csv'}),
+        ]
+
+    def _generate_examples_raw(self, filepath):
+        df = pd.read_csv(filepath, header=0)
+        for hid, data in df.iterrows():
             proc_features = data['Proc']
             chart_features = data['Chart']
             meds_features = data['Med']
@@ -326,3 +566,50 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
                 "MEDS" : meds
             }
 
+
+    ###########################################################ENCODED##################################################################
+
+    def _info_encoded(self):
+        X_df = pd.read_csv("./data/dict/"+self.config.name.replace(" ","_")+'/train_data_encoded.csv', header=0)
+        columns = {col: self.map_dtype(X_df[col].dtype) for col in X_df.columns}
+        features = datasets.Features(columns)
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+        )
+
+    def _split_generators_encoded(self):
+        csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data_encoded.csv'}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data_encoded.csv'}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data_encoded.csv'}),
+        ]
+
+    def _generate_examples_encoded(self, filepath):
+        df = pd.read_csv(filepath, header=0)
+        X_df=generate_split(df,self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
+        for i, row in X_df.iterrows():
+            yield i, row.to_dict()
+
+    #############################################################################################################################
+    def _info(self):
+        if self.encoding :
+            return self._info_encoded()
+        else:
+            return self._info_raw()
+
+    def _split_generators(self, dl_manager):
+        if self.encoding :
+            return self._split_generators_encoded()
+        else:
+            return self._split_generators_raw()
+
+    def _generate_examples(self, filepath):
+        if not self.encoding :
+            yield from self._generate_examples_raw(filepath)
+        else:
+            yield from self._generate_examples_encoded(filepath)
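
A minimal usage sketch of the builder after this change. The keyword names (mimic_path, encoding, test_size, val_size) and the config names come from the diff above; the repository id matches the host in _CONFIG_URLS, while the local MIMIC-IV path is purely illustrative:

    from datasets import load_dataset

    # Minimal sketch, assuming the script is loaded from the thbndi/Mimic4Dataset
    # repo (as in _CONFIG_URLS above); the mimic_path value is a hypothetical
    # local MIMIC-IV root, not a path from this commit.
    ds = load_dataset(
        "thbndi/Mimic4Dataset",
        "Mortality",                         # task config; DEFAULT_CONFIG_NAME above
        mimic_path="/path/to/mimic-iv/2.2",  # illustrative local path
        encoding=True,                       # new in this commit: encoded CSV splits
        test_size=0.2,                       # new: test fraction for train_test_split
        val_size=0.1,                        # new: validation fraction
    )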
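create_cohort parses the task .config file with yaml.safe_load and reads five feature flags. A small sketch of that step with an assumed config body (only the key names diagnosis, chart, proc, meds, output are taken from the code; the boolean values are illustrative, not copied from the repo's actual configs):

    import yaml

    # Assumed shape of a task .config file; key names mirror create_cohort above.
    cfg = yaml.safe_load("""
    diagnosis: true
    chart: true
    proc: true
    meds: true
    output: false
    """)
    feat_cond, feat_chart, feat_proc, feat_meds, feat_out = (
        cfg['diagnosis'], cfg['chart'], cfg['proc'], cfg['meds'], cfg['output'])
    print(feat_cond, feat_out)  # True False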