import datetime
import os
import sys
import importlib

import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm

# Make the repository root importable before pulling in project-local modules.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..'))

import disease_cohort
importlib.reload(disease_cohort)

if not os.path.exists("./data/cohort"):
    os.makedirs("./data/cohort")
if not os.path.exists("./data/summary"):
    os.makedirs("./data/summary")


def get_visit_pts(mimic4_path: str, group_col: str, visit_col: str, admit_col: str, disch_col: str, adm_visit_col: str, use_mort: bool, use_los: bool, los: int, use_admn: bool, disease_label: str, use_ICU: bool):
    """Combines the MIMIC-IV hosp/patients table with either the icu/icustays or hosp/admissions visit data.

    Parameters:
    mimic4_path: path to the folder containing the MIMIC-IV data
    group_col: patient identifier used to group visits (normally subject_id)
    visit_col: visit identifier for individual patient visits (normally hadm_id or stay_id)
    admit_col: column with the visit start time (normally admittime or intime)
    disch_col: column with the visit end time (normally dischtime or outtime)
    adm_visit_col: hospital admission identifier attached to each ICU stay (normally hadm_id); only used when use_ICU is True
    use_mort: whether the cohort is being built for a mortality prediction task
    use_los: whether the cohort is being built for a length-of-stay prediction task
    los: length-of-stay threshold in days for the length-of-stay task
    use_admn: whether the cohort is being built for a readmission prediction task
    disease_label: ICD group used to restrict readmission visits to a specific diagnosis (empty string for no filter)
    use_ICU: whether to specifically look at ICU visits in icu/icustays OR at general admissions from hosp/admissions
    """

    visit = None
    if use_ICU:
        visit = pd.read_csv(mimic4_path + "icu/icustays.csv.gz", compression='gzip', header=0, index_col=None, parse_dates=[admit_col, disch_col])
        if use_admn:
            # icustays has no in-visit death flag, so pull dod from hosp/patients and drop
            # stays where the patient died before discharge (no readmission is possible).
            pts = pd.read_csv(mimic4_path + "hosp/patients.csv.gz", compression='gzip', header=0, index_col=None, usecols=['subject_id', 'dod'], parse_dates=['dod'])
            visit = visit.merge(pts, how='inner', left_on='subject_id', right_on='subject_id')
            visit = visit.loc[(visit.dod.isna()) | (visit.dod >= visit[disch_col])]
            if len(disease_label):
                hids = disease_cohort.extract_diag_cohort(visit['hadm_id'], disease_label, mimic4_path)
                visit = visit[visit['hadm_id'].isin(hids['hadm_id'])]
                print("[ READMISSION DUE TO " + disease_label + " ]")
    else:
        visit = pd.read_csv(mimic4_path + "hosp/admissions.csv.gz", compression='gzip', header=0, index_col=None, parse_dates=[admit_col, disch_col])
        visit[admit_col] = pd.to_datetime(visit[admit_col])
        visit[disch_col] = pd.to_datetime(visit[disch_col])
        # Length of stay in whole days; hosp/admissions has no precomputed los column.
        visit['los'] = (visit[disch_col] - visit[admit_col]).dt.days

        if use_admn:
            # Remove hospitalizations that ended in death; readmission is impossible for such visits.
            visit = visit.loc[visit.hospital_expire_flag == 0]
        if len(disease_label):
            hids = disease_cohort.extract_diag_cohort(visit['hadm_id'], disease_label, mimic4_path)
            visit = visit[visit['hadm_id'].isin(hids['hadm_id'])]
            print("[ READMISSION DUE TO " + disease_label + " ]")

    pts = pd.read_csv(
        mimic4_path + "hosp/patients.csv.gz", compression='gzip', header=0, index_col=None,
        usecols=[group_col, 'anchor_year', 'anchor_age', 'anchor_year_group', 'dod', 'gender']
    )
    pts['yob'] = pts['anchor_year'] - pts['anchor_age']  # year of birth, used to keep adult visits only
    # Anchor year that corresponds to the 2017-2019 anchor_year_group; used later to flag visits whose
    # prediction window could extend beyond the dataset's 2008-2019 time range.
    pts['min_valid_year'] = pts['anchor_year'] + (2019 - pts['anchor_year_group'].str.slice(start=-4).astype(int))

    if use_ICU:
        visit_pts = visit[[group_col, visit_col, adm_visit_col, admit_col, disch_col, 'los']].merge(
            pts[[group_col, 'anchor_year', 'anchor_age', 'yob', 'min_valid_year', 'dod', 'gender']], how='inner', left_on=group_col, right_on=group_col
        )
    else:
        visit_pts = visit[[group_col, visit_col, admit_col, disch_col, 'los']].merge(
            pts[[group_col, 'anchor_year', 'anchor_age', 'yob', 'min_valid_year', 'dod', 'gender']], how='inner', left_on=group_col, right_on=group_col
        )

    # Keep adult patients only; anchor_age is the patient's age in their anchor_year.
    visit_pts['Age'] = visit_pts['anchor_age']
    visit_pts = visit_pts.loc[visit_pts['Age'] >= 18]

    # Add demographic data (insurance and race) from hosp/admissions.
    eth = pd.read_csv(mimic4_path + "hosp/admissions.csv.gz", compression='gzip', header=0, usecols=['hadm_id', 'insurance', 'race'], index_col=None)
    visit_pts = visit_pts.merge(eth, how='inner', left_on='hadm_id', right_on='hadm_id')

    if use_ICU:
        return visit_pts[[group_col, visit_col, adm_visit_col, admit_col, disch_col, 'los', 'min_valid_year', 'dod', 'Age', 'gender', 'race', 'insurance']]
    else:
        return visit_pts.dropna(subset=['min_valid_year'])[[group_col, visit_col, admit_col, disch_col, 'los', 'min_valid_year', 'dod', 'Age', 'gender', 'race', 'insurance']]
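
# Example (illustrative only): how get_visit_pts is typically called for an ICU cohort.
# The path below is hypothetical; point mimic4_path at a local MIMIC-IV 2.x folder that
# contains the hosp/ and icu/ subdirectories.
#
#   pts = get_visit_pts(
#       mimic4_path="./mimic-iv-2.0/", group_col="subject_id", visit_col="stay_id",
#       admit_col="intime", disch_col="outtime", adm_visit_col="hadm_id",
#       use_mort=True, use_los=False, los=0, use_admn=False, disease_label="",
#       use_ICU=True,
#   )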
|
|
|
|
def validate_row(row, ctrl, invalid, max_year, disch_col, valid_col, gap):
    """Checks if a visit's prediction window potentially extends beyond the dataset range (2008-2019).
    An 'invalid' row is NOT guaranteed to be outside the range, only potentially outside, because
    MIMIC-IV de-identification shifts dates within 3-year anchor groups.

    To be invalid, the year at the end of the prediction window must extend both beyond the maximum
    year seen for the patient AND beyond the year that corresponds to the patient's 2017-2019
    anchor year range."""
    pred_year = (row[disch_col] + gap).year
    if max_year < pred_year and pred_year > row[valid_col]:
        # DataFrame.append was removed in pandas 2.0; concatenate the row as a one-row frame instead.
        invalid = pd.concat([invalid, row.to_frame().T])
    else:
        ctrl = pd.concat([ctrl, row.to_frame().T])
    return ctrl, invalid
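
# Worked example of the validity rule above (numbers are hypothetical): for a visit whose discharge
# plus gap lands in shifted year 2130, with max_year 2130 and min_valid_year 2130, the row stays in
# ctrl. If the window instead ends in 2131 (greater than both max_year and min_valid_year), the
# prediction window could spill past 2019 in real time, so the row is routed to invalid.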
|
|
|
|
def partition_by_los(df: pd.DataFrame, los: int, group_col: str, visit_col: str, admit_col: str, disch_col: str, valid_col: str):
    """Labels visits by length of stay: label 1 if the visit's los exceeds the `los` threshold (in days), else 0.
    Visits missing admit, discharge, or los information are returned separately as invalid."""

    invalid = df.loc[(df[admit_col].isna()) | (df[disch_col].isna()) | (df['los'].isna())]
    cohort = df.loc[(~df[admit_col].isna()) & (~df[disch_col].isna()) & (~df['los'].isna())]

    pos_cohort = cohort[cohort['los'] > los].copy()
    neg_cohort = cohort[cohort['los'] <= los].copy()
    neg_cohort = neg_cohort.fillna(0)
    pos_cohort = pos_cohort.fillna(0)

    pos_cohort['label'] = 1
    neg_cohort['label'] = 0

    cohort = pd.concat([pos_cohort, neg_cohort], axis=0)
    cohort = cohort.sort_values(by=[group_col, admit_col])

    print("[ LOS LABELS FINISHED ]")
    return cohort, invalid
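
# Minimal sketch (synthetic data, not MIMIC-IV) of the LOS labelling: with a 3-day threshold,
# a 5-day stay gets label 1 and a 2-day stay gets label 0.
#
#   demo = pd.DataFrame({
#       "subject_id": [1, 2],
#       "hadm_id": [10, 20],
#       "admittime": pd.to_datetime(["2130-01-01", "2130-02-01"]),
#       "dischtime": pd.to_datetime(["2130-01-06", "2130-02-03"]),
#       "los": [5, 2],
#       "min_valid_year": [2130, 2130],
#   })
#   labelled, _ = partition_by_los(demo, 3, "subject_id", "hadm_id", "admittime", "dischtime", "min_valid_year")
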
def partition_by_readmit(df: pd.DataFrame, gap: datetime.timedelta, group_col: str, visit_col: str, admit_col: str, disch_col: str, valid_col: str):
    """Labels individual visits according to whether a readmission occurred within the specified `gap`.
    For a given visit, another visit by the same patient must start within the gap window for a positive
    readmission label. The window starts at the visit's disch_col time and is compared against the
    admit_col of subsequent visits."""

    case_rows = []   # visits followed by a readmission within the gap window
    ctrl_rows = []   # visits without such a readmission
    invalid = pd.DataFrame()

    # Iterate over each patient's visits in chronological order. A visit is a case if any later visit
    # starts within `gap` of its discharge; a patient's last visit is always a control, since no later
    # visit exists to qualify it as a readmission.
    grouped = df.sort_values(by=[group_col, admit_col]).groupby(group_col)
    for subject, group in tqdm(grouped):
        if group.shape[0] <= 1:
            ctrl_rows.append(group.iloc[0])
        else:
            for idx in range(group.shape[0] - 1):
                visit_time = group.iloc[idx][disch_col]
                if group.loc[
                    (group[admit_col] > visit_time) &
                    (group[admit_col] - visit_time <= gap)
                ].shape[0] >= 1:
                    case_rows.append(group.iloc[idx])
                else:
                    ctrl_rows.append(group.iloc[idx])
            ctrl_rows.append(group.iloc[-1])

    # DataFrame.append was removed in pandas 2.0; build the frames from the collected rows instead.
    case = pd.DataFrame(case_rows)
    ctrl = pd.DataFrame(ctrl_rows)

    print("[ READMISSION LABELS FINISHED ]")
    return case, ctrl, invalid
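
# Minimal sketch (synthetic data, not MIMIC-IV) of the readmission partition: subject 1 is readmitted
# 10 days after the first discharge, so that first visit becomes a case; the second visit and subject
# 2's only visit end up in ctrl.
#
#   demo = pd.DataFrame({
#       "subject_id": [1, 1, 2],
#       "hadm_id": [10, 11, 20],
#       "admittime": pd.to_datetime(["2130-01-01", "2130-01-15", "2130-03-01"]),
#       "dischtime": pd.to_datetime(["2130-01-05", "2130-01-20", "2130-03-04"]),
#   })
#   case, ctrl, _ = partition_by_readmit(
#       demo, datetime.timedelta(days=30), "subject_id", "hadm_id", "admittime", "dischtime", "min_valid_year"
#   )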
|
|
|
|
def partition_by_mort(df: pd.DataFrame, group_col: str, visit_col: str, admit_col: str, disch_col: str, death_col: str):
    """Labels individual visits according to whether a death occurred between the visit's
    admit_col and disch_col times."""

    invalid = df.loc[(df[admit_col].isna()) | (df[disch_col].isna())]
    cohort = df.loc[(~df[admit_col].isna()) & (~df[disch_col].isna())].copy()

    cohort['label'] = 0
    pos_cohort = cohort[~cohort[death_col].isna()].copy()
    neg_cohort = cohort[cohort[death_col].isna()].copy()
    neg_cohort = neg_cohort.fillna(0)
    pos_cohort = pos_cohort.fillna(0)
    pos_cohort[death_col] = pd.to_datetime(pos_cohort[death_col])

    # Positive label only if the recorded date of death falls within the visit window.
    pos_cohort['label'] = np.where((pos_cohort[death_col] >= pos_cohort[admit_col]) & (pos_cohort[death_col] <= pos_cohort[disch_col]), 1, 0)
    pos_cohort['label'] = pos_cohort['label'].astype("Int32")

    cohort = pd.concat([pos_cohort, neg_cohort], axis=0)
    cohort = cohort.sort_values(by=[group_col, admit_col])

    print("[ MORTALITY LABELS FINISHED ]")
    return cohort, invalid
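
# Minimal sketch (synthetic data, not MIMIC-IV): the first visit spans the recorded date of death,
# so it is labelled 1; the second patient has no dod and is labelled 0.
#
#   demo = pd.DataFrame({
#       "subject_id": [1, 2],
#       "hadm_id": [10, 20],
#       "admittime": pd.to_datetime(["2130-01-01", "2130-02-01"]),
#       "dischtime": pd.to_datetime(["2130-01-06", "2130-02-03"]),
#       "dod": ["2130-01-05", None],
#   })
#   labelled, _ = partition_by_mort(demo, "subject_id", "hadm_id", "admittime", "dischtime", "dod")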
|
|
|
|
def get_case_ctrls(df: pd.DataFrame, gap: int, group_col: str, visit_col: str, admit_col: str, disch_col: str, valid_col: str, death_col: str, use_mort=False, use_admn=False, use_los=False) -> pd.DataFrame:
    """Handles the logic for creating the labelled cohort based on the arguments passed to extract_data().

    Parameters:
    df: dataframe with patient-visit data
    gap: time interval in days for readmission, or the length-of-stay threshold in days
    group_col: patient identifier to group patients (normally subject_id)
    visit_col: visit identifier for individual patient visits (normally hadm_id or stay_id)
    admit_col: column for visit start time (normally admittime or intime)
    disch_col: column for visit end time (normally dischtime or outtime)
    valid_col: generated column containing the patient's year that corresponds to the 2017-2019 anchor year range
    death_col: date-of-death column
    use_mort / use_admn / use_los: flags selecting the mortality, readmission, or length-of-stay task
    """

    case = None
    ctrl = None
    invalid = None

    if use_mort:
        return partition_by_mort(df, group_col, visit_col, admit_col, disch_col, death_col)
    elif use_admn:
        gap = datetime.timedelta(days=gap)
        # Labels are assigned after partitioning, once case/control membership is known.
        case, ctrl, invalid = partition_by_readmit(df, gap, group_col, visit_col, admit_col, disch_col, valid_col)

        case['label'] = np.ones(case.shape[0]).astype(int)
        ctrl['label'] = np.zeros(ctrl.shape[0]).astype(int)

        return pd.concat([case, ctrl], axis=0), invalid
    elif use_los:
        return partition_by_los(df, gap, group_col, visit_col, admit_col, disch_col, valid_col)
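
# Sketch of how the task flags select the partitioning (arguments mirror the calls made in
# extract_data below; `pts` would come from get_visit_pts):
#
#   cohort, invalid = get_case_ctrls(
#       pts, 30, "subject_id", "hadm_id", "admittime", "dischtime",
#       "min_valid_year", "dod", use_mort=False, use_admn=True, use_los=False,
#   )  # 30-day readmission labels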
|
|
|
|
|
|
def extract_data(use_ICU: str, label: str, time: int, icd_code: str, root_dir, mimic_path, disease_label, cohort_output=None, summary_output=None):
    """Extracts the labelled cohort and a summary file from MIMIC-IV data based on the provided parameters.

    Parameters:
    use_ICU: "ICU" to build the cohort from ICU stays; any other value uses hospital admissions
    label: prediction task; one of 'Mortality', 'Readmission', or 'Length of Stay'
    time: readmission gap in days, or length-of-stay threshold in days (ignored for mortality)
    icd_code: ICD group used to restrict admissions, or "No Disease Filter"
    root_dir: project root directory (not used by this function)
    mimic_path: path to the folder containing the MIMIC-IV data
    disease_label: ICD group for disease-specific readmission, or '' for no filter
    cohort_output: name of the labelled cohort output file (generated if not given)
    summary_output: name of the summary output file (generated if not given)"""
    print("===========MIMIC-IV v2============")
    if not cohort_output:
        cohort_output = "cohort_" + use_ICU.lower() + "_" + label.lower().replace(" ", "_") + "_" + str(time) + "_" + disease_label
    if not summary_output:
        summary_output = "summary_" + use_ICU.lower() + "_" + label.lower().replace(" ", "_") + "_" + str(time) + "_" + disease_label

    if icd_code == "No Disease Filter":
        if len(disease_label):
            print(f"EXTRACTING FOR: | {use_ICU.upper()} | {label.upper()} DUE TO {disease_label.upper()} | {str(time)} |")
        else:
            print(f"EXTRACTING FOR: | {use_ICU.upper()} | {label.upper()} | {str(time)} |")
    else:
        if len(disease_label):
            print(f"EXTRACTING FOR: | {use_ICU.upper()} | {label.upper()} DUE TO {disease_label.upper()} | ADMITTED DUE TO {icd_code.upper()} | {str(time)} |")
        else:
            print(f"EXTRACTING FOR: | {use_ICU.upper()} | {label.upper()} | ADMITTED DUE TO {icd_code.upper()} | {str(time)} |")

    cohort, invalid = None, None
    pts = None
    ICU = use_ICU
    group_col, visit_col, admit_col, disch_col, death_col, adm_visit_col = "", "", "", "", "", ""

    use_mort = label == 'Mortality'
    use_admn = label == 'Readmission'
    los = 0
    use_los = label == 'Length of Stay'

    if use_los:
        los = time
    use_ICU = use_ICU == "ICU"
    use_disease = icd_code != "No Disease Filter"

    if use_ICU:
        group_col = 'subject_id'
        visit_col = 'stay_id'
        admit_col = 'intime'
        disch_col = 'outtime'
        death_col = 'dod'
        adm_visit_col = 'hadm_id'
    else:
        group_col = 'subject_id'
        visit_col = 'hadm_id'
        admit_col = 'admittime'
        disch_col = 'dischtime'
        death_col = 'dod'
|
|
    pts = get_visit_pts(
        mimic4_path=mimic_path,
        group_col=group_col,
        visit_col=visit_col,
        admit_col=admit_col,
        disch_col=disch_col,
        adm_visit_col=adm_visit_col,
        use_mort=use_mort,
        use_los=use_los,
        los=los,
        use_admn=use_admn,
        disease_label=disease_label,
        use_ICU=use_ICU
    )

    cols = [group_col, visit_col, admit_col, disch_col, 'Age', 'gender', 'ethnicity', 'insurance', 'label']
|
|
    if use_mort:
        cols.append(death_col)
        cohort, invalid = get_case_ctrls(pts, None, group_col, visit_col, admit_col, disch_col, 'min_valid_year', death_col, use_mort=True, use_admn=False, use_los=False)
    elif use_admn:
        interval = time
        cohort, invalid = get_case_ctrls(pts, interval, group_col, visit_col, admit_col, disch_col, 'min_valid_year', death_col, use_mort=False, use_admn=True, use_los=False)
    elif use_los:
        cohort, invalid = get_case_ctrls(pts, los, group_col, visit_col, admit_col, disch_col, 'min_valid_year', death_col, use_mort=False, use_admn=False, use_los=True)

    if use_ICU:
        cols.append(adm_visit_col)

    if use_disease:
        hids = disease_cohort.extract_diag_cohort(cohort['hadm_id'], icd_code, mimic_path)
        cohort = cohort[cohort['hadm_id'].isin(hids['hadm_id'])]
        cohort_output = cohort_output + "_" + icd_code
        summary_output = summary_output + "_" + icd_code

    # MIMIC-IV 2.x renamed the admissions 'ethnicity' column to 'race'; rename back for downstream code.
    cohort = cohort.rename(columns={"race": "ethnicity"})
    cohort[cols].to_csv("./data/cohort/" + cohort_output + ".csv.gz", index=False, compression='gzip')
    print("[ COHORT SUCCESSFULLY SAVED ]")

    summary = "\n".join([
        f"{label} FOR {ICU} DATA",
        f"# Admission Records: {cohort.shape[0]}",
        f"# Patients: {cohort[group_col].nunique()}",
        f"# Positive cases: {cohort[cohort['label']==1].shape[0]}",
        f"# Negative cases: {cohort[cohort['label']==0].shape[0]}"
    ])

    with open(f"./data/cohort/{summary_output}.txt", "w") as f:
        f.write(summary)

    print("[ SUMMARY SUCCESSFULLY SAVED ]")

    return cohort_output
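
# Example (illustrative only; paths and arguments are hypothetical): build a 30-day ICU readmission
# cohort with no disease filter. The function writes ./data/cohort/<cohort_output>.csv.gz plus a .txt
# summary and returns the cohort file name.
#
#   cohort_name = extract_data(
#       use_ICU="ICU", label="Readmission", time=30, icd_code="No Disease Filter",
#       root_dir=".", mimic_path="./mimic-iv-2.0/", disease_label="",
#   )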
|
|
|
|