Upload fibropred_model.py
Browse files
scripts/fibropred_model.py  (+46 −14)
CHANGED
@@ -9,12 +9,18 @@ from sklearn.metrics import classification_report, accuracy_score, roc_curve, auc
 from sklearn.feature_selection import SelectFromModel
 import matplotlib.pyplot as plt
 import seaborn as sns
+from scipy.stats import zscore
+from imblearn.over_sampling import SMOTE
+import os
+os.environ["LOKY_MAX_CPU_COUNT"] = "4"  # Change "4" to the desired number of cores
+
 
 # Load dataset
 def load_data(file_path):
     df = pd.read_excel(file_path, header=1)
     return df
 
+
 # Preprocess data including categorical variables
 def preprocess_data_with_categoricals(df):
     # Replace -9 with NaN for missing values
@@ -24,10 +30,6 @@ def preprocess_data_with_categoricals(df):
     missing_percentage = df.isnull().sum() / len(df) * 100
     df = df.drop(columns=missing_percentage[missing_percentage > 50].index)
 
-    # Drop specific columns
-    drop_columns = ['ProgressiveDisease', 'Final diagnosis', 'Transplantation date', 'Cause of death', 'Date of death', 'COD NUMBER']
-    df = df.drop(columns=[col for col in drop_columns if col in df.columns])
-
     # Impute missing values
     imputer = SimpleImputer(strategy='median')
     numeric_cols = df.select_dtypes(include=['number']).columns
@@ -59,15 +61,22 @@ def apply_one_hot_encoding(df):
     categorical_cols = df.select_dtypes(include=['object']).columns
     df = pd.get_dummies(df, columns=categorical_cols, drop_first=True)
     return df
+# Remove outliers based on Z-score
+def remove_outliers(df, numeric_cols, z_threshold=4):
+    for col in numeric_cols:
+        z_scores = zscore(df[col])
+        df = df[(np.abs(z_scores) < z_threshold) | (pd.isnull(z_scores))]
+    return df
 
 # Select predictors using feature importance
 def select_important_features(X, y, threshold=0.03):
     model = RandomForestClassifier(random_state=42)
     model.fit(X, y)
     selector = SelectFromModel(model, threshold=threshold, prefit=True)
-
-    selected_features = X.columns[
-
+    selected_mask = selector.get_support()
+    selected_features = X.columns[selected_mask]
+    X_reduced = X.loc[:, selected_features]
+    return X_reduced, selected_features
 
 # Visualize feature importance
 def plot_feature_importance(model, features, target):
@@ -122,33 +131,56 @@ def plot_roc_auc(model, X_test, y_test, target):
 
 # Save trained model
 def save_model(model, target, selected_features):
-
     if not os.path.exists("models"):
         os.makedirs("models")
     file_name = f"models/{target}_random_forest_model.pkl"
     joblib.dump({'model': model, 'features': selected_features}, file_name)
     print(f"Model and features saved to {file_name}")
 
-
 # Main pipeline
 def main():
     file_path = 'FibroPredCODIFICADA.xlsx'
     df = load_data(file_path)
 
-    #
-    target_columns = ['Death', '
+    # Include 'ProgressiveDisease' in target columns
+    target_columns = ['Death', 'Binary diagnosis', 'Necessity of transplantation', 'Progressive disease']
+
+    # Define predictors to remove for each target
+    predictors_to_remove_dict = {
+        'Death': ['Final diagnosis', 'Transplantation date', 'Cause of death', 'Date of death', 'COD NUMBER','FVC (L) 1 year after diagnosis',
+                  'FVC (%) 1 year after diagnosis','DLCO (%) 1 year after diagnosis'],
+        'Binary diagnosis': ['ProgressiveDisease', 'Final diagnosis', 'Transplantation date', 'Cause of death', 'Date of death', 'COD NUMBER','Pirfenidone','Nintedanib',
+                             'Antifibrotic Drug','Prednisone','Mycophenolate','FVC (L) 1 year after diagnosis','FVC (%) 1 year after diagnosis',
+                             'DLCO (%) 1 year after diagnosis','RadioWorsening2y'],
+        'Necessity of transplantation': ['ProgressiveDisease', 'Final diagnosis', 'Transplantation date', 'Cause of death', 'Date of death', 'COD NUMBER','Age at diagnosis'],
+        'Progressive disease': ['ProgressiveDisease', 'Final diagnosis', 'Transplantation date', 'Cause of death', 'Date of death', 'COD NUMBER', 'FVC (L) 1 year after diagnosis',
+                                'FVC (%) 1 year after diagnosis','DLCO (%) 1 year after diagnosis','RadioWorsening2y']
+    }
 
     # Preprocess data
     df, numeric_cols, categorical_cols = preprocess_data_with_categoricals(df)
 
     for target in target_columns:
         print(f"Processing target: {target}")
-
+        # Apply outlier removal only for specific targets
+        if target in ['Necessity of transplantation', 'Progressive disease']:
+            print(f"Removing outliers for target: {target}")
+            df = remove_outliers(df, numeric_cols)
+
+        # Get predictors to remove for the current target
+        predictors_to_remove = predictors_to_remove_dict.get(target, [])
+
+        X = df[numeric_cols].drop(columns=target_columns + predictors_to_remove, errors='ignore')
        y = df[target]
 
         # Split data
         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
 
+        # Apply SMOTE only for specific targets
+        if target in ['Binary diagnosis', 'Necessity of transplantation']:
+            print(f"Applying SMOTE to balance the training set for target: {target}")
+            smote = SMOTE(random_state=42)
+            X_train, y_train = smote.fit_resample(X_train, y_train)
         # Select important features
         X_train_selected, selected_features = select_important_features(X_train, y_train)
         X_test_selected = X_test[selected_features]
@@ -166,9 +198,9 @@ def main():
         model.fit(X_train_selected, y_train)
 
         # Cross-validation to check overfitting
-        cv = StratifiedKFold(n_splits=
+        cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
         cv_scores = cross_val_score(model, X_train_selected, y_train, cv=cv, scoring='accuracy')
-        train_scores = cross_val_score(model, X_train_selected, y_train, cv=
+        train_scores = cross_val_score(model, X_train_selected, y_train, cv=10, scoring='accuracy')
         y_pred_test = model.predict(X_test_selected)
         test_score = accuracy_score(y_test, y_pred_test)
 
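The new remove_outliers helper filters rows column by column on the SciPy z-score. A minimal sketch of the same filter on toy data (column name and values are made up; the cut here is 2 rather than the committed default of 4, because the largest attainable |z| in an n-row sample is sqrt(n-1), so a threshold of 4 can never trigger on a frame this small):

    import numpy as np
    import pandas as pd
    from scipy.stats import zscore

    # Toy column: one clear outlier among otherwise tight values
    df = pd.DataFrame({'FVC (L)': [2.8, 3.1, 2.9, 3.0, 3.2, 2.7, 3.0, 12.0]})
    z = zscore(df['FVC (L)'])   # population z-scores (ddof=0)
    print(df[np.abs(z) < 2])    # the 12.0 row is dropped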
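select_important_features now returns both the reduced matrix and the retained column names, completing the previously truncated return path. A self-contained sketch of the same prefit SelectFromModel pattern on synthetic data (shapes and column names are illustrative):

    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.feature_selection import SelectFromModel

    X, y = make_classification(n_samples=200, n_features=10, n_informative=3, random_state=42)
    X = pd.DataFrame(X, columns=[f"f{i}" for i in range(10)])

    forest = RandomForestClassifier(random_state=42).fit(X, y)
    selector = SelectFromModel(forest, threshold=0.03, prefit=True)  # wraps the already-fitted forest
    kept = X.columns[selector.get_support()]  # boolean mask over the original columns
    print(list(kept))  # features whose impurity-based importance exceeds 0.03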
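The predictor matrix drops every target column plus the per-target leakage list; errors='ignore' keeps the drop safe when a listed column was already removed upstream (for example by the >50% missingness filter). A tiny demonstration of that pandas behaviour:

    import pandas as pd

    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    print(df.drop(columns=['b', 'not_present'], errors='ignore'))  # no KeyError; only 'b' is dropped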
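SMOTE is applied after train_test_split, so only the training fold is resampled and the test set keeps its natural class balance. A minimal sketch of the resampling step on synthetic imbalanced data:

    from collections import Counter
    from imblearn.over_sampling import SMOTE
    from sklearn.datasets import make_classification

    X, y = make_classification(n_samples=300, weights=[0.9, 0.1], random_state=42)
    print(Counter(y))  # roughly 9:1 imbalance
    X_res, y_res = SMOTE(random_state=42).fit_resample(X, y)
    print(Counter(y_res))  # both classes now equal in size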
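The cross-validation block now pins an explicit StratifiedKFold(n_splits=10, shuffle=True, random_state=42) for cv_scores, while train_scores passes the bare integer 10 (for a classifier, scikit-learn then builds an unshuffled StratifiedKFold, so the two estimates can differ slightly). A sketch of comparing cross-validated accuracy against the held-out test score, on synthetic data:

    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import accuracy_score
    from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split

    X, y = make_classification(n_samples=400, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    model = RandomForestClassifier(random_state=42).fit(X_train, y_train)
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
    cv_scores = cross_val_score(model, X_train, y_train, cv=cv, scoring='accuracy')
    test_score = accuracy_score(y_test, model.predict(X_test))
    print(f"CV mean = {cv_scores.mean():.3f}, test = {test_score:.3f}")  # a large gap suggests overfitting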