import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from scipy.stats import boxcox
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import classification_report, accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
%matplotlib inline
# Set the resolution of the plotted figures
plt.rcParams['figure.dpi'] = 200
# Configure Seaborn plot styles: Set background color and use dark grid
sns.set(rc={'axes.facecolor': '#faded9'}, style='darkgrid')
df = pd.read_csv("/content/heart.csv")
df
df.info()
# Define the continuous features
continuous_features = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak']
# Identify the features to be converted to object data type
features_to_convert = [feature for feature in df.columns if feature not in continuous_features]
# Convert the identified features to object data type
df[features_to_convert] = df[features_to_convert].astype('object')
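# Keeping only the truly continuous columns as numeric means describe() below
# summarizes them alone, while describe(include='object') handles the categorical ones.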
df.dtypes
# Get the summary statistics for numerical variables
df.describe().T
# Get the summary statistics for categorical variables
df.describe(include='object')
# Filter out continuous features for the univariate analysis
df_continuous = df[continuous_features]
# Set up the subplot
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(15, 10))
# Loop to plot histograms for each continuous feature
for i, col in enumerate(df_continuous.columns):
    x = i // 3
    y = i % 3
    values, bin_edges = np.histogram(df_continuous[col],
                                     range=(np.floor(df_continuous[col].min()), np.ceil(df_continuous[col].max())))
    graph = sns.histplot(data=df_continuous, x=col, bins=bin_edges, kde=True, ax=ax[x, y],
                         edgecolor='none', color='red', alpha=0.6, line_kws={'lw': 3})
    ax[x, y].set_xlabel(col, fontsize=15)
    ax[x, y].set_ylabel('Count', fontsize=12)
    ax[x, y].set_xticks(np.round(bin_edges, 1))
    ax[x, y].set_xticklabels(ax[x, y].get_xticks(), rotation=45)
    ax[x, y].grid(color='lightgrey')
    for j, p in enumerate(graph.patches):
        ax[x, y].annotate('{}'.format(p.get_height()), (p.get_x() + p.get_width() / 2, p.get_height() + 1),
                          ha='center', fontsize=10, fontweight="bold")
    textstr = '\n'.join((
        r'$\mu=%.2f$' % df_continuous[col].mean(),
        r'$\sigma=%.2f$' % df_continuous[col].std()
    ))
    ax[x, y].text(0.75, 0.9, textstr, transform=ax[x, y].transAxes, fontsize=12, verticalalignment='top',
                  color='white', bbox=dict(boxstyle='round', facecolor='#ff826e', edgecolor='white', pad=0.5))
ax[1,2].axis('off')
plt.suptitle('Distribution of Continuous Variables', fontsize=20)
plt.tight_layout()
plt.subplots_adjust(top=0.92)
plt.show()
# Filter out categorical features for the univariate analysis
categorical_features = df.columns.difference(continuous_features)
df_categorical = df[categorical_features]
# Set up the subplot for a 5x2 layout
fig, ax = plt.subplots(nrows=5, ncols=2, figsize=(15, 18))
# Loop to plot bar charts for each categorical feature in the 5x2 layout
for i, col in enumerate(categorical_features):
    row = i // 2
    col_idx = i % 2
    # Calculate frequency percentages
    value_counts = df[col].value_counts(normalize=True).mul(100).sort_values()
    # Plot bar chart
    value_counts.plot(kind='barh', ax=ax[row, col_idx], width=0.8, color='red')
    # Add frequency percentages to the bars
    for index, value in enumerate(value_counts):
        ax[row, col_idx].text(value, index, str(round(value, 1)) + '%', fontsize=15, weight='bold', va='center')
    ax[row, col_idx].set_xlim([0, 95])
    ax[row, col_idx].set_xlabel('Frequency Percentage', fontsize=12)
    ax[row, col_idx].set_title(f'{col}', fontsize=20)
ax[4,1].axis('off')
plt.suptitle('Distribution of Categorical Variables', fontsize=22)
plt.tight_layout()
plt.subplots_adjust(top=0.95)
plt.show()
# Set color palette
sns.set_palette(['#ff826e', 'red'])
# Create the subplots
fig, ax = plt.subplots(len(continuous_features), 2, figsize=(15,15), gridspec_kw={'width_ratios': [1, 2]})
# Loop through each continuous feature to create barplots and kde plots
for i, col in enumerate(continuous_features):
    # Barplot showing the mean value of the feature for each target category
    graph = sns.barplot(data=df, x="target", y=col, ax=ax[i,0])
    # KDE plot showing the distribution of the feature for each target category
    sns.kdeplot(data=df[df["target"]==0], x=col, fill=True, linewidth=2, ax=ax[i,1], label='0')
    sns.kdeplot(data=df[df["target"]==1], x=col, fill=True, linewidth=2, ax=ax[i,1], label='1')
    ax[i,1].set_yticks([])
    ax[i,1].legend(title='Heart Disease', loc='upper right')
    # Add mean values to the barplot
    for cont in graph.containers:
        graph.bar_label(cont, fmt=' %.3g')
# Set the title for the entire figure
plt.suptitle('Continuous Features vs Target Distribution', fontsize=22)
plt.tight_layout()
plt.show()
# Remove 'target' from the categorical_features
categorical_features = [feature for feature in categorical_features if feature != 'target']
fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(15,10))
for i, col in enumerate(categorical_features):
    # Create a cross tabulation showing the count of each target class for each category of the feature
    cross_tab = pd.crosstab(index=df[col], columns=df['target'])
    # Using normalize='index' gives us the row-wise proportion of the data
    cross_tab_prop = pd.crosstab(index=df[col], columns=df['target'], normalize='index')
    # Define colormap
    cmp = ListedColormap(['#ff826e', 'red'])
    # Plot stacked bar charts
    x, y = i//4, i%4
    cross_tab_prop.plot(kind='bar', ax=ax[x,y], stacked=True, width=0.8, colormap=cmp,
                        legend=False, ylabel='Proportion', sharey=True)
    # Add the proportions and counts of the individual bars to our plot
    for idx, val in enumerate(cross_tab.index.values):
        for (proportion, count, y_location) in zip(cross_tab_prop.loc[val], cross_tab.loc[val], cross_tab_prop.loc[val].cumsum()):
            ax[x,y].text(x=idx-0.3, y=(y_location-proportion)+(proportion/2)-0.03,
                         s=f' {count}\n({np.round(proportion * 100, 1)}%)',
                         color="black", fontsize=9, fontweight="bold")
    # Add legend
    ax[x,y].legend(title='target', loc=(0.7,0.9), fontsize=8, ncol=2)
    # Set y limit
    ax[x,y].set_ylim([0,1.12])
    # Keep x-tick labels horizontal
    ax[x,y].set_xticklabels(ax[x,y].get_xticklabels(), rotation=0)
plt.suptitle('Categorical Features vs Target Stacked Barplots', fontsize=22)
plt.tight_layout()
plt.show()
# Check for missing values in the dataset
df.isnull().sum().sum()
continuous_features
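# Count outliers with Tukey's IQR rule: a value is flagged if it falls below
# Q1 - 1.5*IQR or above Q3 + 1.5*IQR.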
Q1 = df[continuous_features].quantile(0.25)
Q3 = df[continuous_features].quantile(0.75)
IQR = Q3 - Q1
outliers_count_specified = ((df[continuous_features] < (Q1 - 1.5 * IQR)) | (df[continuous_features] > (Q3 + 1.5 * IQR))).sum()
outliers_count_specified
# Implementing one-hot encoding on the specified categorical features
df_encoded = pd.get_dummies(df, columns=['cp', 'restecg', 'thal'], drop_first=True)
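# With drop_first=True, a k-level feature expands to k-1 indicator columns; for
# example, the four chest-pain categories of 'cp' become cp_1, cp_2 and cp_3,
# with cp_0 left as the implicit baseline to avoid redundant dummies.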
# Convert the rest of the categorical variables that don't need one-hot encoding to integer data type
features_to_convert = ['sex', 'fbs', 'exang', 'slope', 'ca', 'target']
for feature in features_to_convert:
    df_encoded[feature] = df_encoded[feature].astype(int)
df_encoded.dtypes
# Displaying the resulting DataFrame after one-hot encoding
df_encoded.head()
# Define the features (X) and the output labels (y)
X = df_encoded.drop('target', axis=1)
y = df_encoded['target']
# Splitting data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y)
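# Sanity check (optional): stratify=y preserves the class balance in both splits,
# so these two distributions should match up to rounding
print(y_train.value_counts(normalize=True).round(3))
print(y_test.value_counts(normalize=True).round(3))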
continuous_features
# Adding a small constant to 'oldpeak' to make all values positive
X_train['oldpeak'] = X_train['oldpeak'] + 0.001
X_test['oldpeak'] = X_test['oldpeak'] + 0.001
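# Box-Cox is only defined for strictly positive data and 'oldpeak' contains
# zeros, so without this shift boxcox() would raise a ValueError for the column.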
# Checking the distribution of the continuous features
fig, ax = plt.subplots(2, 5, figsize=(15,10))
# Original Distributions
for i, col in enumerate(continuous_features):
    sns.histplot(X_train[col], kde=True, ax=ax[0,i], color='#ff826e').set_title(f'Original {col}')
# Applying Box-Cox Transformation
# Dictionary to store lambda values for each feature
lambdas = {}
for i, col in enumerate(continuous_features):
    # Only apply Box-Cox to strictly positive features
    if X_train[col].min() > 0:
        X_train[col], lambdas[col] = boxcox(X_train[col])
        # Apply the same lambda to the test data
        X_test[col] = boxcox(X_test[col], lmbda=lambdas[col])
        sns.histplot(X_train[col], kde=True, ax=ax[1,i], color='red').set_title(f'Transformed {col}')
    else:
        sns.histplot(X_train[col], kde=True, ax=ax[1,i], color='green').set_title(f'{col} (Not Transformed)')
fig.tight_layout()
plt.show()
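# Box-Cox computes y = (x**lmbda - 1) / lmbda (log(x) when lmbda == 0), with
# lmbda fitted by maximum likelihood on the training data; reusing the training
# lambdas on the test set avoids data leakage.
lambdas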
X_train.head()
# Define the base DT model
dt_base = DecisionTreeClassifier(random_state=0)
def tune_clf_hyperparameters(clf, param_grid, X_train, y_train, scoring='recall', n_splits=3):
    '''
    Optimize a classifier's hyperparameters by searching over the specified grid.
    GridSearchCV with StratifiedKFold cross-validation evaluates each combination
    of hyperparameters; by default, the combination with the highest recall for
    class 1 is selected. Returns the refitted best estimator together with its
    hyperparameters.
    '''
    # Use StratifiedKFold so the class distribution is the same across all folds
    cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=0)
    # Create the GridSearchCV object
    clf_grid = GridSearchCV(clf, param_grid, cv=cv, scoring=scoring, n_jobs=-1)
    # Fit the GridSearchCV object to the training data
    clf_grid.fit(X_train, y_train)
    # Get the best hyperparameters
    best_hyperparameters = clf_grid.best_params_
    # best_estimator_ is the best model, already refitted on the full training data
    return clf_grid.best_estimator_, best_hyperparameters
# Hyperparameter grid for DT
param_grid_dt = {
    'criterion': ['gini', 'entropy'],
    'max_depth': [2, 3],
    'min_samples_split': [2, 3, 4],
    'min_samples_leaf': [1, 2]
}
# Call the function for hyperparameter tuning
best_dt, best_dt_hyperparams = tune_clf_hyperparameters(dt_base, param_grid_dt, X_train, y_train)
print('DT Optimal Hyperparameters: \n', best_dt_hyperparams)
# Evaluate the optimized model on the train data
print(classification_report(y_train, best_dt.predict(X_train)))
# Evaluate the optimized model on the test data
print(classification_report(y_test, best_dt.predict(X_test)))
def evaluate_model(model, X_test, y_test, model_name):
    """
    Evaluates the performance of a trained model on test data using various metrics.
    """
    # Make predictions
    y_pred = model.predict(X_test)
    # Get the classification report as a dictionary
    report = classification_report(y_test, y_pred, output_dict=True)
    # Extracting metrics
    metrics = {
        "precision_0": report["0"]["precision"],
        "precision_1": report["1"]["precision"],
        "recall_0": report["0"]["recall"],
        "recall_1": report["1"]["recall"],
        "f1_0": report["0"]["f1-score"],
        "f1_1": report["1"]["f1-score"],
        "macro_avg_precision": report["macro avg"]["precision"],
        "macro_avg_recall": report["macro avg"]["recall"],
        "macro_avg_f1": report["macro avg"]["f1-score"],
        "accuracy": accuracy_score(y_test, y_pred)
    }
    # Convert the metrics dictionary to a one-row dataframe indexed by model name
    metrics_df = pd.DataFrame(metrics, index=[model_name]).round(2)
    return metrics_df
dt_evaluation = evaluate_model(best_dt, X_test, y_test, 'DT')
dt_evaluation
rf_base = RandomForestClassifier(random_state=0)
param_grid_rf = {
    'n_estimators': [10, 30, 50, 70, 100],
    'criterion': ['gini', 'entropy'],
    'max_depth': [2, 3, 4],
    'min_samples_split': [2, 3, 4, 5],
    'min_samples_leaf': [1, 2, 3],
    'bootstrap': [True, False]
}
# Using the tune_clf_hyperparameters function to get the best estimator
best_rf, best_rf_hyperparams = tune_clf_hyperparameters(rf_base, param_grid_rf, X_train, y_train)
print('RF Optimal Hyperparameters: \n', best_rf_hyperparams)
# Evaluate the optimized model on the train data
print(classification_report(y_train, best_rf.predict(X_train)))
# Evaluate the optimized model on the test data
print(classification_report(y_test, best_rf.predict(X_test)))
rf_evaluation = evaluate_model(best_rf, X_test, y_test, 'RF')
rf_evaluation
# Define the base KNN model and set up the pipeline with scaling
knn_pipeline = Pipeline([
    ('scaler', StandardScaler()),
    ('knn', KNeighborsClassifier())
])
# Hyperparameter grid for KNN
knn_param_grid = {
    'knn__n_neighbors': list(range(1, 12)),
    'knn__weights': ['uniform', 'distance'],
    'knn__p': [1, 2]  # 1: Manhattan distance, 2: Euclidean distance
}
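# The 'p' parameter selects the Minkowski metric: p=1 is Manhattan distance
# sum(|a_i - b_i|), p=2 is Euclidean distance sqrt(sum((a_i - b_i)**2)).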
# Hyperparameter tuning for KNN
best_knn, best_knn_hyperparams = tune_clf_hyperparameters(knn_pipeline, knn_param_grid, X_train, y_train)
print('KNN Optimal Hyperparameters: \n', best_knn_hyperparams)
# Evaluate the optimized model on the train data
print(classification_report(y_train, best_knn.predict(X_train)))
# Evaluate the optimized model on the test data
print(classification_report(y_test, best_knn.predict(X_test)))
knn_evaluation = evaluate_model(best_knn, X_test, y_test, 'KNN')
knn_evaluation
svm_pipeline = Pipeline([
    ('scaler', StandardScaler()),
    ('svm', SVC(probability=True))
])
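# probability=True makes SVC expose predict_proba (fitted via internal
# Platt-scaling cross-validation) at the cost of slower training; it is not
# strictly needed for the class predictions used below.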
param_grid_svm = {
    'svm__C': [0.0011, 0.005, 0.01, 0.05, 0.1, 1, 10, 20],
    'svm__kernel': ['linear', 'rbf', 'poly'],
    'svm__gamma': ['scale', 'auto', 0.1, 0.5, 1, 5],
    'svm__degree': [2, 3, 4]
}
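# Note: 'degree' only affects the 'poly' kernel and 'gamma' only the 'rbf' and
# 'poly' kernels here, so the grid contains redundant combinations that
# GridSearchCV will still evaluate.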
# Call the function for hyperparameter tuning
best_svm, best_svm_hyperparams = tune_clf_hyperparameters(svm_pipeline, param_grid_svm, X_train, y_train)
print('SVM Optimal Hyperparameters: \n', best_svm_hyperparams)
# Evaluate the optimized model on the train data
print(classification_report(y_train, best_svm.predict(X_train)))
# Evaluate the optimized model on the test data
print(classification_report(y_test, best_svm.predict(X_test)))
svm_evaluation = evaluate_model(best_svm, X_test, y_test, 'SVM')
svm_evaluation
# Concatenate the dataframes
all_evaluations = [dt_evaluation, rf_evaluation, knn_evaluation, svm_evaluation]
results = pd.concat(all_evaluations)
# Sort by 'recall_1'
results = results.sort_values(by='recall_1', ascending=False).round(2)
results
# Sort values based on 'recall_1'
results.sort_values(by='recall_1', ascending=True, inplace=True)
recall_1_scores = results['recall_1']
# Plot the horizontal bar chart
fig, ax = plt.subplots(figsize=(12, 7), dpi=70)
ax.barh(results.index, recall_1_scores, color='red')
# Annotate the values and indexes
for i, (value, name) in enumerate(zip(recall_1_scores, results.index)):
    ax.text(value + 0.01, i, f"{value:.2f}", ha='left', va='center', fontweight='bold', color='red', fontsize=15)
    ax.text(0.1, i, name, ha='left', va='center', fontweight='bold', color='white', fontsize=25)
# Remove yticks
ax.set_yticks([])
# Set x-axis limit
ax.set_xlim([0, 1.2])
# Add title and xlabel
plt.title("Recall for Positive Class across Models", fontweight='bold', fontsize=22)
plt.xlabel('Recall Value', fontsize=16)
plt.show()
!pip install gradio
import gradio as gr
import numpy as np
from sklearn.ensemble import RandomForestClassifier
# Example: Define and train a Random Forest model
model = RandomForestClassifier()
# Dummy training data (replace with your actual data)
X_demo = np.random.rand(100, 13)  # 100 samples, 13 features (one for each input field)
y_demo = np.random.randint(2, size=100)  # Binary target
# Train the demo model (kept separate so the real X_train/y_train above are not overwritten)
model.fit(X_demo, y_demo)
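# NOTE: this demo model is trained on random data, so its predictions are
# meaningless; to serve the tuned classifier instead, the same preprocessing
# (one-hot encoding plus the Box-Cox shift/lambdas fitted above) would have to
# be applied to the raw inputs before calling, e.g., best_rf.predict().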
# Define the prediction function
def predict(*inputs):
    try:
        # Convert inputs to a numpy array and reshape to match the model's expected input shape
        input_array = np.array(inputs).reshape(1, -1)
        prediction = model.predict(input_array)  # Make a prediction
        return str(prediction[0])  # Return the single prediction as a string for display
    except Exception as e:
        return str(e)  # Return any errors as a string (for debugging)
# Define the features (input fields) for Gradio
features = [
    'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach',
    'exang', 'oldpeak', 'slope', 'ca', 'thal'
]
# Create Gradio input components (use gr.Number for numeric inputs)
inputs = [gr.Number(label=feature, value=0) for feature in features]
# Output component (show the prediction result)
outputs = gr.Textbox(label="Prediction Output")
# Create and launch the Gradio interface
gr.Interface(fn=predict, inputs=inputs, outputs=outputs).launch()