import streamlit as st
import numpy as np
import torch
import shap
import matplotlib.pyplot as plt
import joblib
import pandas as pd
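
# Note: the app expects the three exported artifacts (scaler_X_DS.joblib,
# scaler_y_DS.joblib, scripted_model_DS.pt) next to this script and is
# launched with `streamlit run <this file>`.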

# Load scalers and model once per session; st.cache_resource keeps them
# in memory across Streamlit reruns instead of re-reading the files
@st.cache_resource
def load_resources():
    scaler_X = joblib.load("scaler_X_DS.joblib")
    scaler_y = joblib.load("scaler_y_DS.joblib")
    model = torch.jit.load("scripted_model_DS.pt")
    model.eval()  # inference mode: freezes dropout/batch-norm behaviour
    return scaler_X, scaler_y, model

# Wrapper so SHAP's KernelExplainer can call the model: it takes a 2-D numpy
# array of scaled features and returns unscaled predictions, so the resulting
# SHAP values are expressed directly in pass/mm. Relies on the module-level
# `model` and `scaler_y` assigned below.
def model_wrapper(X):
    with torch.no_grad():
        X_tensor = torch.tensor(X, dtype=torch.float32)
        output = model(X_tensor).numpy()
    return scaler_y.inverse_transform(output)

# Streamlit app
st.title("Dynamic Stability Predictor")

# Load resources
scaler_X, scaler_y, model = load_resources()

# Feature names (gradation sieve sizes in mm, the CA and FA mix parameters,
# and the aggregate type) with their default values
feature_names = [
    "25", "19", "12.5", "9.5", "4.75", "2.36", "1.18", "0.6", "0.3", "0.15", "0.075", "CA", "FA", "type"
]
default_values = [100, 100, 81.593, 68.395, 49.318, 29.283, 17.261, 14.257, 6.041, 3.000, 2.115, 0.600, 0.350, 1.0]

# Collect input features from the sidebar
st.sidebar.header("Input Features")
input_features = {}
for feature, default_value in zip(feature_names, default_values):
    if feature == "type":
        # Aggregate type is categorical: encoded as 1.0 (limestone) or 2.0 (basalt)
        type_option = st.sidebar.selectbox(f"Enter {feature}", options=["1 - Limestone", "2 - Basalt"], index=0)
        input_features[feature] = 1.0 if type_option == "1 - Limestone" else 2.0
    else:
        input_features[feature] = st.sidebar.number_input(f"Enter {feature}", value=default_value)

# Assemble the inputs in feature order and scale them as the model expects
input_array = np.array([input_features[feature] for feature in feature_names]).reshape(1, -1)
input_scaled = scaler_X.transform(input_array)

# Make prediction and map it back to physical units
with torch.no_grad():
    prediction = model(torch.tensor(input_scaled, dtype=torch.float32)).numpy()
prediction_unscaled = scaler_y.inverse_transform(prediction)
st.write(f"Predicted Dynamic Stability: {prediction_unscaled[0][0]:.2f} pass/mm")

# SHAP explanation
if st.button("Explain Prediction"):
    # Background data for KernelExplainer: standard-normal noise as a stand-in
    # for scaled training samples (reasonable only if scaler_X is a
    # StandardScaler). For faithful attributions, replace this with a sample
    # of the real scaled training data.
    background_data_scaled = np.random.randn(100, 14)  # 100 samples, 14 features
    explainer = shap.KernelExplainer(model_wrapper, background_data_scaled)
    shap_values = explainer.shap_values(input_scaled)
    shap_values_single = shap_values[0].flatten()
    expected_value = explainer.expected_value[0]
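
    # For this single-output model, shap_values comes back with one row per
    # input sample, and expected_value is the mean wrapper output over the
    # background set (exact array shapes can vary between shap versions).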

    # Bundle everything into an Explanation object for the waterfall plot,
    # labelling each bar with the raw (unscaled) feature value
    feature_values = [f"{x:.1f}" for x in input_array[0]]
    explanation = shap.Explanation(
        values=shap_values_single,
        base_values=expected_value,
        data=feature_values,
        feature_names=feature_names
    )

    # shap.plots.waterfall draws onto the current matplotlib figure
    fig, ax = plt.subplots(figsize=(10, 6))
    shap.plots.waterfall(explanation, show=False)
    st.pyplot(fig)
st.write(f"Base value (unscaled): {([[expected_value]])[0][0]:.2f} pass/mm") | |
st.write(f"Output value (unscaled): {prediction_unscaled[0][0]:.2f} pass/mm") | |
st.write("\nFeature contributions (unscaled):") | |

    # Tabulate per-feature SHAP contributions in pass/mm
    feature_contributions = pd.DataFrame({
        'Contribution': shap_values_single
    }, index=feature_names)
    feature_contributions['Contribution'] = feature_contributions['Contribution'].round(4)
    st.table(feature_contributions)
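
    # Sanity check: by SHAP's additivity property, the base value plus the
    # sum of the contributions above should match the displayed prediction
    # (up to KernelExplainer's sampling error).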