# Hugging Face Spaces app. (The Space listing this file was copied from
# reported "Runtime error" — see the CPU map_location note below.)
import torch
import torch.nn as nn
from PIL import Image
from torchvision import transforms
import torchvision
import gradio as gr

# EfficientNet-B2 backbone with its default (ImageNet) weights; the paired
# transforms() give exactly the preprocessing the weights were trained with.
agirliklar = torchvision.models.EfficientNet_B2_Weights.DEFAULT
eff_don = agirliklar.transforms()
model = torchvision.models.efficientnet_b2(weights=agirliklar)

# Replace the 1000-class ImageNet head with a 5-class head
# (EfficientNet-B2's penultimate feature dimension is 1408).
model.classifier = nn.Sequential(nn.Dropout(p=0.2), nn.Linear(1408, 5))

# map_location="cpu" lets a checkpoint saved on a GPU machine load on
# CPU-only hardware (e.g. a free Spaces runtime) — without it, torch.load
# raises when CUDA is unavailable.
model.load_state_dict(torch.load("model.pth", map_location="cpu"))

class_names = ['a_bir', 'b_iki', 'c_üç', 'd_dört', 'e_beş']
def predict(img):
    """Classify an image and return per-class probabilities.

    Args:
        img: Input image (a ``PIL.Image`` as supplied by the Gradio
            interface — TODO confirm the input component type).

    Returns:
        dict[str, float]: Mapping of each name in ``class_names`` to its
        predicted probability — the format Gradio's Label output expects.
    """
    # Apply the EfficientNet-B2 preprocessing and add a batch dimension:
    # (C, H, W) -> (1, C, H, W).
    img = eff_don(img).unsqueeze(0)

    # Evaluation mode (disables dropout) + inference mode (no autograd).
    model.eval()
    with torch.inference_mode():
        # Logits -> probabilities across the class dimension.
        pred_probs = torch.softmax(model(img), dim=1)

    # {class_name: probability} for every class, in class_names order.
    return {name: float(pred_probs[0][i]) for i, name in enumerate(class_names)}