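# Gradio Space: classify images into the 1,000 ImageNet classes using a
# YOLOv5m classification model (yolov5m-cls.pt) loaded from PyTorch Hub.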
import torch
from torchvision import transforms
import gradio as gr
import requests
from PIL import Image
# Load the YOLOv5m classification model from PyTorch Hub
model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-cls.pt').eval()
model.classify = True
model.conf = 0.40
# Load the 1,000 ImageNet class labels
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")
def preprocess_image(inp):
    # Define the standard ImageNet preprocessing steps
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    # Apply the preprocessing steps to the PIL image (yields a CHW tensor)
    image = preprocess(inp)
    # Add a batch dimension so the model receives a 4D NCHW input
    image = image.unsqueeze(0)
    return image
def predict(inp):
    with torch.no_grad():
        # Run the model and convert the raw logits into class probabilities
        prediction = torch.nn.functional.softmax(model(preprocess_image(inp))[0], dim=0)
        print(prediction)
        # Map each of the 1000 ImageNet labels to its predicted probability
        confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
    return confidences
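# Build the Gradio demo: a PIL image in, the top-7 class probabilities out.
# The example images must be present alongside this script for the examples to load.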
gr.Interface(fn=predict,
             inputs=gr.Image(type="pil"),
             outputs=gr.Label(num_top_classes=7),
             examples=["karda3.png", "lion.png"]).launch()