import gradio as gr
import datasets
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
# from transformers import ViTFeatureExtractor, ViTForImageClassification
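# Load the beans dataset (bean leaf images labeled by disease);
# it is only needed here to recover the class label names.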
dataset = datasets.load_dataset('beans')
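# Preprocessor and fine-tuned classifier come from the same checkpoint,
# so inference-time preprocessing matches the model's training setup.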
extractor = AutoFeatureExtractor.from_pretrained("suresh-subramanian/beans-classification")
model = AutoModelForImageClassification.from_pretrained("suresh-subramanian/beans-classification")
# feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
labels = dataset['train'].features['labels'].names
def classify(im):
    # Preprocess the input image into a batched pixel-value tensor
    features = extractor(im, return_tensors='pt')
    with torch.no_grad():
        logits = model(**features).logits
    probability = torch.nn.functional.softmax(logits, dim=-1)
    probs = probability[0].numpy()
    # Pair each class probability with its human-readable label name
    confidences = {label: float(probs[i]) for i, label in enumerate(labels)}
    return confidences
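# Example (assumption: run after the model loads, using a validation image):
# print(classify(dataset['validation'][0]['image']))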
# examples = [["powdery mildew.jpg"], ["375010.jpg"]]
# Set up the Gradio interface: an image input mapped to a label output
gr_interface = gr.Interface(
    fn=classify,
    inputs='image',
    outputs='label',
    title='Bean Classification',
    description="Monitor your crops' health more easily",
)
# Launch the Gradio app; debug=True surfaces errors in the Space logs
gr_interface.launch(debug=True)