import torch
# Load a ResNet-18 pretrained on ImageNet and put it in inference mode
# (eval() freezes batch-norm statistics and disables dropout).
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
import requests
import PIL
from torchvision import transforms
# Download human-readable labels for ImageNet.
# NOTE(review): the shortlink presumably resolves to the standard 1000-entry
# ImageNet class list (one label per line) -- verify, since classify_image
# indexes labels[0..999]. split("\n") may leave a trailing empty string if
# the file ends with a newline; that extra entry is never indexed here.
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")
def classify_image(image_filepath):
    """Classify an image file into the 1000 ImageNet classes.

    Args:
        image_filepath: Path to an image file readable by PIL.

    Returns:
        dict mapping each of the 1000 ImageNet label strings to its softmax
        probability (float) -- the shape gr.Label expects.
    """
    PIL_image = PIL.Image.open(image_filepath).convert('RGB')
    transformations = transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        # BUG FIX: pretrained torchvision models expect inputs normalized
        # with the ImageNet channel statistics; without this step the
        # predicted probabilities are systematically skewed.
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    image_tensors = transformations(PIL_image).unsqueeze(0)
    with torch.no_grad():  # inference only -- skip autograd bookkeeping
        prediction = torch.nn.functional.softmax(model(image_tensors)[0], dim=0)
    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
    return confidences
import gradio as gr
def display_model_details(model_details):
    """Render the user-supplied model details as a Markdown section."""
    header = "**Model Details:**"
    return "\n\n".join((header, model_details))
# Assemble the Gradio app: a Markdown "model details" echo section plus an
# image-classification panel wired to classify_image.
with gr.Blocks(title="Image Classification for 1000 Objects", css=".gradio-container {background:#FFD1DC;}") as demo:
    gr.HTML("""
    Image Classification for 1000 Objects
    """)
    gr.Markdown(
        """
        # Enter Model Details
        Please provide the necessary information about your model in the text box below.
        """
    )
    input_box = gr.Textbox(placeholder="Enter model details")
    output_box = gr.Markdown()
    # Live-update the Markdown preview on every keystroke in the textbox.
    input_box.change(display_model_details, input_box, output_box)

    with gr.Row():
        # BUG FIX: image_mode was "L" (grayscale), which discarded all color
        # information before the RGB-trained ImageNet classifier saw the
        # image. "RGB" preserves the channels classify_image expects.
        input_image = gr.Image(type="filepath", image_mode="RGB")
        output_label = gr.Label(label="Probabilities", num_top_classes=3)
    send_btn = gr.Button("Infer")
    send_btn.click(fn=classify_image, inputs=input_image, outputs=output_label)

    with gr.Row():
        # One-click sample inputs; the .jpg files are expected to sit next
        # to this script.
        gr.Examples(['./lion.jpg'], label='Sample images : Lion', inputs=input_image)
        gr.Examples(['./cheetah.jpg'], label='Cheetah', inputs=input_image)
        gr.Examples(['./eagle.jpg'], label='Eagle', inputs=input_image)
        gr.Examples(['./indigobird.jpg'], label='Indigo Bird', inputs=input_image)
        gr.Examples(['./aircraftcarrier.jpg'], label='Aircraft Carrier', inputs=input_image)
        gr.Examples(['./acousticguitar.jpg'], label='Acoustic Guitar', inputs=input_image)

# debug=True surfaces tracebacks in the UI; share=True requests a public
# gradio.live tunnel link.
demo.launch(debug=True, share=True)