### 1. Imports and setup ###
import gradio as gr
import os
import torch

from torchvision import transforms
from timeit import default_timer as timer
from typing import Tuple, Dict

### Model and transforms preparation ###
# Load the trained model (saved as a full model object);
# map_location ensures it also loads on CPU-only machines
model = torch.load(f="smile_classifier.pth", map_location=torch.device("cpu"))

# Recreate the transform applied to the CelebA training images
transform = transforms.Compose([
    transforms.CenterCrop(size=[178, 178]),
    transforms.Resize(size=[64, 64]),
    transforms.ToTensor()
])

### Predict function ###

# Create predict function
def predict(img) -> Tuple[Dict, float]:
    """Transforms and performs a prediction on img and returns the prediction and time taken."""
    # Start the timer
    start_time = timer()

    # Transform the target image and add a batch dimension
    img = transform(img).unsqueeze(0)

    # Put model into evaluation mode and turn on inference mode
    model.eval()
    with torch.inference_mode():
        # Pass the transformed image through the model; the model is assumed to output
        # the probability of the "Smiling" class (sigmoid already applied)
        prob_smiling = model(img)[:, 0].item()

    # Create a prediction label and prediction probability dictionary covering both classes
    # (this is the required format for Gradio's Label output component)
    pred_labels_and_probs = {"Smiling": prob_smiling,
                             "Not Smiling": 1 - prob_smiling}

    # Calculate the prediction time
    pred_time = round(timer() - start_time, 5)

    # Return the prediction dictionary and prediction time
    return pred_labels_and_probs, pred_time

### Gradio app ###

# Create title, description and article strings
title = "Smile Classifier 🙂😊😃"
description = "A smile-classification computer vision model (trained on [CelebA](https://pytorch.org/vision/main/generated/torchvision.datasets.CelebA.html) data) that classifies images of people as smiling or not smiling."
article = "Select one of the provided example images and press Submit: the model will predict whether \
the person in the image is smiling and report the prediction probabilities."

# Create examples list from "examples/" directory
example_list = [["examples/" + example] for example in os.listdir("examples")]

# Create the Gradio demo
demo = gr.Interface(fn=predict,  # mapping function from input to output
                    inputs=gr.Image(type="pil"),  # what are the inputs?
                    outputs=[gr.Label(num_top_classes=2, label="Predictions"),  # what are the outputs?
                             gr.Number(label="Prediction time (s)")],  # our fn has two outputs, so we need two output components
                    examples=example_list,
                    title=title,
                    description=description,
                    article=article)

# Launch the demo!
demo.launch()
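
# Note on the checkpoint (assumed training-side code, not part of this app):
# torch.load above only returns a ready-to-use model because the checkpoint is
# assumed to have been saved as a full model object rather than a state_dict, e.g.
#
#   torch.save(model, "smile_classifier.pth")
#
# Loading a full pickled model requires the model's class definition to be importable
# in this app's environment. If only a state_dict was saved instead, the model class
# would need to be instantiated here first and loaded with
# model.load_state_dict(torch.load("smile_classifier.pth")).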