import gradio as gr
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageSegmentation
import tensorflow as tf  # only used by the commented-out TF classifier below

extractor = AutoFeatureExtractor.from_pretrained("facebook/detr-resnet-50-panoptic")
model = AutoModelForImageSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic")
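
# Optional preprocessing sanity check (a sketch, assuming one of the bundled
# example images such as 00_plane.jpg sits next to this script). The feature
# extractor resizes and normalizes the image and returns a dict whose
# "pixel_values" tensor has shape (batch, channels, height, width):
# sample_inputs = extractor(images=Image.open("00_plane.jpg"), return_tensors="pt")
# print(sample_inputs["pixel_values"].shape)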

# Perform image classification for single-class output
# (requires a Keras-style model plus `input_shape` and `labels`, which are not defined in this file)
# def predict_class(image):
#     img = tf.cast(image, tf.float32)
#     img = tf.image.resize(img, [input_shape[0], input_shape[1]])
#     img = tf.expand_dims(img, axis=0)
#     prediction = model.predict(img)
#     class_index = tf.argmax(prediction[0]).numpy()
#     predicted_class = labels[class_index]
#     return predicted_class

# Perform image classification for multi-class output
def predict_class(image):
    # Preprocess the PIL image and run the DETR model (PyTorch; there is no Keras-style model.predict here)
    inputs = extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Per-query class probabilities (last index is "no object"), returned as the
    # {label: confidence} dict that gr.Label expects
    scores, labels = outputs.logits.softmax(-1)[0, :, :-1].max(-1)
    return {model.config.id2label[i]: s for s, i in zip(scores.tolist(), labels.tolist())}
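
# Standalone usage sketch for predict_class (assumes the 00_plane.jpg file from
# the examples list below is present; label names come from model.config.id2label,
# so the exact keys and scores shown here are only illustrative):
# print(predict_class(Image.open("00_plane.jpg")))
# e.g. {"airplane": 0.98, ...}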

# UI Design for single-class output
# def classify_image(image):
#     predicted_class = predict_class(image)
#     output = f"<h2>Predicted Class: <span style='text-transform:uppercase;'>{predicted_class}</span></h2>"
#     return output

# UI Design for multi-class output
def classify_image(image):
    results = predict_class(image)
    return results

inputs = gr.Image(type="pil", label="Upload an image")
# outputs = gr.HTML()  # uncomment for single-class output
outputs = gr.Label(num_top_classes=4)
title = "<h1 style='text-align: center;'>Image Classifier</h1>"
description = "Upload an image and get the predicted class."
# css_code='body{background-image:url("file=wave.mp4");}'
gr.Interface(fn=classify_image,
             inputs=inputs,
             outputs=outputs,
             title=title,
             examples=[["00_plane.jpg"], ["01_car.jpg"], ["02_bird.jpg"], ["03_cat.jpg"], ["04_deer.jpg"]],
             # css=css_code,
             description=description).launch()
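
# A minimal sketch of alternative launch settings for local debugging
# (debug and share are standard Gradio launch() parameters):
# demo = gr.Interface(fn=classify_image, inputs=inputs, outputs=outputs,
#                     title=title, description=description)
# demo.launch(debug=True, share=True)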