import gradio as gr
import spaces
import torch
# Work around TorchScript issues in some timm components by making
# torch.jit.script a no-op.
torch.jit.script = lambda f: f
import requests
from PIL import Image
from timm.data import create_transform
device = "cuda"
# Prepare the model.
import models
model = models.mambaout_femto(pretrained=True).to(device=device)  # swap in another MambaOut variant from models.py if desired
model.eval()
# Prepare the transform.
transform = create_transform(input_size=224, crop_pct=model.default_cfg['crop_pct'])
# Download human-readable labels for ImageNet.
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")
@spaces.GPU
def predict(inp):
    # Preprocess the PIL image and add a batch dimension.
    inp = transform(inp).unsqueeze(0).to(device=device)
    with torch.no_grad():
        prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
        confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
    return confidences
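
# A minimal sketch of exercising predict() outside Gradio (hypothetical local
# smoke test; assumes a CUDA device and the bundled example image are available):
#
#   img = Image.open("images/Kobe_coffee.jpg").convert("RGB")
#   top3 = sorted(predict(img).items(), key=lambda kv: kv[1], reverse=True)[:3]
#   print(top3)  # three (label, confidence) pairs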
title="MambaOut: Do We Really Need Mamba for Vision?"
description="Gradio demo for MambaOut model (Femto) proposed by [MambaOut: Do We Really Need Mamba for Vision?](https://arxiv.org/abs/2405.07992). To use it simply upload your image or click on one of the examples to load them. Read more at [arXiv](https://arxiv.org/abs/2405.07992) and [GitHub](https://github.com/yuweihao/MambaOut)."
gr.Interface(title=title,
             description=description,
             fn=predict,
             inputs=gr.Image(type="pil"),
             outputs=gr.Label(num_top_classes=3),
             examples=["images/Kobe_Bryant_2014.jpg", "images/Kobe_coffee.jpg"]).launch()
# Kobe Bryant image credit: https://en.wikipedia.org/wiki/Kobe_Bryant#/media/File:Kobe_Bryant_2014.jpg
# Kobe coffee image credit: https://aroundsaddleworth.co.uk/wp-content/uploads/2020/01/DSC_0177-scaled.jpg