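# Streamlit demo: image captioning in Portuguese with a ViT encoder + GPT-2 decoder
# (VisionEncoderDecoderModel checkpoint "adalbertojunior/image_captioning_portuguese").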
import streamlit as st
from transformers import VisionEncoderDecoderModel, AutoFeatureExtractor, AutoTokenizer
import requests
from PIL import Image
import torch
CHECKPOINT = "adalbertojunior/image_captioning_portuguese"
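# Load the captioning model once and reuse it across Streamlit reruns.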
@st.cache(allow_output_mutation=True)  # skip hashing the large model object on reruns
def get_model():
    model = VisionEncoderDecoderModel.from_pretrained(CHECKPOINT)
    return model
feature_extractor = AutoFeatureExtractor.from_pretrained(CHECKPOINT)
tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
st.title("Image Captioning with ViT & GPT2 🇧🇷")
st.sidebar.markdown("## Generation parameters")
max_length = st.sidebar.number_input("Max length", value=20, min_value=1)
no_repeat_ngram_size = st.sidebar.number_input("no repeat ngrams size", value=2, min_value=1)
num_return_sequences = st.sidebar.number_input("Generated sequences", value=3, min_value=1)
gen_mode = st.sidebar.selectbox("Generation mode", ["beam search", "sampling"])
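# Collect the mode-specific keyword arguments to forward to model.generate().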
if gen_mode == "beam search":
    num_beams = st.sidebar.number_input("Beam size", value=5, min_value=1)
    early_stopping = st.sidebar.checkbox("Early stopping", value=True)
    gen_params = {
        "num_beams": num_beams,
        "early_stopping": early_stopping,
    }
elif gen_mode == "sampling":
    do_sample = True
    top_k = st.sidebar.number_input("top_k", value=30, min_value=0)
    # top_p must be a float in (0, 1]; 1.0 disables nucleus filtering
    top_p = st.sidebar.number_input("top_p", value=1.0, min_value=0.0, max_value=1.0)
    temperature = st.sidebar.number_input("temperature", value=0.7, min_value=0.0)
    gen_params = {
        "do_sample": do_sample,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
    }
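# Fetch the image from the URL, extract pixel features, and decode the generated captions.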
def generate_caption(url):
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    inputs = feature_extractor(image, return_tensors="pt")
    model = get_model()
    model.eval()
    generated_ids = model.generate(
        inputs["pixel_values"],
        max_length=max_length,
        no_repeat_ngram_size=no_repeat_ngram_size,
        num_return_sequences=num_return_sequences,
        **gen_params,
    )
    captions = tokenizer.batch_decode(
        generated_ids,
        skip_special_tokens=True,
    )
    return captions
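# Default demo image; users can paste their own image URL.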
url = st.text_input(
    "Insert your URL", "https://static.cdn.pleno.news/2017/09/avi%C3%A3o-e1572374124339.jpg"
)
st.image(url)
if st.button("Run captioning"):
    with st.spinner("Processing image..."):
        captions = generate_caption(url)
    for caption in captions:
        st.text(caption)
st.text("Research supported with Cloud TPUs from Google's TPU Research Cloud (TRC)") |