# UGround-V1-2B / app.py
import base64
import json
from datetime import datetime
import gradio as gr
import torch
import spaces
from PIL import Image, ImageDraw
from qwen_vl_utils import process_vision_info
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
import ast
import os
import numpy as np
from huggingface_hub import hf_hub_download, list_repo_files
# Define constants
DESCRIPTION = "[UGround Demo](https://osu-nlp-group.github.io/UGround/)"
_SYSTEM = "You are a very helpful assistant."
MIN_PIXELS = 256 * 28 * 28
MAX_PIXELS = 1344 * 1344
# Specify the model repository and destination folder
# https://huggingface.co/osunlp/UGround-V1-2B
model_repo = "osunlp/UGround-V1-2B"
destination_folder = "./UGround-V1-2B"
# Ensure the destination folder exists
os.makedirs(destination_folder, exist_ok=True)
# List all files in the repository
files = list_repo_files(repo_id=model_repo)
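# (hf_hub_download is called per file below; huggingface_hub.snapshot_download would fetch the whole repo in one call.)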
# Download each file to the destination folder
for file in files:
file_path = hf_hub_download(repo_id=model_repo, filename=file, local_dir=destination_folder)
print(f"Downloaded {file} to {file_path}")
model = Qwen2VLForConditionalGeneration.from_pretrained(
destination_folder,
torch_dtype=torch.bfloat16,
device_map="cpu",
)
# Load the processor
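# UGround-V1-2B is fine-tuned from Qwen2-VL-2B, so the base checkpoint's processor/tokenizer is used.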
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", min_pixels=MIN_PIXELS, max_pixels=MAX_PIXELS)
# Helper functions
def draw_point(image_input, point=None, radius=5):
"""Draw a point on the image."""
if isinstance(image_input, str):
image = Image.open(image_input)
else:
image = Image.fromarray(np.uint8(image_input))
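    # UGround predicts coordinates on a 0-1000 normalized scale; rescale to pixel space before drawing.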
if point:
x, y = round(point[0]/1000 * image.width), round(point[1]/1000 * image.height)
ImageDraw.Draw(image).ellipse((x - radius, y - radius, x + radius, y + radius), fill='red')
return image
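# The chat messages below reference images by file path, so the uploaded array is written to disk first.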
def array_to_image_path(image_array, session_id):
"""Save the uploaded image and return its path."""
if image_array is None:
raise ValueError("No image provided. Please upload an image before submitting.")
img = Image.fromarray(np.uint8(image_array))
filename = f"{session_id}.png"
img.save(filename)
return os.path.abspath(filename)
def crop_image(image_path, click_xy, crop_factor=0.5):
"""Crop the image around the click point."""
image = Image.open(image_path)
width, height = image.size
crop_width, crop_height = int(width * crop_factor), int(height * crop_factor)
    # click_xy arrives on the model's 0-1000 normalized scale (matching draw_point), not as a 0-1 fraction
    center_x, center_y = round(click_xy[0] / 1000 * width), round(click_xy[1] / 1000 * height)
left = max(center_x - crop_width // 2, 0)
upper = max(center_y - crop_height // 2, 0)
right = min(center_x + crop_width // 2, width)
lower = min(center_y + crop_height // 2, height)
cropped_image = image.crop((left, upper, right, lower))
cropped_image_path = f"cropped_{os.path.basename(image_path)}"
cropped_image.save(cropped_image_path)
return cropped_image_path
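# Iterative refinement: each pass grounds the query on the current image, then crops around the prediction so the next pass sees a zoomed-in view.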
@spaces.GPU
def run_uground(image, query, session_id, iterations=1):
"""Main function for iterative inference."""
image_path = array_to_image_path(image, session_id)
click_xy = None
images_during_iterations = [] # List to store images at each step
for _ in range(iterations):
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "You are a very helpful assistant"},
{"type": "image", "image": image_path, "min_pixels": MIN_PIXELS, "max_pixels": MAX_PIXELS},
{"type": "text", "text": f"""Your task is to help the user identify the precise coordinates (x, y) of a specific area/element/object on the screen based on a description.
- Your response should aim to point to the center or a representative point within the described area/element/object as accurately as possible.
- If the description is unclear or ambiguous, infer the most relevant area or element based on its likely context or purpose.
- Your answer should be a single string (x, y) corresponding to the point of interest.
Description: {query}
Answer:"""}
],
}
]
        global model
        model = model.to("cuda")  # move weights to the ZeroGPU-allocated device (no-op after the first pass)
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
text=[text],
images=image_inputs,
videos=video_inputs,
padding=True,
return_tensors="pt"
)
inputs = inputs.to("cuda")
        generated_ids = model.generate(**inputs, max_new_tokens=128, do_sample=False)  # greedy decoding (temperature=0 only makes sense with sampling disabled)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0]
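        # The model answers with a coordinate string like "(x, y)"; parse it into a tuple.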
click_xy = ast.literal_eval(output_text)
# Draw point on the current image
result_image = draw_point(image_path, click_xy, radius=10)
images_during_iterations.append(result_image) # Store the current image
# Crop the image for the next iteration
image_path = crop_image(image_path, click_xy)
return images_during_iterations, str(click_xy)
def save_and_upload_data(image, query, session_id, is_example_image, votes=None):
"""Save the data to a JSON file and upload to S3."""
if is_example_image == "True":
return
votes = votes or {"upvotes": 0, "downvotes": 0}
# Save image locally
image_file_name = f"{session_id}.png"
image.save(image_file_name)
data = {
"image_path": image_file_name,
"query": query,
"votes": votes,
"timestamp": datetime.now().isoformat()
}
local_file_name = f"{session_id}.json"
with open(local_file_name, "w") as f:
json.dump(data, f)
return data
def update_vote(vote_type, session_id, is_example_image):
"""Update the vote count and re-upload the JSON file."""
if is_example_image == "True":
return "Example image."
    local_file_name = f"{session_id}.json"
    if session_id is None or not os.path.exists(local_file_name):
        return "Please submit a query before voting."
    with open(local_file_name, "r") as f:
data = json.load(f)
if vote_type == "upvote":
data["votes"]["upvotes"] += 1
elif vote_type == "downvote":
data["votes"]["downvotes"] += 1
with open(local_file_name, "w") as f:
json.dump(data, f)
return f"Thank you for your {vote_type}!"
with open("./assets/showui.png", "rb") as image_file:
base64_image = base64.b64encode(image_file.read()).decode("utf-8")
examples = [
["./examples/amazon.jpg", "Search bar at the top of the page", True],
["./examples/shopping.jpg", "delete button for the second item in the cart list", True],
["./examples/ios.jpg", "Open Maps", True],
["./examples/toggle.jpg", "toggle button labeled by VPN", True],
["./examples/semantic.jpg", "Home", True],
["./examples/accweather.jpg", "Select May", True],
["./examples/arxiv.jpg", "Home", True],
["./examples/arxiv.jpg", "Edit the page", True],
["./examples/ios.jpg", "icon at the top right corner", True],
["./examples/health.jpg", "text labeled by 2023/11/26", True],
["./examples/app_store.png", "Download Kindle.", True],
["./examples/ios_setting.png", "Turn off Do not disturb.", True],
# ["./examples/apple_music.png", "Star to favorite.", True],
# ["./examples/map.png", "Boston.", True],
# ["./examples/wallet.png", "Scan a QR code.", True],
# ["./examples/word.png", "More shapes.", True],
# ["./examples/web_shopping.png", "Proceed to checkout.", True],
# ["./examples/web_forum.png", "Post my comment.", True],
# ["./examples/safari_google.png", "Click on search bar.", True],
]
def build_demo(embed_mode, concurrency_count=1):
with gr.Blocks(title="UGround Demo", theme=gr.themes.Default()) as demo:
state_image_path = gr.State(value=None)
state_session_id = gr.State(value=None)
with gr.Row():
with gr.Column(scale=3):
imagebox = gr.Image(type="numpy", label="Input Screenshot", placeholder="""#Try UGround with screenshots!
Windows: [Win + Shift + S]
macOS: [Command + Shift + 3]
Then upload/paste from clipboard πŸ€—
""")
# Add a slider for iteration count
iteration_slider = gr.Slider(minimum=1, maximum=3, step=1, value=1, label="Refinement Steps")
textbox = gr.Textbox(
show_label=True,
placeholder="Enter a query (e.g., 'Click Nahant')",
label="Query",
)
submit_btn = gr.Button(value="Submit", variant="primary")
# Examples component
gr.Examples(
examples=[[e[0], e[1]] for e in examples],
inputs=[imagebox, textbox],
examples_per_page=3,
)
# Add a hidden dropdown to pass the `is_example` flag
is_example_dropdown = gr.Dropdown(
choices=["True", "False"],
value="False",
visible=False,
label="Is Example Image",
)
def set_is_example(query):
# Find the example and return its `is_example` flag
for _, example_query, is_example in examples:
if query.strip() == example_query.strip():
return str(is_example) # Return as string for Dropdown compatibility
return "False"
textbox.change(
set_is_example,
inputs=[textbox],
outputs=[is_example_dropdown],
)
with gr.Column(scale=8):
output_gallery = gr.Gallery(label="Iterative Refinement", object_fit="contain", preview=True)
gr.HTML(
"""
<p><strong>Note:</strong> The <span style="color: red;">red point</span> on the output image represents the predicted clickable coordinates.</p>
"""
)
output_coords = gr.Textbox(label="Final Clickable Coordinates")
gr.HTML(
"""
<p><strong>πŸ€” Good or bad? Rate your experience to help us improve! ⬇️</strong></p>
"""
)
with gr.Row(elem_id="action-buttons", equal_height=True):
upvote_btn = gr.Button(value="πŸ‘ Looks good!", variant="secondary")
downvote_btn = gr.Button(value="πŸ‘Ž Too bad!", variant="secondary")
clear_btn = gr.Button(value="πŸ—‘οΈ Clear", interactive=True)
def on_submit(image, query, iterations, is_example_image):
if image is None:
raise ValueError("No image provided. Please upload an image before submitting.")
session_id = datetime.now().strftime("%Y%m%d_%H%M%S")
            images_during_iterations, click_coords = run_uground(image, query, session_id, iterations)
save_and_upload_data(images_during_iterations[0], query, session_id, is_example_image)
return images_during_iterations, click_coords, session_id
submit_btn.click(
on_submit,
[imagebox, textbox, iteration_slider, is_example_dropdown],
[output_gallery, output_coords, state_session_id],
)
clear_btn.click(
            lambda: (None, None, None, None, None),  # one value per output component
inputs=None,
outputs=[imagebox, textbox, output_gallery, output_coords, state_session_id],
queue=False
)
upvote_btn.click(
lambda session_id, is_example_image: update_vote("upvote", session_id, is_example_image),
inputs=[state_session_id, is_example_dropdown],
outputs=[],
queue=False
)
downvote_btn.click(
lambda session_id, is_example_image: update_vote("downvote", session_id, is_example_image),
inputs=[state_session_id, is_example_dropdown],
outputs=[],
queue=False
)
return demo
if __name__ == "__main__":
demo = build_demo(embed_mode=False)
demo.queue(api_open=False).launch(
server_name="0.0.0.0",
server_port=7860,
ssr_mode=False,
debug=True,
)