import re
import json
from typing import Iterable, Optional, List, Dict
import gradio as gr
import numpy as np
import torch
import spaces
from PIL import Image, ImageDraw, ImageFont
# Transformers & Qwen Utils
from transformers import (
Qwen2_5_VLForConditionalGeneration,
AutoProcessor,
AutoModelForImageTextToText
)
from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize
from qwen_vl_utils import process_vision_info
# Gradio Theme
from gradio.themes import Soft
from gradio.themes.utils import colors, fonts, sizes
# -----------------------------------------------------------------------------
# 1. THEME CONFIGURATION
# -----------------------------------------------------------------------------
colors.steel_blue = colors.Color(
name="steel_blue",
c50="#EBF3F8",
c100="#D3E5F0",
c200="#A8CCE1",
c300="#7DB3D2",
c400="#529AC3",
c500="#4682B4",
c600="#3E72A0",
c700="#36638C",
c800="#2E5378",
c900="#264364",
c950="#1E3450",
)
class SteelBlueTheme(Soft):
def __init__(
self,
*,
primary_hue: colors.Color | str = colors.gray,
secondary_hue: colors.Color | str = colors.steel_blue,
neutral_hue: colors.Color | str = colors.slate,
text_size: sizes.Size | str = sizes.text_lg,
font: fonts.Font | str | Iterable[fonts.Font | str] = (
fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
),
font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
),
):
super().__init__(
primary_hue=primary_hue,
secondary_hue=secondary_hue,
neutral_hue=neutral_hue,
text_size=text_size,
font=font,
font_mono=font_mono,
)
super().set(
background_fill_primary="*primary_50",
background_fill_primary_dark="*primary_900",
body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
button_primary_text_color="white",
button_primary_text_color_hover="white",
button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_800)",
button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_500)",
block_title_text_weight="600",
block_border_width="3px",
block_shadow="*shadow_drop_lg",
button_primary_shadow="*shadow_drop_lg",
button_large_padding="11px",
)
steel_blue_theme = SteelBlueTheme()
css = "#main-title h1 { font-size: 2.3em !important; } #out_img { height: 600px; object-fit: contain; }"
# -----------------------------------------------------------------------------
# 2. GLOBAL MODEL LOADING
# -----------------------------------------------------------------------------
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Running on device: {device}")
# --- Load Fara-7B ---
print("🔄 Loading Fara-7B...")
MODEL_ID_V = "microsoft/Fara-7B"
try:
processor_v = AutoProcessor.from_pretrained(MODEL_ID_V, trust_remote_code=True)
    model_v = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        MODEL_ID_V,
        trust_remote_code=True,
        # float16 is poorly supported on CPU; fall back to float32 there
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    ).to(device).eval()
except Exception as e:
print(f"Failed to load Fara: {e}")
model_v = None
processor_v = None
# --- Load UI-TARS-1.5-7B ---
print("🔄 Loading UI-TARS-1.5-7B...")
MODEL_ID_X = "ByteDance-Seed/UI-TARS-1.5-7B"
try:
# Important: use_fast=False is often required for custom tokenizers
processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True, use_fast=False)
model_x = AutoModelForImageTextToText.from_pretrained(
MODEL_ID_X,
trust_remote_code=True,
torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
).to(device).eval()
except Exception as e:
print(f"Failed to load UI-TARS: {e}")
model_x = None
processor_x = None
print("✅ Models loading sequence complete.")
# -----------------------------------------------------------------------------
# 3. UTILS & PROMPTS
# -----------------------------------------------------------------------------
def array_to_image(image_array: np.ndarray) -> Image.Image:
if image_array is None: raise ValueError("No image provided.")
return Image.fromarray(np.uint8(image_array))
# --- Fara Prompt ---
def get_fara_prompt(task, image):
OS_SYSTEM_PROMPT = """You are a GUI agent. You are given a task and a screenshot of the current status.
You need to generate the next action to complete the task.
Output your action inside a <tool_call> block using JSON format.
Include "coordinate": [x, y] in pixels for interactions.
Examples:
<tool_call>{"name": "User", "arguments": {"action": "click", "coordinate": [400, 300]}}</tool_call>
<tool_call>{"name": "User", "arguments": {"action": "type", "coordinate": [100, 200], "text": "hello"}}</tool_call>
"""
return [
{"role": "system", "content": [{"type": "text", "text": OS_SYSTEM_PROMPT}]},
{"role": "user", "content": [{"type": "image", "image": image}, {"type": "text", "text": f"Instruction: {task}"}]},
]
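# The messages returned above follow the Qwen chat schema, which is what
# processor.apply_chat_template and qwen_vl_utils.process_vision_info
# consume in the Fara pipeline below.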
# --- UI-TARS Prompt ---
def get_uitars_prompt(task, image):
    # UI-TARS generally responds better to a simple, direct grounding instruction
guidelines = (
"Localize an element on the GUI image according to my instructions and "
"output a click position as Click(x, y) with x num pixels from the left edge "
"and y num pixels from the top edge."
)
return [
{
"role": "user",
"content": [
{"type": "image", "image": image},
{"type": "text", "text": f"{guidelines}\n{task}"}
],
}
]
def get_image_proc_params(processor) -> Dict[str, int]:
ip = getattr(processor, "image_processor", None)
return {
"patch_size": getattr(ip, "patch_size", 14),
"merge_size": getattr(ip, "merge_size", 2),
"min_pixels": getattr(ip, "min_pixels", 256 * 256),
"max_pixels": getattr(ip, "max_pixels", 1280 * 1280),
}
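# Note: the getattr fallbacks above mirror Qwen2-VL-style preprocessing
# (14px patches merged 2x2 into one visual token); the pixel budgets are
# conservative stand-ins used only when the processor does not expose its
# own limits.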
# -----------------------------------------------------------------------------
# 4. PARSING LOGIC
# -----------------------------------------------------------------------------
def parse_uitars_response(text: str) -> List[Dict]:
"""Parse various UI-TARS output formats"""
actions = []
text = text.strip()
# Debug print
print(f"Parsing UI-TARS output: {text}")
# Regex 1: Click(x, y) - Standard prompt output
# Matches: Click(123, 456) or Click(123,456)
matches_click = re.findall(r"Click\s*\(\s*(\d+)\s*,\s*(\d+)\s*\)", text, re.IGNORECASE)
for m in matches_click:
actions.append({"type": "click", "x": int(m[0]), "y": int(m[1]), "text": ""})
# Regex 2: point=[x, y] - Common model internal format
matches_point = re.findall(r"point=\[\s*(\d+)\s*,\s*(\d+)\s*\]", text, re.IGNORECASE)
for m in matches_point:
actions.append({"type": "click", "x": int(m[0]), "y": int(m[1]), "text": ""})
# Regex 3: start_box='(x, y)' - Another variant
matches_box = re.findall(r"start_box=['\"]?\(\s*(\d+)\s*,\s*(\d+)\s*\)['\"]?", text, re.IGNORECASE)
for m in matches_box:
actions.append({"type": "click", "x": int(m[0]), "y": int(m[1]), "text": ""})
    # Remove duplicates in case multiple patterns matched the same action
unique_actions = []
seen = set()
for a in actions:
key = (a['type'], a['x'], a['y'])
if key not in seen:
seen.add(key)
unique_actions.append(a)
return unique_actions
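# Example (hypothetical output string; any of the three patterns yields the
# same parsed action):
#   parse_uitars_response("Click(412, 230)")
#   -> [{"type": "click", "x": 412, "y": 230, "text": ""}]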
def parse_fara_response(response: str) -> List[Dict]:
"""Parse Fara <tool_call> JSON format"""
actions = []
matches = re.findall(r"<tool_call>(.*?)</tool_call>", response, re.DOTALL)
for match in matches:
try:
data = json.loads(match.strip())
args = data.get("arguments", {})
coords = args.get("coordinate", [])
action_type = args.get("action", "unknown")
text_content = args.get("text", "")
if coords and len(coords) == 2:
actions.append({
"type": action_type, "x": float(coords[0]), "y": float(coords[1]), "text": text_content
})
        except Exception:
            # Skip malformed tool-call payloads rather than failing the batch
            pass
return actions
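# Example (hypothetical output, matching the format shown in the Fara
# system prompt above):
#   parse_fara_response(
#       '<tool_call>{"name": "User", "arguments": '
#       '{"action": "click", "coordinate": [400, 300]}}</tool_call>'
#   )
#   -> [{"type": "click", "x": 400.0, "y": 300.0, "text": ""}]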
def create_localized_image(original_image: Image.Image, actions: list[dict]) -> Optional[Image.Image]:
if not actions: return None
img_copy = original_image.copy()
draw = ImageDraw.Draw(img_copy)
width, height = img_copy.size
    try: font = ImageFont.load_default()
    except Exception: font = None
for act in actions:
x = act['x']
y = act['y']
        # Heuristic: coordinates in [0, 1) are treated as normalized and
        # scaled to the image size; anything else is assumed to be absolute
        # pixels (UI-TARS coordinates were already rescaled to the original
        # image in the main pipeline).
        if x < 1.0 and y < 1.0:
            pixel_x, pixel_y = int(x * width), int(y * height)
        else:
            pixel_x, pixel_y = int(x), int(y)
color = 'red' if 'click' in act['type'].lower() else 'blue'
# Draw Target Crosshair/Circle
r = 15
line_width = 4
# Circle
draw.ellipse([pixel_x - r, pixel_y - r, pixel_x + r, pixel_y + r], outline=color, width=line_width)
# Center dot
draw.ellipse([pixel_x - 3, pixel_y - 3, pixel_x + 3, pixel_y + 3], fill=color)
# Label
label = f"{act['type']}"
if act['text']: label += f": {act['text']}"
text_pos = (pixel_x + 20, pixel_y - 10)
# Draw text background
bbox = draw.textbbox(text_pos, label, font=font)
draw.rectangle((bbox[0]-4, bbox[1]-2, bbox[2]+4, bbox[3]+2), fill="black")
draw.text(text_pos, label, fill="white", font=font)
return img_copy
# -----------------------------------------------------------------------------
# 5. CORE LOGIC
# -----------------------------------------------------------------------------
@spaces.GPU(duration=120)
def process_screenshot(input_numpy_image: np.ndarray, task: str, model_choice: str):
if input_numpy_image is None: return "⚠️ Please upload an image.", None
input_pil_image = array_to_image(input_numpy_image)
orig_w, orig_h = input_pil_image.size
# --- UI-TARS Logic ---
if model_choice == "UI-TARS-1.5-7B":
if model_x is None: return "Error: UI-TARS model failed to load on startup.", None
print("Using UI-TARS Pipeline...")
        # 1. Smart Resize (crucial for UI-TARS accuracy)
        # Resize the image to a resolution the model's vision tower handles natively
ip_params = get_image_proc_params(processor_x)
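        # smart_resize rounds height/width to multiples of
        # patch_size * merge_size (28px with the defaults above) while keeping
        # the total pixel count within [min_pixels, max_pixels], so the image
        # maps cleanly onto the model's visual token grid.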
resized_h, resized_w = smart_resize(
input_pil_image.height, input_pil_image.width,
factor=ip_params["patch_size"] * ip_params["merge_size"],
min_pixels=ip_params["min_pixels"], max_pixels=ip_params["max_pixels"]
)
proc_image = input_pil_image.resize((resized_w, resized_h), Image.Resampling.LANCZOS)
# 2. Prompting
messages = get_uitars_prompt(task, proc_image)
text_prompt = processor_x.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# 3. Inputs
inputs = processor_x(text=[text_prompt], images=[proc_image], padding=True, return_tensors="pt")
inputs = {k: v.to(device) for k, v in inputs.items()}
# 4. Generate
with torch.no_grad():
generated_ids = model_x.generate(**inputs, max_new_tokens=128)
# Decode
generated_ids = [out_ids[len(in_seq):] for in_seq, out_ids in zip(inputs.get("input_ids"), generated_ids)]
raw_response = processor_x.batch_decode(generated_ids, skip_special_tokens=True)[0]
# 5. Parse
actions = parse_uitars_response(raw_response)
# 6. Rescale Coordinates back to Original Image Size
# The model saw 'resized_w' x 'resized_h', so coordinates are in that space.
# We need to map them back to 'orig_w' x 'orig_h' for the visualizer.
scale_x = orig_w / resized_w
scale_y = orig_h / resized_h
for a in actions:
a['x'] = int(a['x'] * scale_x)
a['y'] = int(a['y'] * scale_y)
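        # Illustrative numbers: a click at (644, 364) on a 1288x728 resized
        # view maps back to (960, 540) on a 1920x1080 original.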
# --- Fara Logic ---
else:
if model_v is None: return "Error: Fara model failed to load on startup.", None
print("Using Fara Pipeline...")
messages = get_fara_prompt(task, input_pil_image)
text_prompt = processor_v.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor_v(
text=[text_prompt],
images=image_inputs,
videos=video_inputs,
padding=True,
return_tensors="pt"
)
inputs = inputs.to(device)
with torch.no_grad():
generated_ids = model_v.generate(**inputs, max_new_tokens=512)
generated_ids = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
raw_response = processor_v.batch_decode(generated_ids, skip_special_tokens=True)[0]
# Fara usually outputs exact pixels based on original image
actions = parse_fara_response(raw_response)
print(f"Raw Output: {raw_response}")
print(f"Parsed Actions: {actions}")
    # Visualize parsed actions on the original image (shared by both pipelines)
output_image = input_pil_image
if actions:
vis = create_localized_image(input_pil_image, actions)
if vis: output_image = vis
return raw_response, output_image
# -----------------------------------------------------------------------------
# 6. UI SETUP
# -----------------------------------------------------------------------------
with gr.Blocks(theme=steel_blue_theme, css=css) as demo:
gr.Markdown("# **CUA GUI Agent 🖥️**", elem_id="main-title")
gr.Markdown("Upload a screenshot, select a model, and provide a task. The model will determine the precise UI coordinates and actions.")
with gr.Row():
with gr.Column(scale=2):
input_image = gr.Image(label="Upload Screenshot", height=500)
with gr.Row():
model_choice = gr.Radio(
choices=["Fara-7B", "UI-TARS-1.5-7B"],
label="Select Model",
value="Fara-7B",
interactive=True
)
task_input = gr.Textbox(
label="Task Instruction",
placeholder="e.g. Input the server address readyforquantum.com...",
lines=2
)
submit_btn = gr.Button("Analyze UI & Generate Action", variant="primary")
with gr.Column(scale=3):
output_image = gr.Image(label="Visualized Action Points", elem_id="out_img", height=500)
output_text = gr.Textbox(label="Raw Model Output", lines=8, show_copy_button=True)
submit_btn.click(
fn=process_screenshot,
inputs=[input_image, task_input, model_choice],
outputs=[output_text, output_image]
)
gr.Examples(
examples=[["./assets/google.png", "Search for 'Hugging Face'", "Fara-7B"]],
inputs=[input_image, task_input, model_choice],
label="Quick Examples"
)
if __name__ == "__main__":
demo.queue().launch()