import os
import re
import json
import time
import unicodedata
from io import BytesIO
from typing import Iterable, Tuple, Optional, List, Dict, Any
import gradio as gr
import numpy as np
import torch
import spaces
from PIL import Image, ImageDraw, ImageFont
# Transformers & Qwen Utils
from transformers import (
Qwen2_5_VLForConditionalGeneration,
AutoProcessor,
AutoModelForImageTextToText
)
from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize
from qwen_vl_utils import process_vision_info
# Gradio Theme
from gradio.themes import Soft
from gradio.themes.utils import colors, fonts, sizes
# -----------------------------------------------------------------------------
# 1. THEME CONFIGURATION
# -----------------------------------------------------------------------------
colors.steel_blue = colors.Color(
name="steel_blue",
c50="#EBF3F8",
c100="#D3E5F0",
c200="#A8CCE1",
c300="#7DB3D2",
c400="#529AC3",
c500="#4682B4",
c600="#3E72A0",
c700="#36638C",
c800="#2E5378",
c900="#264364",
c950="#1E3450",
)
class SteelBlueTheme(Soft):
def __init__(
self,
*,
primary_hue: colors.Color | str = colors.gray,
secondary_hue: colors.Color | str = colors.steel_blue,
neutral_hue: colors.Color | str = colors.slate,
text_size: sizes.Size | str = sizes.text_lg,
font: fonts.Font | str | Iterable[fonts.Font | str] = (
fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
),
font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
),
):
super().__init__(
primary_hue=primary_hue,
secondary_hue=secondary_hue,
neutral_hue=neutral_hue,
text_size=text_size,
font=font,
font_mono=font_mono,
)
super().set(
background_fill_primary="*primary_50",
background_fill_primary_dark="*primary_900",
body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
button_primary_text_color="white",
button_primary_text_color_hover="white",
button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_800)",
button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_500)",
block_title_text_weight="600",
block_border_width="3px",
block_shadow="*shadow_drop_lg",
button_primary_shadow="*shadow_drop_lg",
button_large_padding="11px",
)
steel_blue_theme = SteelBlueTheme()
css = "#main-title h1 { font-size: 2.3em !important; } #out_img { height: 600px; object-fit: contain; }"
# -----------------------------------------------------------------------------
# 2. GLOBAL MODEL LOADING
# -----------------------------------------------------------------------------
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Running on device: {device}")
# --- Load Fara-7B ---
print("🔄 Loading Fara-7B...")
MODEL_ID_V = "microsoft/Fara-7B"
processor_v = AutoProcessor.from_pretrained(MODEL_ID_V, trust_remote_code=True)
model_v = Qwen2_5_VLForConditionalGeneration.from_pretrained(
MODEL_ID_V,
trust_remote_code=True,
torch_dtype=torch.float16
).to(device).eval()
# --- Load UI-TARS-1.5-7B ---
print("🔄 Loading UI-TARS-1.5-7B...")
# Note: this loads the original UI-TARS-7B SFT checkpoint, not a 1.5 release;
# adjust the repo id if you have access to a specific UI-TARS-1.5 checkpoint.
MODEL_ID_X = "bytedance/UI-TARS-7B-SFT"
processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True, use_fast=False)
model_x = AutoModelForImageTextToText.from_pretrained(
MODEL_ID_X,
trust_remote_code=True,
torch_dtype=torch.bfloat16,
).to(device).eval()
print("✅ All Models Loaded.")
# -----------------------------------------------------------------------------
# 3. UTILS & PROMPTS
# -----------------------------------------------------------------------------
def array_to_image(image_array: np.ndarray) -> Image.Image:
if image_array is None: raise ValueError("No image provided.")
return Image.fromarray(np.uint8(image_array))
# --- Fara Prompt ---
def get_fara_prompt(task, image):
OS_SYSTEM_PROMPT = """You are a GUI agent. You are given a task and a screenshot of the current status.
You need to generate the next action to complete the task.
Output your action inside a <tool_call> block using JSON format.
Include "coordinate": [x, y] in pixels for interactions.
Examples:
<tool_call>{"name": "User", "arguments": {"action": "click", "coordinate": [400, 300]}}</tool_call>
<tool_call>{"name": "User", "arguments": {"action": "type", "coordinate": [100, 200], "text": "hello"}}</tool_call>
"""
return [
{"role": "system", "content": [{"type": "text", "text": OS_SYSTEM_PROMPT}]},
{"role": "user", "content": [{"type": "image", "image": image}, {"type": "text", "text": f"Instruction: {task}"}]},
]
# --- UI-TARS Prompt ---
def get_uitars_prompt(task, image):
guidelines = (
"Localize an element on the GUI image according to my instructions and "
"output a click position as Click(x, y) with x num pixels from the left edge "
"and y num pixels from the top edge."
)
return [
{
"role": "user",
"content": [
{"type": "image", "image": image},
{"type": "text", "text": f"{guidelines}\n{task}"}
],
}
]
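# UI-TARS predicts pixel coordinates in the space of the image it actually sees,
# so callers that resize before inference (as process_screenshot does below)
# must rescale the predicted points back to the original resolution.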
def get_image_proc_params(processor) -> Dict[str, int]:
ip = getattr(processor, "image_processor", None)
return {
"patch_size": getattr(ip, "patch_size", 14),
"merge_size": getattr(ip, "merge_size", 2),
"min_pixels": getattr(ip, "min_pixels", 256 * 256),
"max_pixels": getattr(ip, "max_pixels", 1280 * 1280),
}
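# smart_resize snaps each dimension to a multiple of patch_size * merge_size
# (14 * 2 = 28 for Qwen2-VL-style processors) while keeping the pixel count
# within [min_pixels, max_pixels]; the fallbacks above are only assumptions
# used when the processor does not expose these attributes.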
# -----------------------------------------------------------------------------
# 4. PARSING LOGIC
# -----------------------------------------------------------------------------
def parse_uitars_response(text: str) -> List[Dict]:
"""Parse UI-TARS specific output formats"""
actions = []
# 1. Click(x,y)
m = re.search(r"Click\s*\(\s*(\d+)\s*,\s*(\d+)\s*\)", text)
if m:
x, y = int(m.group(1)), int(m.group(2))
actions.append({"type": "click", "x": x, "y": y, "text": ""})
return actions
# 2. start_box='(x,y)'
m = re.search(r"start_box=['\"]\(\s*(\d+)\s*,\s*(\d+)\s*\)['\"]", text)
if m:
x, y = int(m.group(1)), int(m.group(2))
actions.append({"type": "click", "x": x, "y": y, "text": ""})
return actions
return actions
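# Note: only the first matching pattern is returned, so at most one action is
# produced per response; a hypothetical multi-action variant would collect
# matches with re.finditer over both patterns instead.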
def parse_fara_response(response: str) -> List[Dict]:
"""Parse Fara <tool_call> JSON format"""
actions = []
matches = re.findall(r"<tool_call>(.*?)</tool_call>", response, re.DOTALL)
for match in matches:
try:
data = json.loads(match.strip())
args = data.get("arguments", {})
coords = args.get("coordinate", [])
action_type = args.get("action", "unknown")
text_content = args.get("text", "")
if coords and len(coords) == 2:
actions.append({
"type": action_type, "x": float(coords[0]), "y": float(coords[1]), "text": text_content
})
        except Exception: pass  # skip malformed tool-call JSON
return actions
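# Example: '<tool_call>{"name": "User", "arguments": {"action": "click",
# "coordinate": [400, 300]}}</tool_call>' parses to
# [{"type": "click", "x": 400.0, "y": 300.0, "text": ""}].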
def create_localized_image(original_image: Image.Image, actions: list[dict]) -> Optional[Image.Image]:
if not actions: return None
img_copy = original_image.copy()
draw = ImageDraw.Draw(img_copy)
width, height = img_copy.size
    try: font = ImageFont.load_default()
    except Exception: font = None
for act in actions:
x = act['x']
y = act['y']
        # Heuristic: treat values in (0, 1] as normalized, anything larger as absolute pixels
        if 0 < x <= 1.0 and 0 < y <= 1.0:
pixel_x = int(x * width)
pixel_y = int(y * height)
else:
pixel_x = int(x)
pixel_y = int(y)
color = 'red' if 'click' in act['type'] else 'blue'
# Draw Target
r = 15
draw.ellipse([pixel_x - r, pixel_y - r, pixel_x + r, pixel_y + r], outline=color, width=4)
draw.ellipse([pixel_x - 3, pixel_y - 3, pixel_x + 3, pixel_y + 3], fill=color)
# Draw Label
label = f"{act['type']}"
if act['text']: label += f": {act['text']}"
text_pos = (pixel_x + 18, pixel_y - 12)
bbox = draw.textbbox(text_pos, label, font=font)
draw.rectangle((bbox[0]-2, bbox[1]-2, bbox[2]+2, bbox[3]+2), fill="black")
draw.text(text_pos, label, fill="white", font=font)
return img_copy
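# Example of the normalization heuristic above: on a 1920x1080 screenshot, an
# action at (0.5, 0.5) is drawn at pixel (960, 540), while (400, 300) falls
# outside (0, 1] and is used as absolute pixel coordinates.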
# -----------------------------------------------------------------------------
# 5. CORE LOGIC
# -----------------------------------------------------------------------------
@spaces.GPU
def process_screenshot(input_numpy_image: np.ndarray, task: str, model_choice: str):
if input_numpy_image is None: return "⚠️ Please upload an image.", None
input_pil_image = array_to_image(input_numpy_image)
orig_w, orig_h = input_pil_image.size
# --- UI-TARS Logic ---
if model_choice == "UI-TARS-1.5-7B":
print("Using UI-TARS Pipeline...")
# 1. Smart Resize (Crucial for UI-TARS accuracy)
ip_params = get_image_proc_params(processor_x)
resized_h, resized_w = smart_resize(
input_pil_image.height, input_pil_image.width,
factor=ip_params["patch_size"] * ip_params["merge_size"],
min_pixels=ip_params["min_pixels"], max_pixels=ip_params["max_pixels"]
)
proc_image = input_pil_image.resize((resized_w, resized_h), Image.Resampling.LANCZOS)
# 2. Prompting
messages = get_uitars_prompt(task, proc_image)
text_prompt = processor_x.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# 3. Inputs
inputs = processor_x(text=[text_prompt], images=[proc_image], padding=True, return_tensors="pt")
inputs = {k: v.to(device) for k, v in inputs.items()}
# 4. Generate
with torch.no_grad():
generated_ids = model_x.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [out_ids[len(in_seq):] for in_seq, out_ids in zip(inputs.get("input_ids"), generated_ids)]
raw_response = processor_x.batch_decode(generated_ids_trimmed, skip_special_tokens=True)[0]
# 5. Parse & Rescale Coordinates
actions = parse_uitars_response(raw_response)
        # UI-TARS outputs coordinates in the RESIZED image's space; map them back to the ORIGINAL
scale_x = orig_w / resized_w
scale_y = orig_h / resized_h
for a in actions:
a['x'] = int(a['x'] * scale_x)
a['y'] = int(a['y'] * scale_y)
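        # Example (assuming the fallback pixel limits above): a 1920x1080
        # screenshot smart-resizes to 1680x952, so a predicted Click(840, 476)
        # maps back to (960, 540) on the original screenshot.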
# --- Fara Logic ---
else:
print("Using Fara Pipeline...")
messages = get_fara_prompt(task, input_pil_image)
text_prompt = processor_v.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor_v(
text=[text_prompt],
images=image_inputs,
videos=video_inputs,
padding=True,
return_tensors="pt"
)
inputs = inputs.to(device)
with torch.no_grad():
generated_ids = model_v.generate(**inputs, max_new_tokens=512)
generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
raw_response = processor_v.batch_decode(generated_ids_trimmed, skip_special_tokens=True)[0]
        # Fara typically outputs absolute pixels for the original image, or normalized values in [0, 1]
actions = parse_fara_response(raw_response)
print(f"Raw Output: {raw_response}")
    # Visualize the parsed actions on the original image
output_image = input_pil_image
if actions:
vis = create_localized_image(input_pil_image, actions)
if vis: output_image = vis
return raw_response, output_image
# -----------------------------------------------------------------------------
# 6. UI SETUP
# -----------------------------------------------------------------------------
with gr.Blocks(theme=steel_blue_theme, css=css) as demo:
gr.Markdown("# **CUA GUI Agent 🖥️**", elem_id="main-title")
gr.Markdown("Upload a screenshot, select a model, and provide a task. The model will determine the precise UI coordinates and actions.")
with gr.Row():
with gr.Column(scale=2):
input_image = gr.Image(label="Upload Screenshot", height=500)
with gr.Row():
model_choice = gr.Radio(
choices=["Fara-7B", "UI-TARS-1.5-7B"],
label="Select Model",
value="Fara-7B",
interactive=True
)
task_input = gr.Textbox(
label="Task Instruction",
placeholder="e.g. Input the server address readyforquantum.com...",
lines=2
)
submit_btn = gr.Button("Analyze UI & Generate Action", variant="primary")
with gr.Column(scale=3):
output_image = gr.Image(label="Visualized Action Points", elem_id="out_img", height=500)
output_text = gr.Textbox(label="Raw Model Output", lines=8, show_copy_button=True)
submit_btn.click(
fn=process_screenshot,
inputs=[input_image, task_input, model_choice],
outputs=[output_text, output_image]
)
gr.Examples(
examples=[["./assets/google.png", "Search for 'Hugging Face'", "Fara-7B"]],
inputs=[input_image, task_input, model_choice],
label="Quick Examples"
)
if __name__ == "__main__":
demo.queue().launch()
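    # Tip: demo.queue().launch(share=True) would also expose a public Gradio link.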