import re
import json
from typing import Any, Dict, Iterable, List, Optional

import gradio as gr
import numpy as np
import torch
import spaces
from PIL import Image, ImageDraw, ImageFont

# Transformers & Qwen Utils
from transformers import (
    Qwen2_5_VLForConditionalGeneration, 
    AutoProcessor,
    AutoModelForImageTextToText
)
from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize
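# smart_resize rounds (height, width) to multiples of `factor` while keeping the total area within [min_pixels, max_pixels].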
from qwen_vl_utils import process_vision_info

# Gradio Theme
from gradio.themes import Soft
from gradio.themes.utils import colors, fonts, sizes

# -----------------------------------------------------------------------------
# 1. THEME CONFIGURATION
# -----------------------------------------------------------------------------

colors.steel_blue = colors.Color(
    name="steel_blue",
    c50="#EBF3F8",
    c100="#D3E5F0",
    c200="#A8CCE1",
    c300="#7DB3D2",
    c400="#529AC3",
    c500="#4682B4",
    c600="#3E72A0",
    c700="#36638C",
    c800="#2E5378",
    c900="#264364",
    c950="#1E3450",
)

class SteelBlueTheme(Soft):
    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.gray,
        secondary_hue: colors.Color | str = colors.steel_blue,
        neutral_hue: colors.Color | str = colors.slate,
        text_size: sizes.Size | str = sizes.text_lg,
        font: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
        ),
        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
        ),
    ):
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            text_size=text_size,
            font=font,
            font_mono=font_mono,
        )
        super().set(
            background_fill_primary="*primary_50",
            background_fill_primary_dark="*primary_900",
            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
            button_primary_text_color="white",
            button_primary_text_color_hover="white",
            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_800)",
            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_500)",
            block_title_text_weight="600",
            block_border_width="3px",
            block_shadow="*shadow_drop_lg",
            button_primary_shadow="*shadow_drop_lg",
            button_large_padding="11px",
        )

steel_blue_theme = SteelBlueTheme()
css = "#main-title h1 { font-size: 2.3em !important; } #out_img { height: 600px; object-fit: contain; }"

# -----------------------------------------------------------------------------
# 2. GLOBAL MODEL LOADING
# -----------------------------------------------------------------------------

device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Running on device: {device}")

# --- Load Fara-7B ---
print("πŸ”„ Loading Fara-7B...")
MODEL_ID_V = "microsoft/Fara-7B"
try:
    processor_v = AutoProcessor.from_pretrained(MODEL_ID_V, trust_remote_code=True)
    model_v = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        MODEL_ID_V,
        trust_remote_code=True,
        torch_dtype=torch.float16
    ).to(device).eval()
except Exception as e:
    print(f"Failed to load Fara: {e}")
    model_v = None
    processor_v = None

# --- Load UI-TARS-1.5-7B ---
print("πŸ”„ Loading UI-TARS-1.5-7B...")
MODEL_ID_X = "ByteDance-Seed/UI-TARS-1.5-7B" 
try:
    processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True, use_fast=False)
    model_x = AutoModelForImageTextToText.from_pretrained(
        MODEL_ID_X,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
    ).to(device).eval()
except Exception as e:
    print(f"Failed to load UI-TARS: {e}")
    model_x = None
    processor_x = None

# --- Load Holo2-8B ---
print("πŸ”„ Loading Holo2-8B...")
MODEL_ID_H = "Hcompany/Holo2-8B"
try:
    processor_h = AutoProcessor.from_pretrained(MODEL_ID_H, trust_remote_code=True)
    model_h = AutoModelForImageTextToText.from_pretrained(
        MODEL_ID_H,
        trust_remote_code=True,
        torch_dtype=torch.float16
    ).to(device).eval()
except Exception as e:
    print(f"Failed to load Holo2: {e}")
    model_h = None
    processor_h = None

print("βœ… All Models Loaded Sequence Complete.")

# -----------------------------------------------------------------------------
# 3. UTILS: COMPATIBILITY & HELPERS (Specific for Holo2)
# -----------------------------------------------------------------------------
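# Compat helpers: some processors expose chat templating / decoding directly, others only on their
# underlying tokenizer; these wrappers try both before falling back to a plain-text join.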

def apply_chat_template_compat(processor, messages: List[Dict[str, Any]]) -> str:
    tok = getattr(processor, "tokenizer", None)
    if hasattr(processor, "apply_chat_template"):
        return processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    if tok is not None and hasattr(tok, "apply_chat_template"):
        return tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    texts = []
    for m in messages:
        for c in m.get("content", []):
            if isinstance(c, dict) and c.get("type") == "text":
                texts.append(c.get("text", ""))
    return "\n".join(texts)

def batch_decode_compat(processor, token_id_batches, **kw):
    tok = getattr(processor, "tokenizer", None)
    if tok is not None and hasattr(tok, "batch_decode"):
        return tok.batch_decode(token_id_batches, **kw)
    if hasattr(processor, "batch_decode"):
        return processor.batch_decode(token_id_batches, **kw)
    raise AttributeError("No batch_decode available on processor or tokenizer.")

def get_image_proc_params(processor) -> Dict[str, int]:
    """Read Qwen-style image-processor attributes, with conservative fallbacks."""
    ip = getattr(processor, "image_processor", None)
    return {
        "patch_size": getattr(ip, "patch_size", 14),
        "merge_size": getattr(ip, "merge_size", 1),
        "min_pixels": getattr(ip, "min_pixels", 256 * 256),
        "max_pixels": getattr(ip, "max_pixels", 1280 * 1280),
    }

def trim_generated(generated_ids, inputs):
    in_ids = getattr(inputs, "input_ids", None)
    if in_ids is None and isinstance(inputs, dict):
        in_ids = inputs.get("input_ids", None)
    if in_ids is None:
        return [out_ids for out_ids in generated_ids]
    return [out_ids[len(in_seq):] for in_seq, out_ids in zip(in_ids, generated_ids)]

def array_to_image(image_array: np.ndarray) -> Image.Image:
    if image_array is None: raise ValueError("No image provided.")
    return Image.fromarray(np.uint8(image_array))

# -----------------------------------------------------------------------------
# 4. PROMPT BUILDERS
# -----------------------------------------------------------------------------

# --- Fara Prompt ---
def get_fara_prompt(task, image):
    OS_SYSTEM_PROMPT = """You are a GUI agent. You are given a task and a screenshot of the current status. 
    You need to generate the next action to complete the task.
    Output your action inside a <tool_call> block using JSON format.
    Include "coordinate": [x, y] in pixels for interactions.
    Examples:
    <tool_call>{"name": "User", "arguments": {"action": "click", "coordinate": [400, 300]}}</tool_call>
    <tool_call>{"name": "User", "arguments": {"action": "type", "coordinate": [100, 200], "text": "hello"}}</tool_call>
    """
    return [
        {"role": "system", "content": [{"type": "text", "text": OS_SYSTEM_PROMPT}]},
        {"role": "user", "content": [{"type": "image", "image": image}, {"type": "text", "text": f"Instruction: {task}"}]},
    ]

# --- UI-TARS Prompt ---
def get_uitars_prompt(task, image):
    guidelines = (
        "Localize an element on the GUI image according to my instructions and "
        "output a click position as Click(x, y) with x num pixels from the left edge "
        "and y num pixels from the top edge."
    )
    return [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": f"{guidelines}\n{task}"}
            ],
        }
    ]

# --- Holo2 Prompt ---
def get_holo_prompt(image, task) -> List[dict]:
    guidelines: str = (
        "Localize an element on the GUI image according to my instructions and "
        "output a click position as Click(x, y) with x num pixels from the left edge "
        "and y num pixels from the top edge."
    )
    return [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": f"{guidelines}\n{task}"}
            ],
        }
    ]
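# Note: the UI-TARS and Holo2 prompts are intentionally identical, so a single parser
# (parse_uitars_holo_response below) covers both models' outputs.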

# -----------------------------------------------------------------------------
# 5. PARSING LOGIC
# -----------------------------------------------------------------------------

def parse_uitars_holo_response(text: str) -> List[Dict]:
    """Parse UI-TARS and Holo2 output formats (usually Click(x,y))"""
    actions = []
    text = text.strip()
    
    # Matches: Click(123, 456)
    matches_click = re.findall(r"Click\s*\(\s*(\d+)\s*,\s*(\d+)\s*\)", text, re.IGNORECASE)
    for m in matches_click:
        actions.append({"type": "click", "x": int(m[0]), "y": int(m[1]), "text": ""})

    # Matches: point=[x, y]
    matches_point = re.findall(r"point=\[\s*(\d+)\s*,\s*(\d+)\s*\]", text, re.IGNORECASE)
    for m in matches_point:
        actions.append({"type": "click", "x": int(m[0]), "y": int(m[1]), "text": ""})

    # Matches: start_box='(x, y)'
    matches_box = re.findall(r"start_box=['\"]?\(\s*(\d+)\s*,\s*(\d+)\s*\)['\"]?", text, re.IGNORECASE)
    for m in matches_box:
        actions.append({"type": "click", "x": int(m[0]), "y": int(m[1]), "text": ""})

    # Remove duplicates
    unique = []
    seen = set()
    for a in actions:
        k = (a['type'], a['x'], a['y'])
        if k not in seen:
            seen.add(k)
            unique.append(a)
    return unique
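# Illustrative example:
#   parse_uitars_holo_response("Click(120, 45)")
#   -> [{"type": "click", "x": 120, "y": 45, "text": ""}]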

def parse_fara_response(response: str) -> List[Dict]:
    """Parse Fara <tool_call> JSON format"""
    actions = []
    matches = re.findall(r"<tool_call>(.*?)</tool_call>", response, re.DOTALL)
    for match in matches:
        try:
            data = json.loads(match.strip())
            args = data.get("arguments", {})
            coords = args.get("coordinate", [])
            action_type = args.get("action", "unknown")
            text_content = args.get("text", "")
            if coords and len(coords) == 2:
                actions.append({
                    "type": action_type, "x": float(coords[0]), "y": float(coords[1]), "text": text_content
                })
        except (json.JSONDecodeError, AttributeError, TypeError):
            pass  # skip malformed tool calls
    return actions
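# Illustrative example:
#   parse_fara_response('<tool_call>{"name": "User", "arguments": {"action": "click", "coordinate": [400, 300]}}</tool_call>')
#   -> [{"type": "click", "x": 400.0, "y": 300.0, "text": ""}]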

def create_localized_image(original_image: Image.Image, actions: list[dict]) -> Optional[Image.Image]:
    """Draw a marker and label for each action; accepts normalized (0-1] or absolute pixel coordinates."""
    if not actions:
        return None
    img_copy = original_image.copy()
    draw = ImageDraw.Draw(img_copy)
    width, height = img_copy.size

    try:
        font = ImageFont.load_default()
    except Exception:
        font = None
    
    for act in actions:
        x = act['x']
        y = act['y']
        
        # Determine if we need to scale normalized coords (0-1) or use absolute
        if x <= 1.0 and y <= 1.0 and x > 0:
            pixel_x = int(x * width)
            pixel_y = int(y * height)
        else:
            pixel_x = int(x)
            pixel_y = int(y)
            
        color = 'red' if 'click' in act['type'].lower() else 'blue'
        
        # Draw Target
        r = 15
        draw.ellipse([pixel_x - r, pixel_y - r, pixel_x + r, pixel_y + r], outline=color, width=4)
        draw.ellipse([pixel_x - 3, pixel_y - 3, pixel_x + 3, pixel_y + 3], fill=color)
        
        # Draw Label
        label = f"{act['type']}"
        if act['text']: label += f": {act['text']}"
        
        text_pos = (pixel_x + 18, pixel_y - 12)
        bbox = draw.textbbox(text_pos, label, font=font)
        draw.rectangle((bbox[0]-2, bbox[1]-2, bbox[2]+2, bbox[3]+2), fill="black")
        draw.text(text_pos, label, fill="white", font=font)

    return img_copy

# -----------------------------------------------------------------------------
# 6. CORE LOGIC
# -----------------------------------------------------------------------------
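# On Hugging Face ZeroGPU Spaces, @spaces.GPU runs the decorated function on a GPU worker;
# `duration` is the per-call time budget in seconds.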

@spaces.GPU(duration=120)
def process_screenshot(input_numpy_image: np.ndarray, task: str, model_choice: str):
    if input_numpy_image is None: return "⚠️ Please upload an image.", None

    input_pil_image = array_to_image(input_numpy_image)
    orig_w, orig_h = input_pil_image.size
    
    raw_response = ""
    actions = []

    # --- Holo2-8B Logic ---
    if model_choice == "Holo2-8B":
        if model_h is None: return "Error: Holo2 model failed to load.", None
        print("Using Holo2 Pipeline...")
        
        # 1. Resize
        ip_params = get_image_proc_params(processor_h)
        resized_h, resized_w = smart_resize(
            input_pil_image.height, input_pil_image.width,
            factor=ip_params["patch_size"] * ip_params["merge_size"],
            min_pixels=ip_params["min_pixels"], max_pixels=ip_params["max_pixels"]
        )
        proc_image = input_pil_image.resize((resized_w, resized_h), Image.Resampling.LANCZOS)
        
        # 2. Prompt & Generate
        messages = get_holo_prompt(proc_image, task)
        text_prompt = apply_chat_template_compat(processor_h, messages)
        
        inputs = processor_h(text=[text_prompt], images=[proc_image], padding=True, return_tensors="pt")
        inputs = {k: v.to(device) for k, v in inputs.items()}
        
        with torch.no_grad():
            generated_ids = model_h.generate(**inputs, max_new_tokens=128)
            
        generated_ids_trimmed = trim_generated(generated_ids, inputs)
        raw_response = batch_decode_compat(
            processor_h, generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]
        
        # 3. Parse & Rescale
        actions = parse_uitars_holo_response(raw_response)
        scale_x = orig_w / resized_w
        scale_y = orig_h / resized_h
        for a in actions:
            a['x'] = int(a['x'] * scale_x)
            a['y'] = int(a['y'] * scale_y)

    # --- UI-TARS Logic ---
    elif model_choice == "UI-TARS-1.5-7B":
        if model_x is None: return "Error: UI-TARS model failed to load.", None
        print("Using UI-TARS Pipeline...")
        
        ip_params = get_image_proc_params(processor_x)
        resized_h, resized_w = smart_resize(
            input_pil_image.height, input_pil_image.width,
            factor=ip_params["patch_size"] * ip_params["merge_size"],
            min_pixels=ip_params["min_pixels"], max_pixels=ip_params["max_pixels"]
        )
        proc_image = input_pil_image.resize((resized_w, resized_h), Image.Resampling.LANCZOS)
        
        messages = get_uitars_prompt(task, proc_image)
        text_prompt = processor_x.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = processor_x(text=[text_prompt], images=[proc_image], padding=True, return_tensors="pt")
        inputs = {k: v.to(device) for k, v in inputs.items()}
        
        with torch.no_grad():
            generated_ids = model_x.generate(**inputs, max_new_tokens=128)
            
        # Trim the echoed prompt tokens so only newly generated tokens are decoded
        generated_ids = [out_ids[len(in_seq):] for in_seq, out_ids in zip(inputs["input_ids"], generated_ids)]
        raw_response = processor_x.batch_decode(generated_ids, skip_special_tokens=True)[0]
        
        actions = parse_uitars_holo_response(raw_response)
        scale_x = orig_w / resized_w
        scale_y = orig_h / resized_h
        for a in actions:
            a['x'] = int(a['x'] * scale_x)
            a['y'] = int(a['y'] * scale_y)

    # --- Fara Logic ---
    else: 
        if model_v is None: return "Error: Fara model failed to load.", None
        print("Using Fara Pipeline...")
        messages = get_fara_prompt(task, input_pil_image)
        text_prompt = processor_v.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        image_inputs, video_inputs = process_vision_info(messages)
        
        inputs = processor_v(
            text=[text_prompt],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt"
        )
        inputs = inputs.to(device)
        
        with torch.no_grad():
            generated_ids = model_v.generate(**inputs, max_new_tokens=512)
            
        generated_ids = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
        raw_response = processor_v.batch_decode(generated_ids, skip_special_tokens=True)[0]
        
        actions = parse_fara_response(raw_response)

    print(f"Raw Output: {raw_response}")
    print(f"Parsed Actions: {actions}")

    # Visualize
    output_image = input_pil_image
    if actions:
        vis = create_localized_image(input_pil_image, actions)
        if vis: output_image = vis
            
    return raw_response, output_image
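# Local smoke test (illustrative; the file path and task are placeholders):
#   img = np.array(Image.open("screenshot.png").convert("RGB"))
#   text, vis = process_screenshot(img, "Click the search button", "Fara-7B")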

# -----------------------------------------------------------------------------
# 7. UI SETUP
# -----------------------------------------------------------------------------

with gr.Blocks(theme=steel_blue_theme, css=css) as demo:
    gr.Markdown("# **CUA GUI Agent πŸ–₯️**", elem_id="main-title")
    gr.Markdown("Upload a screenshot, select a model, and provide a task. The model will determine the precise UI coordinates and actions.")

    with gr.Row():
        with gr.Column(scale=2):
            input_image = gr.Image(label="Upload Screenshot", height=500)
            
            with gr.Row():
                model_choice = gr.Radio(
                    choices=["Fara-7B", "UI-TARS-1.5-7B", "Holo2-8B"],
                    label="Select Model",
                    value="Fara-7B",
                    interactive=True
                )
            
            task_input = gr.Textbox(
                label="Task Instruction",
                placeholder="e.g. Input the server address readyforquantum.com...",
                lines=2
            )
            submit_btn = gr.Button("Analyze UI & Generate Action", variant="primary")

        with gr.Column(scale=3):
            output_image = gr.Image(label="Visualized Action Points", elem_id="out_img", height=500)
            output_text = gr.Textbox(label="Raw Model Output", lines=8, show_copy_button=True)

    submit_btn.click(
        fn=process_screenshot,
        inputs=[input_image, task_input, model_choice],
        outputs=[output_text, output_image]
    )
    
    gr.Examples(
        examples=[["./assets/google.png", "Search for 'Hugging Face'", "Fara-7B"]],
        inputs=[input_image, task_input, model_choice],
        label="Quick Examples"
    )

if __name__ == "__main__":
    demo.queue().launch()